diff --git a/build.gradle b/build.gradle
index 2156dbccc898..66c8ad0effc2 100644
--- a/build.gradle
+++ b/build.gradle
@@ -88,6 +88,7 @@ subprojects {
   all {
     exclude group: 'org.slf4j', module: 'slf4j-log4j12'
     exclude group: 'org.mortbay.jetty'
+    exclude group: 'org.pentaho', module: 'pentaho-aggdesigner-algorithm'
 
     resolutionStrategy {
       force 'com.fasterxml.jackson.module:jackson-module-scala_2.11:2.10.2'
@@ -338,6 +339,34 @@ project(':iceberg-mr') {
     compileOnly("org.apache.hadoop:hadoop-client") {
       exclude group: 'org.apache.avro', module: 'avro'
     }
+    compileOnly("org.apache.hive:hive-serde")
+
+    compileOnly("org.apache.hive:hive-exec::core") {
+      //exclude group: 'org.apache.avro', module: 'avro'
+      exclude group: 'org.slf4j', module: 'slf4j-log4j12'
+      exclude group: 'org.pentaho' // missing dependency
+      exclude group: 'org.apache.hive', module: 'hive-llap-tez'
+      exclude group: 'org.apache.logging.log4j'
+      exclude group: 'com.google.protobuf', module: 'protobuf-java'
+      exclude group: 'org.apache.calcite.avatica'
+      exclude group: 'com.google.code.findbugs', module: 'jsr305'
+      exclude group: 'com.google.guava'
+    }
+
+    compileOnly "org.apache.hive:hive-metastore"
+    compileOnly "org.apache.hive:hive-serde"
+
+    testCompile("com.klarna:hiverunner:5.2.1") {
+      exclude group: 'javax.jms', module: 'jms'
+      exclude group: 'org.apache.hive', module: 'hive-exec'
+      exclude group: 'org.codehaus.jettison', module: 'jettison'
+      exclude group: 'org.apache.calcite.avatica'
+    }
+
+    testCompile("org.apache.avro:avro:1.9.2")
+    testCompile("org.apache.calcite:calcite-core")
+    testCompile("com.esotericsoftware:kryo-shaded:4.0.2")
+    testCompile("com.fasterxml.jackson.core:jackson-annotations:2.6.5")
 
     testCompile project(path: ':iceberg-data', configuration: 'testArtifacts')
     testCompile project(path: ':iceberg-api', configuration: 'testArtifacts')
diff --git a/mr/dependencies.lock b/mr/dependencies.lock
index b0e2c279c6ff..27533bf9efe4 100644
--- a/mr/dependencies.lock
+++ b/mr/dependencies.lock
@@ -1,4 +1,365 @@
 {
+    "allProcessors": {
+        "com.github.kevinstern:software-and-algorithms": {
+            "locked": "1.0",
+            "transitive": [
+                "com.google.errorprone:error_prone_check_api"
+            ]
+        },
+        "com.github.stephenc.jcip:jcip-annotations": {
+            "locked": "1.0-1",
+            "transitive": [
+                "com.google.errorprone:error_prone_core"
+            ]
+        },
+        "com.google.auto:auto-common": {
+            "locked": "0.10",
+            "transitive": [
+                "com.google.errorprone:error_prone_core"
+            ]
+        },
+        "com.google.code.findbugs:jFormatString": {
+            "locked": "3.0.0",
+            "transitive": [
+                "com.google.errorprone:error_prone_core"
+            ]
+        },
+        "com.google.code.findbugs:jsr305": {
+            "locked": "3.0.2",
+            "transitive": [
+                "com.google.errorprone:error_prone_check_api",
+                "com.google.errorprone:error_prone_core",
+                "com.google.guava:guava"
+            ]
+        },
+        "com.google.errorprone:error_prone_annotation": {
+            "locked": "2.3.3",
+            "transitive": [
+                "com.google.errorprone:error_prone_check_api",
+                "com.google.errorprone:error_prone_core"
+            ]
+        },
+        "com.google.errorprone:error_prone_annotations": {
+            "locked": "2.3.3",
+            "transitive": [
+                "com.google.errorprone:error_prone_check_api",
+                "com.google.errorprone:error_prone_core",
+                "com.google.guava:guava"
+            ]
+        },
+        "com.google.errorprone:error_prone_check_api": {
+            "locked": "2.3.3",
+            "transitive": [
+                "com.google.errorprone:error_prone_core"
+            ]
+        },
+        "com.google.errorprone:error_prone_core": {
+            "locked": "2.3.3",
+            "transitive": [
+                "com.palantir.baseline:baseline-error-prone"
+            ]
+        },
+        "com.google.errorprone:error_prone_type_annotations": {
"locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.guava:failureaccess": { + "locked": "1.0.1", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.guava:guava": { + "locked": "27.0.1-jre", + "transitive": [ + "com.google.auto:auto-common", + "com.google.errorprone:error_prone_annotation", + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.guava:listenablefuture": { + "locked": "9999.0-empty-to-avoid-conflict-with-guava", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.j2objc:j2objc-annotations": { + "locked": "1.1", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.protobuf:protobuf-java": { + "locked": "3.4.0", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.googlecode.java-diff-utils:diffutils": { + "locked": "1.3.0", + "transitive": [ + "com.google.errorprone:error_prone_check_api" + ] + }, + "com.palantir.baseline:baseline-error-prone": { + "locked": "0.55.0", + "requested": "0.55.0" + }, + "org.checkerframework:checker-qual": { + "locked": "2.5.3", + "transitive": [ + "com.google.guava:guava", + "org.checkerframework:dataflow", + "org.checkerframework:javacutil" + ] + }, + "org.checkerframework:dataflow": { + "locked": "2.5.3", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core" + ] + }, + "org.checkerframework:javacutil": { + "locked": "2.5.3", + "transitive": [ + "org.checkerframework:dataflow" + ] + }, + "org.codehaus.mojo:animal-sniffer-annotations": { + "locked": "1.17", + "transitive": [ + "com.google.guava:guava" + ] + }, + "org.pcollections:pcollections": { + "locked": "2.1.2", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + } + }, + "annotationProcessor": { + "com.github.kevinstern:software-and-algorithms": { + "locked": "1.0", + "transitive": [ + "com.google.errorprone:error_prone_check_api" + ] + }, + "com.github.stephenc.jcip:jcip-annotations": { + "locked": "1.0-1", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.auto:auto-common": { + "locked": "0.10", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.code.findbugs:jFormatString": { + "locked": "3.0.0", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.code.findbugs:jsr305": { + "locked": "3.0.2", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core", + "com.google.guava:guava" + ] + }, + "com.google.errorprone:error_prone_annotation": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core", + "com.google.guava:guava" + ] + }, + "com.google.errorprone:error_prone_check_api": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.errorprone:error_prone_core": { + "locked": "2.3.3", + "transitive": [ + "com.palantir.baseline:baseline-error-prone" + ] + }, + "com.google.errorprone:error_prone_type_annotations": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.guava:failureaccess": { + "locked": "1.0.1", + "transitive": [ + "com.google.guava:guava" + ] + }, + 
"com.google.guava:guava": { + "locked": "27.0.1-jre", + "transitive": [ + "com.google.auto:auto-common", + "com.google.errorprone:error_prone_annotation", + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.guava:listenablefuture": { + "locked": "9999.0-empty-to-avoid-conflict-with-guava", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.j2objc:j2objc-annotations": { + "locked": "1.1", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.protobuf:protobuf-java": { + "locked": "3.4.0", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.googlecode.java-diff-utils:diffutils": { + "locked": "1.3.0", + "transitive": [ + "com.google.errorprone:error_prone_check_api" + ] + }, + "com.palantir.baseline:baseline-error-prone": { + "locked": "0.55.0", + "requested": "0.55.0" + }, + "org.checkerframework:checker-qual": { + "locked": "2.5.3", + "transitive": [ + "com.google.guava:guava", + "org.checkerframework:dataflow", + "org.checkerframework:javacutil" + ] + }, + "org.checkerframework:dataflow": { + "locked": "2.5.3", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core" + ] + }, + "org.checkerframework:javacutil": { + "locked": "2.5.3", + "transitive": [ + "org.checkerframework:dataflow" + ] + }, + "org.codehaus.mojo:animal-sniffer-annotations": { + "locked": "1.17", + "transitive": [ + "com.google.guava:guava" + ] + }, + "org.pcollections:pcollections": { + "locked": "2.1.2", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + } + }, + "checkstyle": { + "antlr:antlr": { + "locked": "2.7.7", + "transitive": [ + "com.puppycrawl.tools:checkstyle" + ] + }, + "com.google.code.findbugs:jsr305": { + "locked": "3.0.2", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.1.3", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.guava:guava": { + "locked": "26.0-jre", + "transitive": [ + "com.puppycrawl.tools:checkstyle" + ] + }, + "com.google.j2objc:j2objc-annotations": { + "locked": "1.1", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.puppycrawl.tools:checkstyle": { + "locked": "8.13" + }, + "commons-beanutils:commons-beanutils": { + "locked": "1.9.3", + "transitive": [ + "com.puppycrawl.tools:checkstyle" + ] + }, + "commons-cli:commons-cli": { + "locked": "1.4", + "transitive": [ + "com.puppycrawl.tools:checkstyle" + ] + }, + "commons-collections:commons-collections": { + "locked": "3.2.2", + "transitive": [ + "commons-beanutils:commons-beanutils" + ] + }, + "net.sf.saxon:Saxon-HE": { + "locked": "9.8.0-14", + "transitive": [ + "com.puppycrawl.tools:checkstyle" + ] + }, + "org.antlr:antlr4-runtime": { + "locked": "4.7.1", + "transitive": [ + "com.puppycrawl.tools:checkstyle" + ] + }, + "org.checkerframework:checker-qual": { + "locked": "2.5.2", + "transitive": [ + "com.google.guava:guava" + ] + }, + "org.codehaus.mojo:animal-sniffer-annotations": { + "locked": "1.14", + "transitive": [ + "com.google.guava:guava" + ] + } + }, "compile": { "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.10.2", @@ -225,6 +586,12 @@ } }, "compileClasspath": { + "ant:ant": { + "locked": "1.6.5", + "transitive": [ + "tomcat:jasper-compiler" + ] + }, "aopalliance:aopalliance": { "locked": "1.0", "transitive": [ @@ -234,10 +601,61 @@ "asm:asm": { "locked": "3.1", "transitive": [ + "asm:asm-tree", "com.sun.jersey:jersey-server", "org.sonatype.sisu.inject:cglib" ] }, + 
"asm:asm-commons": { + "locked": "3.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "asm:asm-tree": { + "locked": "3.1", + "transitive": [ + "asm:asm-commons" + ] + }, + "ch.qos.logback:logback-classic": { + "locked": "1.0.9", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "ch.qos.logback:logback-core": { + "locked": "1.0.9", + "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "co.cask.tephra:tephra-api": { + "locked": "0.6.0", + "transitive": [ + "co.cask.tephra:tephra-core", + "co.cask.tephra:tephra-hbase-compat-1.0", + "org.apache.hive:hive-metastore" + ] + }, + "co.cask.tephra:tephra-core": { + "locked": "0.6.0", + "transitive": [ + "co.cask.tephra:tephra-hbase-compat-1.0", + "org.apache.hive:hive-metastore" + ] + }, + "co.cask.tephra:tephra-hbase-compat-1.0": { + "locked": "0.6.0", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.10.2", "transitive": [ @@ -255,7 +673,9 @@ "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.10.2", "transitive": [ + "io.dropwizard.metrics:metrics-json", "org.apache.avro:avro", + "org.apache.hive:hive-common", "org.apache.iceberg:iceberg-core" ] }, @@ -265,9 +685,19 @@ "org.apache.iceberg:iceberg-core" ] }, + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter": { + "locked": "0.1.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "com.github.stephenc.findbugs:findbugs-annotations": { "locked": "1.3.9-1", "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", "org.apache.iceberg:iceberg-api", "org.apache.iceberg:iceberg-common", "org.apache.iceberg:iceberg-core", @@ -279,13 +709,21 @@ "com.google.code.findbugs:jsr305": { "locked": "3.0.0", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-serde", + "org.apache.twill:twill-api", + "org.apache.twill:twill-common", + "org.apache.twill:twill-zookeeper" ] }, "com.google.code.gson:gson": { "locked": "2.2.4", "transitive": [ - "org.apache.hadoop:hadoop-common" + "co.cask.tephra:tephra-core", + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-exec", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" ] }, "com.google.errorprone:error_prone_annotations": { @@ -294,9 +732,26 @@ "com.github.ben-manes.caffeine:caffeine" ] }, + "com.google.inject.extensions:guice-assistedinject": { + "locked": "3.0", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "com.google.inject.extensions:guice-servlet": { + "locked": "3.0", + "transitive": [ + "com.sun.jersey.contribs:jersey-guice", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-common" + ] + }, "com.google.inject:guice": { "locked": "3.0", "transitive": [ + "co.cask.tephra:tephra-core", + "com.google.inject.extensions:guice-assistedinject", + "com.google.inject.extensions:guice-servlet", "com.sun.jersey.contribs:jersey-guice", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager" @@ -315,18 +770,41 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-common", - 
"org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", + "org.apache.hive:hive-metastore", + "org.apache.orc:orc-core" ] }, - "com.sun.jersey.contribs:jersey-guice": { - "locked": "1.9", + "com.jamesmurty.utils:java-xmlbuilder": { + "locked": "0.4", "transitive": [ - "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "net.java.dev.jets3t:jets3t" ] }, - "com.sun.jersey:jersey-client": { - "locked": "1.9", + "com.jcraft:jsch": { + "locked": "0.1.42", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "com.jolbox:bonecp": { + "locked": "0.8.0.RELEASE", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "com.sun.jersey.contribs:jersey-guice": { + "locked": "1.9", + "transitive": [ + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-nodemanager" + ] + }, + "com.sun.jersey:jersey-client": { + "locked": "1.9", "transitive": [ "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager" @@ -338,6 +816,7 @@ "com.sun.jersey:jersey-client", "com.sun.jersey:jersey-json", "com.sun.jersey:jersey-server", + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager" ] @@ -345,6 +824,7 @@ "com.sun.jersey:jersey-json": { "locked": "1.9", "transitive": [ + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager" ] @@ -353,6 +833,7 @@ "locked": "1.9", "transitive": [ "com.sun.jersey.contribs:jersey-guice", + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common" ] }, @@ -362,6 +843,18 @@ "com.sun.jersey:jersey-json" ] }, + "com.tdunning:json": { + "locked": "1.8", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "com.zaxxer:HikariCP": { + "locked": "2.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "commons-beanutils:commons-beanutils": { "locked": "1.7.0", "transitive": [ @@ -380,18 +873,27 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hive:hive-common", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service-rpc" ] }, "commons-codec:commons-codec": { - "locked": "1.6", + "locked": "1.9", "transitive": [ "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", "org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service-rpc", "org.apache.httpcomponents:httpclient" ] }, @@ -399,7 +901,8 @@ "locked": "3.2.2", "transitive": [ "commons-configuration:commons-configuration", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-common" ] }, "commons-configuration:commons-configuration": { @@ -408,16 +911,30 @@ "org.apache.hadoop:hadoop-common" ] }, + "commons-dbcp:commons-dbcp": { + "locked": "1.4", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-metastore" + ] + }, 
"commons-digester:commons-digester": { "locked": "1.8", "transitive": [ "commons-configuration:commons-configuration" ] }, + "commons-el:commons-el": { + "locked": "1.0", + "transitive": [ + "tomcat:jasper-runtime" + ] + }, "commons-httpclient:commons-httpclient": { "locked": "3.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-exec" ] }, "commons-io:commons-io": { @@ -425,7 +942,10 @@ "transitive": [ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hive:hive-exec" ] }, "commons-lang:commons-lang": { @@ -437,17 +957,29 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-common", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", + "org.apache.orc:orc-core", + "org.apache.velocity:velocity" ] }, "commons-logging:commons-logging": { - "locked": "1.1.3", + "locked": "1.2", "transitive": [ "commons-beanutils:commons-beanutils", "commons-beanutils:commons-beanutils-core", "commons-configuration:commons-configuration", "commons-digester:commons-digester", + "commons-el:commons-el", "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-api", @@ -455,6 +987,9 @@ "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", "org.apache.httpcomponents:httpclient" ] }, @@ -467,6 +1002,8 @@ "commons-pool:commons-pool": { "locked": "1.6", "transitive": [ + "commons-dbcp:commons-dbcp", + "org.apache.hive:hive-metastore", "org.apache.parquet:parquet-hadoop" ] }, @@ -476,17 +1013,54 @@ "org.apache.orc:orc-core" ] }, + "io.dropwizard.metrics:metrics-core": { + "locked": "3.1.2", + "transitive": [ + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-json": { + "locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-jvm": { + "locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "io.netty:netty": { "locked": "3.7.0.Final", "transitive": [ "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-mapreduce-client-core", "org.apache.zookeeper:zookeeper" ] }, "io.netty:netty-all": { "locked": "4.0.23.Final", "transitive": [ - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client" + ] + }, + "it.unimi.dsi:fastutil": { + "locked": "6.5.6", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "javax.activation:activation": { + "locked": "1.1", + "transitive": [ + "javax.mail:mail", + "org.eclipse.jetty.aggregate:jetty-all" ] }, 
"javax.annotation:javax.annotation-api": { @@ -502,11 +1076,44 @@ "com.sun.jersey.contribs:jersey-guice" ] }, + "javax.jdo:jdo-api": { + "locked": "3.0.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "javax.mail:mail": { + "locked": "1.4.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "javax.servlet:jsp-api": { + "locked": "2.0", + "transitive": [ + "tomcat:jasper-compiler" + ] + }, "javax.servlet:servlet-api": { "locked": "2.5", "transitive": [ + "javax.servlet:jsp-api", + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "tomcat:jasper-runtime" + ] + }, + "javax.transaction:jta": { + "locked": "1.1", + "transitive": [ + "javax.jdo:jdo-api" + ] + }, + "javax.transaction:transaction-api": { + "locked": "1.1", + "transitive": [ + "org.datanucleus:javax.jdo" ] }, "javax.xml.bind:jaxb-api": { @@ -518,12 +1125,35 @@ "org.apache.orc:orc-core" ] }, + "javolution:javolution": { + "locked": "5.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "jline:jline": { - "locked": "0.9.94", + "locked": "2.12", "transitive": [ + "org.apache.hive:hive-common", "org.apache.zookeeper:zookeeper" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-common" + ] + }, + "junit:junit": { + "locked": "4.11", + "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol" + ] + }, "log4j:log4j": { "locked": "1.2.17", "transitive": [ @@ -531,21 +1161,102 @@ "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", "org.apache.zookeeper:zookeeper" ] }, + "net.hydromatic:eigenbase-properties": { + "locked": "1.1.5", + "transitive": [ + "org.apache.calcite:calcite-core" + ] + }, + "net.java.dev.jets3t:jets3t": { + "locked": "0.9.0", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "net.sf.opencsv:opencsv": { + "locked": "2.3", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, + "org.antlr:ST4": { + "locked": "4.0.4", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.antlr:antlr-runtime": { + "locked": "3.5.2", + "transitive": [ + "org.antlr:ST4", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.ant:ant": { + "locked": "1.9.1", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-vector-code-gen" + ] + }, + "org.apache.ant:ant-launcher": { + "locked": "1.9.1", + "transitive": [ + "org.apache.ant:ant" + ] + }, "org.apache.avro:avro": { "locked": "1.9.2", "transitive": [ + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hive:hive-serde", "org.apache.iceberg:iceberg-core" ] }, + "org.apache.calcite:calcite-core": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-druid": { + "locked": "1.10.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-linq4j": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid" + ] + }, 
"org.apache.commons:commons-compress": { "locked": "1.19", "transitive": [ "org.apache.avro:avro", "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec" + ] + }, + "org.apache.commons:commons-lang3": { + "locked": "3.2", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-common" ] }, "org.apache.commons:commons-math3": { @@ -554,6 +1265,12 @@ "org.apache.hadoop:hadoop-common" ] }, + "org.apache.curator:apache-curator": { + "locked": "2.7.1", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, "org.apache.curator:curator-client": { "locked": "2.7.1", "transitive": [ @@ -565,7 +1282,9 @@ "locked": "2.7.1", "transitive": [ "org.apache.curator:curator-recipes", - "org.apache.hadoop:hadoop-auth" + "org.apache.hadoop:hadoop-auth", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec" ] }, "org.apache.curator:curator-recipes": { @@ -574,6 +1293,12 @@ "org.apache.hadoop:hadoop-common" ] }, + "org.apache.derby:derby": { + "locked": "10.10.2.0", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "org.apache.directory.api:api-asn1-api": { "locked": "1.0.0-M20", "transitive": [ @@ -598,17 +1323,39 @@ "org.apache.hadoop:hadoop-auth" ] }, + "org.apache.geronimo.specs:geronimo-annotation_1.0_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jaspic_1.0_spec": { + "locked": "1.0", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jta_1.1_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, "org.apache.hadoop:hadoop-annotations": { "locked": "2.7.3", "transitive": [ "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-api", + "org.apache.hadoop:hadoop-yarn-common" ] }, "org.apache.hadoop:hadoop-auth": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-client" ] }, "org.apache.hadoop:hadoop-client": { @@ -617,7 +1364,10 @@ "org.apache.hadoop:hadoop-common": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-client" + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "org.apache.hadoop:hadoop-client", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common" ] }, "org.apache.hadoop:hadoop-hdfs": { @@ -644,7 +1394,9 @@ "locked": "2.7.3", "transitive": [ "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-mapreduce-client-common" + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common" ] }, "org.apache.hadoop:hadoop-mapreduce-client-jobclient": { @@ -700,23 +1452,107 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle" ] }, + "org.apache.hbase:hbase-annotations": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol" + ] + }, + "org.apache.hbase:hbase-client": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.hbase:hbase-common": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client" + ] + }, + "org.apache.hbase:hbase-protocol": { + 
"locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common" + ] + }, + "org.apache.hive.shims:hive-shims-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive:hive-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-exec": { + "locked": "2.3.7" + }, + "org.apache.hive:hive-metastore": { + "locked": "2.3.7" + }, + "org.apache.hive:hive-serde": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.hive:hive-service-rpc": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-shims": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-storage-api": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.hive:hive-vector-code-gen": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, "org.apache.htrace:htrace-core": { "locked": "3.1.0-incubating", "transitive": [ "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common" ] }, "org.apache.httpcomponents:httpclient": { - "locked": "4.2.5", + "locked": "4.4.1", "transitive": [ - "org.apache.hadoop:hadoop-auth" + "net.java.dev.jets3t:jets3t", + "org.apache.hadoop:hadoop-auth", + "org.apache.thrift:libthrift" ] }, "org.apache.httpcomponents:httpcore": { - "locked": "4.2.4", + "locked": "4.4.1", "transitive": [ - "org.apache.httpcomponents:httpclient" + "net.java.dev.jets3t:jets3t", + "org.apache.httpcomponents:httpclient", + "org.apache.thrift:libthrift" ] }, "org.apache.iceberg:iceberg-api": { @@ -758,9 +1594,51 @@ "org.apache.iceberg:iceberg-parquet": { "project": true }, + "org.apache.ivy:ivy": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.logging.log4j:log4j-1.2-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-core", + "org.apache.logging.log4j:log4j-slf4j-impl", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-core": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-slf4j-impl": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-web": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "org.apache.orc:orc-core": { "locked": "1.6.3", "transitive": [ + "org.apache.hive:hive-common", "org.apache.iceberg:iceberg-orc" ] }, @@ -810,12 +1688,87 @@ "org.apache.parquet:parquet-avro" ] }, + "org.apache.parquet:parquet-hadoop-bundle": { + "locked": "1.8.1", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, "org.apache.parquet:parquet-jackson": { "locked": "1.11.0", "transitive": [ "org.apache.parquet:parquet-hadoop" ] }, + "org.apache.thrift:libfb303": { + "locked": "0.9.3", + "transitive": [ + "org.apache.hive:hive-metastore", + 
"org.apache.hive:hive-service-rpc" + ] + }, + "org.apache.thrift:libthrift": { + "locked": "0.9.3", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service-rpc", + "org.apache.thrift:libfb303" + ] + }, + "org.apache.twill:twill-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-common": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-api", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "org.apache.twill:twill-discovery-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.twill:twill-discovery-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core" + ] + }, + "org.apache.twill:twill-zookeeper": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.velocity:velocity": { + "locked": "1.5", + "transitive": [ + "org.apache.hive:hive-vector-code-gen" + ] + }, "org.apache.yetus:audience-annotations": { "locked": "0.11.0", "transitive": [ @@ -825,12 +1778,17 @@ "org.apache.zookeeper:zookeeper": { "locked": "3.4.6", "transitive": [ + "org.apache.curator:apache-curator", "org.apache.curator:curator-client", "org.apache.curator:curator-framework", "org.apache.curator:curator-recipes", "org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-server-common" + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hbase:hbase-client", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec", + "org.apache.twill:twill-zookeeper" ] }, "org.checkerframework:checker-qual": { @@ -839,6 +1797,12 @@ "com.github.ben-manes.caffeine:caffeine" ] }, + "org.codehaus.groovy:groovy-all": { + "locked": "2.4.4", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, "org.codehaus.jackson:jackson-core-asl": { "locked": "1.9.13", "transitive": [ @@ -865,6 +1829,7 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-client", "org.codehaus.jackson:jackson-jaxrs", "org.codehaus.jackson:jackson-xc" ] @@ -876,6 +1841,19 @@ "org.apache.hadoop:hadoop-yarn-common" ] }, + "org.codehaus.janino:commons-compiler": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.codehaus.janino:janino" + ] + }, + "org.codehaus.janino:janino": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core" + ] + }, "org.codehaus.jettison:jettison": { "locked": "1.1", "transitive": [ @@ -883,6 +1861,43 @@ "org.apache.hadoop:hadoop-yarn-server-nodemanager" ] }, + "org.datanucleus:datanucleus-api-jdo": { + "locked": "4.2.4", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-core": { + "locked": "4.1.17", + "transitive": [ + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + 
"org.datanucleus:datanucleus-rdbms": { + "locked": "4.1.19", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:javax.jdo": { + "locked": "3.2.0-m3", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.eclipse.jetty.aggregate:jetty-all": { + "locked": "7.6.0.v20120127", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.eclipse.jetty.orbit:javax.servlet": { + "locked": "3.0.0.v201112011016", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "org.fusesource.leveldbjni:leveldbjni-all": { "locked": "1.8", "transitive": [ @@ -892,16 +1907,51 @@ "org.apache.hadoop:hadoop-yarn-server-nodemanager" ] }, + "org.hamcrest:hamcrest-core": { + "locked": "1.3", + "transitive": [ + "junit:junit" + ] + }, "org.jetbrains:annotations": { "locked": "17.0.0", "transitive": [ "org.apache.orc:orc-core" ] }, + "org.jruby.jcodings:jcodings": { + "locked": "1.0.8", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.jruby.joni:joni" + ] + }, + "org.jruby.joni:joni": { + "locked": "2.1.2", + "transitive": [ + "org.apache.hbase:hbase-client" + ] + }, + "org.ow2.asm:asm-all": { + "locked": "5.0.2", + "transitive": [ + "org.apache.twill:twill-core" + ] + }, "org.slf4j:slf4j-api": { "locked": "1.7.25", "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "com.jolbox:bonecp", + "com.zaxxer:HikariCP", + "io.dropwizard.metrics:metrics-core", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", "org.apache.avro:avro", + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid", "org.apache.curator:curator-client", "org.apache.directory.api:api-asn1-api", "org.apache.directory.api:api-util", @@ -916,16 +1966,30 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service-rpc", + "org.apache.hive:hive-shims", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", "org.apache.iceberg:iceberg-api", "org.apache.iceberg:iceberg-common", "org.apache.iceberg:iceberg-core", "org.apache.iceberg:iceberg-data", "org.apache.iceberg:iceberg-orc", "org.apache.iceberg:iceberg-parquet", + "org.apache.logging.log4j:log4j-slf4j-impl", "org.apache.orc:orc-core", "org.apache.orc:orc-shims", "org.apache.parquet:parquet-common", "org.apache.parquet:parquet-format-structures", + "org.apache.thrift:libthrift", + "org.apache.twill:twill-common", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper", "org.apache.zookeeper:zookeeper" ] }, @@ -947,6 +2011,30 @@ "org.apache.parquet:parquet-hadoop" ] }, + "oro:oro": { + "locked": "2.0.8", + "transitive": [ + "org.apache.velocity:velocity" + ] + }, + "stax:stax-api": { + "locked": "1.0.1", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "tomcat:jasper-compiler": { + "locked": "5.5.23", + "transitive": [ + "org.apache.hive:hive-service-rpc" + ] + }, + "tomcat:jasper-runtime": { + "locked": "5.5.23", + "transitive": [ + "org.apache.hive:hive-service-rpc" + ] + }, "xerces:xercesImpl": { "locked": "2.9.1", "transitive": [ @@ -968,6 +2056,12 @@ } }, "compileOnly": { + "ant:ant": { + "locked": "1.6.5", + "transitive": [ + "tomcat:jasper-compiler" + ] 
+        },
         "aopalliance:aopalliance": {
             "locked": "1.0",
             "transitive": [
@@ -977,25 +2071,120 @@
         "asm:asm": {
             "locked": "3.1",
             "transitive": [
+                "asm:asm-tree",
                 "com.sun.jersey:jersey-server",
                 "org.sonatype.sisu.inject:cglib"
             ]
         },
+        "asm:asm-commons": {
+            "locked": "3.1",
+            "transitive": [
+                "org.eclipse.jetty.aggregate:jetty-all"
+            ]
+        },
+        "asm:asm-tree": {
+            "locked": "3.1",
+            "transitive": [
+                "asm:asm-commons"
+            ]
+        },
+        "ch.qos.logback:logback-classic": {
+            "locked": "1.0.9",
+            "transitive": [
+                "co.cask.tephra:tephra-core",
+                "org.apache.twill:twill-core",
+                "org.apache.twill:twill-zookeeper"
+            ]
+        },
+        "ch.qos.logback:logback-core": {
+            "locked": "1.0.9",
+            "transitive": [
+                "ch.qos.logback:logback-classic",
+                "co.cask.tephra:tephra-core",
+                "org.apache.twill:twill-core",
+                "org.apache.twill:twill-zookeeper"
+            ]
+        },
+        "co.cask.tephra:tephra-api": {
+            "locked": "0.6.0",
+            "transitive": [
+                "co.cask.tephra:tephra-core",
+                "co.cask.tephra:tephra-hbase-compat-1.0",
+                "org.apache.hive:hive-metastore"
+            ]
+        },
+        "co.cask.tephra:tephra-core": {
+            "locked": "0.6.0",
+            "transitive": [
+                "co.cask.tephra:tephra-hbase-compat-1.0",
+                "org.apache.hive:hive-metastore"
+            ]
+        },
+        "co.cask.tephra:tephra-hbase-compat-1.0": {
+            "locked": "0.6.0",
+            "transitive": [
+                "org.apache.hive:hive-metastore"
+            ]
+        },
+        "com.fasterxml.jackson.core:jackson-annotations": {
+            "locked": "2.6.0",
+            "transitive": [
+                "com.fasterxml.jackson.core:jackson-databind"
+            ]
+        },
+        "com.fasterxml.jackson.core:jackson-core": {
+            "locked": "2.6.5",
+            "transitive": [
+                "com.fasterxml.jackson.core:jackson-databind"
+            ]
+        },
+        "com.fasterxml.jackson.core:jackson-databind": {
+            "locked": "2.6.5",
+            "transitive": [
+                "io.dropwizard.metrics:metrics-json",
+                "org.apache.hive:hive-common"
+            ]
+        },
+        "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter": {
+            "locked": "0.1.2",
+            "transitive": [
+                "org.apache.hive:hive-common"
+            ]
+        },
+        "com.github.stephenc.findbugs:findbugs-annotations": {
+            "locked": "1.3.9-1",
+            "transitive": [
+                "org.apache.hbase:hbase-annotations",
+                "org.apache.hbase:hbase-client",
+                "org.apache.hbase:hbase-common",
+                "org.apache.hbase:hbase-protocol"
+            ]
+        },
         "com.google.code.findbugs:jsr305": {
             "locked": "3.0.0",
             "transitive": [
-                "org.apache.hadoop:hadoop-common"
+                "org.apache.hadoop:hadoop-common",
+                "org.apache.hive:hive-serde",
+                "org.apache.twill:twill-api",
+                "org.apache.twill:twill-common",
+                "org.apache.twill:twill-zookeeper"
             ]
         },
         "com.google.code.gson:gson": {
             "locked": "2.2.4",
             "transitive": [
-                "org.apache.hadoop:hadoop-common"
+                "co.cask.tephra:tephra-core",
+                "org.apache.hadoop:hadoop-common",
+                "org.apache.hive:hive-exec",
+                "org.apache.twill:twill-core",
+                "org.apache.twill:twill-discovery-core"
            ]
         },
         "com.google.guava:guava": {
             "locked": "16.0.1",
             "transitive": [
+                "co.cask.tephra:tephra-core",
+                "com.jolbox:bonecp",
                 "org.apache.curator:curator-client",
                 "org.apache.curator:curator-framework",
                 "org.apache.curator:curator-recipes",
@@ -1004,16 +2193,46 @@
                 "org.apache.hadoop:hadoop-yarn-api",
                 "org.apache.hadoop:hadoop-yarn-client",
                 "org.apache.hadoop:hadoop-yarn-common",
+                "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice",
                 "org.apache.hadoop:hadoop-yarn-server-common",
-                "org.apache.hadoop:hadoop-yarn-server-nodemanager"
+                "org.apache.hadoop:hadoop-yarn-server-nodemanager",
+                "org.apache.hadoop:hadoop-yarn-server-resourcemanager",
+                "org.apache.hadoop:hadoop-yarn-server-web-proxy",
+                "org.apache.hbase:hbase-client",
+                "org.apache.hbase:hbase-common",
"org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-metastore", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "com.google.inject.extensions:guice-assistedinject": { + "locked": "3.0", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "com.google.inject.extensions:guice-servlet": { + "locked": "3.0", + "transitive": [ + "com.sun.jersey.contribs:jersey-guice", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.google.inject:guice": { "locked": "3.0", "transitive": [ + "co.cask.tephra:tephra-core", + "com.google.inject.extensions:guice-assistedinject", + "com.google.inject.extensions:guice-servlet", "com.sun.jersey.contribs:jersey-guice", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.google.protobuf:protobuf-java": { @@ -1028,22 +2247,51 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle", "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", + "org.apache.hive:hive-metastore", + "org.apache.orc:orc-core" + ] + }, + "com.jamesmurty.utils:java-xmlbuilder": { + "locked": "0.4", + "transitive": [ + "net.java.dev.jets3t:jets3t" + ] + }, + "com.jcraft:jsch": { + "locked": "0.1.42", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "com.jolbox:bonecp": { + "locked": "0.8.0.RELEASE", + "transitive": [ + "org.apache.hive:hive-metastore" ] }, "com.sun.jersey.contribs:jersey-guice": { "locked": "1.9", "transitive": [ "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.sun.jersey:jersey-client": { "locked": "1.9", "transitive": [ "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.sun.jersey:jersey-core": { @@ -1052,21 +2300,28 @@ "com.sun.jersey:jersey-client", "com.sun.jersey:jersey-json", "com.sun.jersey:jersey-server", + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.sun.jersey:jersey-json": { "locked": "1.9", "transitive": [ + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + 
"org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.sun.jersey:jersey-server": { "locked": "1.9", "transitive": [ "com.sun.jersey.contribs:jersey-guice", + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common" ] }, @@ -1076,6 +2331,24 @@ "com.sun.jersey:jersey-json" ] }, + "com.tdunning:json": { + "locked": "1.8", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "com.thoughtworks.paranamer:paranamer": { + "locked": "2.3", + "transitive": [ + "org.apache.avro:avro" + ] + }, + "com.zaxxer:HikariCP": { + "locked": "2.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "commons-beanutils:commons-beanutils": { "locked": "1.7.0", "transitive": [ @@ -1094,18 +2367,27 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hive:hive-common", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service-rpc" ] }, "commons-codec:commons-codec": { - "locked": "1.6", + "locked": "1.9", "transitive": [ "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", "org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service-rpc", "org.apache.httpcomponents:httpclient" ] }, @@ -1113,7 +2395,9 @@ "locked": "3.2.2", "transitive": [ "commons-configuration:commons-configuration", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hbase:hbase-common" ] }, "commons-configuration:commons-configuration": { @@ -1122,16 +2406,30 @@ "org.apache.hadoop:hadoop-common" ] }, + "commons-dbcp:commons-dbcp": { + "locked": "1.4", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-metastore" + ] + }, "commons-digester:commons-digester": { "locked": "1.8", "transitive": [ "commons-configuration:commons-configuration" ] }, + "commons-el:commons-el": { + "locked": "1.0", + "transitive": [ + "tomcat:jasper-runtime" + ] + }, "commons-httpclient:commons-httpclient": { "locked": "3.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-exec" ] }, "commons-io:commons-io": { @@ -1139,7 +2437,11 @@ "transitive": [ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hive:hive-exec" ] }, "commons-lang:commons-lang": { @@ -1151,24 +2453,44 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hive.shims:hive-shims-0.23", + "org.apache.hive.shims:hive-shims-common", 
+ "org.apache.hive:hive-common", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", + "org.apache.orc:orc-core", + "org.apache.velocity:velocity" ] }, "commons-logging:commons-logging": { - "locked": "1.1.3", + "locked": "1.2", "transitive": [ "commons-beanutils:commons-beanutils", "commons-beanutils:commons-beanutils-core", "commons-configuration:commons-configuration", "commons-digester:commons-digester", + "commons-el:commons-el", "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", "org.apache.httpcomponents:httpclient" ] }, @@ -1178,23 +2500,74 @@ "org.apache.hadoop:hadoop-common" ] }, + "commons-pool:commons-pool": { + "locked": "1.5.4", + "transitive": [ + "commons-dbcp:commons-dbcp", + "org.apache.hive:hive-metastore" + ] + }, + "io.airlift:aircompressor": { + "locked": "0.8", + "transitive": [ + "org.apache.orc:orc-core" + ] + }, + "io.airlift:slice": { + "locked": "0.29", + "transitive": [ + "io.airlift:aircompressor" + ] + }, + "io.dropwizard.metrics:metrics-core": { + "locked": "3.1.2", + "transitive": [ + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-json": { + "locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-jvm": { + "locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "io.netty:netty": { "locked": "3.7.0.Final", "transitive": [ "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-mapreduce-client-core", "org.apache.zookeeper:zookeeper" ] }, "io.netty:netty-all": { "locked": "4.0.23.Final", "transitive": [ - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client" + ] + }, + "it.unimi.dsi:fastutil": { + "locked": "6.5.6", + "transitive": [ + "co.cask.tephra:tephra-core" ] }, "javax.activation:activation": { "locked": "1.1", "transitive": [ - "javax.xml.bind:jaxb-api" + "javax.mail:mail", + "javax.xml.bind:jaxb-api", + "org.eclipse.jetty.aggregate:jetty-all" ] }, "javax.inject:javax.inject": { @@ -1204,17 +2577,50 @@ "com.sun.jersey.contribs:jersey-guice" ] }, + "javax.jdo:jdo-api": { + "locked": "3.0.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "javax.mail:mail": { + "locked": "1.4.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, "javax.servlet.jsp:jsp-api": { "locked": "2.1", "transitive": [ "org.apache.hadoop:hadoop-common" ] }, + "javax.servlet:jsp-api": { + "locked": "2.0", + "transitive": [ + "tomcat:jasper-compiler" + ] + }, "javax.servlet:servlet-api": { "locked": "2.5", "transitive": [ + "javax.servlet:jsp-api", + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common", - 
"org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "tomcat:jasper-runtime" + ] + }, + "javax.transaction:jta": { + "locked": "1.1", + "transitive": [ + "javax.jdo:jdo-api" + ] + }, + "javax.transaction:transaction-api": { + "locked": "1.1", + "transitive": [ + "org.datanucleus:javax.jdo" ] }, "javax.xml.bind:jaxb-api": { @@ -1222,7 +2628,9 @@ "transitive": [ "com.sun.xml.bind:jaxb-impl", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "javax.xml.stream:stax-api": { @@ -1231,12 +2639,35 @@ "javax.xml.bind:jaxb-api" ] }, + "javolution:javolution": { + "locked": "5.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "jline:jline": { - "locked": "0.9.94", + "locked": "2.12", "transitive": [ + "org.apache.hive:hive-common", "org.apache.zookeeper:zookeeper" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-common" + ] + }, + "junit:junit": { + "locked": "4.11", + "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol" + ] + }, "log4j:log4j": { "locked": "1.2.17", "transitive": [ @@ -1245,42 +2676,144 @@ "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", "org.apache.zookeeper:zookeeper" ] }, - "org.apache.commons:commons-compress": { - "locked": "1.4.1", + "net.hydromatic:eigenbase-properties": { + "locked": "1.1.5", "transitive": [ - "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.calcite:calcite-core" ] }, - "org.apache.commons:commons-math3": { - "locked": "3.1.1", + "net.java.dev.jets3t:jets3t": { + "locked": "0.9.0", "transitive": [ "org.apache.hadoop:hadoop-common" ] }, - "org.apache.curator:curator-client": { - "locked": "2.7.1", + "net.sf.opencsv:opencsv": { + "locked": "2.3", "transitive": [ - "org.apache.curator:curator-framework", - "org.apache.hadoop:hadoop-common" + "org.apache.hive:hive-serde" ] }, - "org.apache.curator:curator-framework": { - "locked": "2.7.1", + "org.antlr:ST4": { + "locked": "4.0.4", "transitive": [ - "org.apache.curator:curator-recipes", - "org.apache.hadoop:hadoop-auth" + "org.apache.hive:hive-exec" ] }, - "org.apache.curator:curator-recipes": { - "locked": "2.7.1", + "org.antlr:antlr-runtime": { + "locked": "3.5.2", "transitive": [ + "org.antlr:ST4", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.ant:ant": { + "locked": "1.9.1", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-vector-code-gen" + ] + }, + "org.apache.ant:ant-launcher": { + "locked": "1.9.1", + "transitive": [ + "org.apache.ant:ant" + ] + }, + "org.apache.avro:avro": { + "locked": "1.7.7", + "transitive": [ + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hive:hive-serde" + ] + }, + "org.apache.calcite:calcite-core": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-druid", 
+ "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-druid": { + "locked": "1.10.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-linq4j": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid" + ] + }, + "org.apache.commons:commons-compress": { + "locked": "1.9", + "transitive": [ + "org.apache.avro:avro", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec" + ] + }, + "org.apache.commons:commons-lang3": { + "locked": "3.2", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-common" + ] + }, + "org.apache.commons:commons-math3": { + "locked": "3.1.1", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "org.apache.curator:apache-curator": { + "locked": "2.7.1", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.curator:curator-client": { + "locked": "2.7.1", + "transitive": [ + "org.apache.curator:curator-framework", "org.apache.hadoop:hadoop-common" ] }, + "org.apache.curator:curator-framework": { + "locked": "2.7.1", + "transitive": [ + "org.apache.curator:curator-recipes", + "org.apache.hadoop:hadoop-auth", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec" + ] + }, + "org.apache.curator:curator-recipes": { + "locked": "2.7.1", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "org.apache.derby:derby": { + "locked": "10.10.2.0", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "org.apache.directory.api:api-asn1-api": { "locked": "1.0.0-M20", "transitive": [ @@ -1305,17 +2838,42 @@ "org.apache.hadoop:hadoop-auth" ] }, + "org.apache.geronimo.specs:geronimo-annotation_1.0_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jaspic_1.0_spec": { + "locked": "1.0", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jta_1.1_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, "org.apache.hadoop:hadoop-annotations": { "locked": "2.7.3", "transitive": [ "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-api", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "org.apache.hadoop:hadoop-auth": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-client" ] }, "org.apache.hadoop:hadoop-client": { @@ -1324,7 +2882,10 @@ "org.apache.hadoop:hadoop-common": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-client" + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "org.apache.hadoop:hadoop-client", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common" ] }, "org.apache.hadoop:hadoop-hdfs": { @@ -1351,7 +2912,9 @@ "locked": "2.7.3", "transitive": [ "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-mapreduce-client-common" + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common" ] }, 
"org.apache.hadoop:hadoop-mapreduce-client-jobclient": { @@ -1373,8 +2936,11 @@ "org.apache.hadoop:hadoop-client", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy" ] }, "org.apache.hadoop:hadoop-yarn-client": { @@ -1389,8 +2955,17 @@ "org.apache.hadoop:hadoop-mapreduce-client-common", "org.apache.hadoop:hadoop-mapreduce-client-core", "org.apache.hadoop:hadoop-yarn-client", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy" + ] + }, + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "org.apache.hadoop:hadoop-yarn-server-common": { @@ -1398,7 +2973,10 @@ "transitive": [ "org.apache.hadoop:hadoop-mapreduce-client-common", "org.apache.hadoop:hadoop-mapreduce-client-shuffle", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy" ] }, "org.apache.hadoop:hadoop-yarn-server-nodemanager": { @@ -1407,40 +2985,285 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle" ] }, + "org.apache.hadoop:hadoop-yarn-server-resourcemanager": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hive.shims:hive-shims-0.23" + ] + }, + "org.apache.hadoop:hadoop-yarn-server-web-proxy": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" + ] + }, + "org.apache.hbase:hbase-annotations": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol" + ] + }, + "org.apache.hbase:hbase-client": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.hbase:hbase-common": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client" + ] + }, + "org.apache.hbase:hbase-protocol": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common" + ] + }, + "org.apache.hive.shims:hive-shims-0.23": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive.shims:hive-shims-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.shims:hive-shims-0.23", + "org.apache.hive.shims:hive-shims-scheduler", + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive.shims:hive-shims-scheduler": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive:hive-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-exec": { + "locked": "2.3.7" + }, + "org.apache.hive:hive-metastore": { + "locked": "2.3.7" + }, + "org.apache.hive:hive-serde": { + "locked": "2.3.7", + "transitive": [ + 
"org.apache.hive:hive-metastore" + ] + }, + "org.apache.hive:hive-service-rpc": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-shims": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-storage-api": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.hive:hive-vector-code-gen": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, "org.apache.htrace:htrace-core": { "locked": "3.1.0-incubating", "transitive": [ "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common" ] }, "org.apache.httpcomponents:httpclient": { - "locked": "4.2.5", + "locked": "4.4.1", "transitive": [ - "org.apache.hadoop:hadoop-auth" + "net.java.dev.jets3t:jets3t", + "org.apache.hadoop:hadoop-auth", + "org.apache.thrift:libthrift" ] }, "org.apache.httpcomponents:httpcore": { - "locked": "4.2.4", + "locked": "4.4.1", "transitive": [ - "org.apache.httpcomponents:httpclient" + "net.java.dev.jets3t:jets3t", + "org.apache.httpcomponents:httpclient", + "org.apache.thrift:libthrift" + ] + }, + "org.apache.ivy:ivy": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.logging.log4j:log4j-1.2-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-core", + "org.apache.logging.log4j:log4j-slf4j-impl", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-core": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-slf4j-impl": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-web": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.orc:orc-core": { + "locked": "1.3.4", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.parquet:parquet-hadoop-bundle": { + "locked": "1.8.1", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, + "org.apache.thrift:libfb303": { + "locked": "0.9.3", + "transitive": [ + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service-rpc" + ] + }, + "org.apache.thrift:libthrift": { + "locked": "0.9.3", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service-rpc", + "org.apache.thrift:libfb303" + ] + }, + "org.apache.twill:twill-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-common": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-api", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + 
"org.apache.twill:twill-discovery-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.twill:twill-discovery-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core" + ] + }, + "org.apache.twill:twill-zookeeper": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.velocity:velocity": { + "locked": "1.5", + "transitive": [ + "org.apache.hive:hive-vector-code-gen" ] }, "org.apache.zookeeper:zookeeper": { "locked": "3.4.6", "transitive": [ + "org.apache.curator:apache-curator", "org.apache.curator:curator-client", "org.apache.curator:curator-framework", "org.apache.curator:curator-recipes", "org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-server-common" + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.codehaus.groovy:groovy-all": { + "locked": "2.4.4", + "transitive": [ + "org.apache.hive:hive-exec" ] }, "org.codehaus.jackson:jackson-core-asl": { "locked": "1.9.13", "transitive": [ "com.sun.jersey:jersey-json", + "org.apache.avro:avro", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", @@ -1460,9 +3283,11 @@ "locked": "1.9.13", "transitive": [ "com.sun.jersey:jersey-json", + "org.apache.avro:avro", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-client", "org.codehaus.jackson:jackson-jaxrs", "org.codehaus.jackson:jackson-xc" ] @@ -1474,11 +3299,63 @@ "org.apache.hadoop:hadoop-yarn-common" ] }, + "org.codehaus.janino:commons-compiler": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.codehaus.janino:janino" + ] + }, + "org.codehaus.janino:janino": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core" + ] + }, "org.codehaus.jettison:jettison": { "locked": "1.1", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" + ] + }, + "org.datanucleus:datanucleus-api-jdo": { + "locked": "4.2.4", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-core": { + "locked": "4.1.17", + "transitive": [ + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-rdbms": { + "locked": "4.1.19", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:javax.jdo": { + "locked": "3.2.0-m3", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.eclipse.jetty.aggregate:jetty-all": { + "locked": "7.6.0.v20120127", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.eclipse.jetty.orbit:javax.servlet": { + "locked": "3.0.0.v201112011016", + "transitive": [ + "org.apache.hive:hive-common" ] }, "org.fusesource.leveldbjni:leveldbjni-all": { @@ -1486,13 +3363,57 @@ 
"transitive": [ "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-mapreduce-client-shuffle", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, - "org.slf4j:slf4j-api": { - "locked": "1.7.10", + "org.hamcrest:hamcrest-core": { + "locked": "1.3", + "transitive": [ + "junit:junit" + ] + }, + "org.jruby.jcodings:jcodings": { + "locked": "1.0.8", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.jruby.joni:joni" + ] + }, + "org.jruby.joni:joni": { + "locked": "2.1.2", + "transitive": [ + "org.apache.hbase:hbase-client" + ] + }, + "org.openjdk.jol:jol-core": { + "locked": "0.2", "transitive": [ + "io.airlift:slice" + ] + }, + "org.ow2.asm:asm-all": { + "locked": "5.0.2", + "transitive": [ + "org.apache.twill:twill-core" + ] + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.21", + "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "com.jolbox:bonecp", + "com.zaxxer:HikariCP", + "io.dropwizard.metrics:metrics-core", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", + "org.apache.avro:avro", + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid", "org.apache.curator:curator-client", "org.apache.directory.api:api-asn1-api", "org.apache.directory.api:api-util", @@ -1507,6 +3428,24 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hive.shims:hive-shims-0.23", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive.shims:hive-shims-scheduler", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service-rpc", + "org.apache.hive:hive-shims", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", + "org.apache.logging.log4j:log4j-slf4j-impl", + "org.apache.orc:orc-core", + "org.apache.thrift:libthrift", + "org.apache.twill:twill-common", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper", "org.apache.zookeeper:zookeeper" ] }, @@ -1516,10 +3455,34 @@ "com.google.inject:guice" ] }, - "org.tukaani:xz": { - "locked": "1.0", + "org.xerial.snappy:snappy-java": { + "locked": "1.0.5", + "transitive": [ + "org.apache.avro:avro" + ] + }, + "oro:oro": { + "locked": "2.0.8", + "transitive": [ + "org.apache.velocity:velocity" + ] + }, + "stax:stax-api": { + "locked": "1.0.1", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "tomcat:jasper-compiler": { + "locked": "5.5.23", "transitive": [ - "org.apache.commons:commons-compress" + "org.apache.hive:hive-service-rpc" + ] + }, + "tomcat:jasper-runtime": { + "locked": "5.5.23", + "transitive": [ + "org.apache.hive:hive-service-rpc" ] }, "xerces:xercesImpl": { @@ -1767,6 +3730,154 @@ ] } }, + "errorprone": { + "com.github.kevinstern:software-and-algorithms": { + "locked": "1.0", + "transitive": [ + "com.google.errorprone:error_prone_check_api" + ] + }, + "com.github.stephenc.jcip:jcip-annotations": { + "locked": "1.0-1", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.auto:auto-common": { + "locked": "0.10", + 
"transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.code.findbugs:jFormatString": { + "locked": "3.0.0", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.code.findbugs:jsr305": { + "locked": "3.0.2", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core", + "com.google.guava:guava" + ] + }, + "com.google.errorprone:error_prone_annotation": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core", + "com.google.guava:guava" + ] + }, + "com.google.errorprone:error_prone_check_api": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.errorprone:error_prone_core": { + "locked": "2.3.3", + "transitive": [ + "com.palantir.baseline:baseline-error-prone" + ] + }, + "com.google.errorprone:error_prone_type_annotations": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.guava:failureaccess": { + "locked": "1.0.1", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.guava:guava": { + "locked": "27.0.1-jre", + "transitive": [ + "com.google.auto:auto-common", + "com.google.errorprone:error_prone_annotation", + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.guava:listenablefuture": { + "locked": "9999.0-empty-to-avoid-conflict-with-guava", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.j2objc:j2objc-annotations": { + "locked": "1.1", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.protobuf:protobuf-java": { + "locked": "3.4.0", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.googlecode.java-diff-utils:diffutils": { + "locked": "1.3.0", + "transitive": [ + "com.google.errorprone:error_prone_check_api" + ] + }, + "com.palantir.baseline:baseline-error-prone": { + "locked": "0.55.0", + "requested": "0.55.0" + }, + "org.checkerframework:checker-qual": { + "locked": "2.5.3", + "transitive": [ + "com.google.guava:guava", + "org.checkerframework:dataflow", + "org.checkerframework:javacutil" + ] + }, + "org.checkerframework:dataflow": { + "locked": "2.5.3", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core" + ] + }, + "org.checkerframework:javacutil": { + "locked": "2.5.3", + "transitive": [ + "org.checkerframework:dataflow" + ] + }, + "org.codehaus.mojo:animal-sniffer-annotations": { + "locked": "1.17", + "transitive": [ + "com.google.guava:guava" + ] + }, + "org.pcollections:pcollections": { + "locked": "2.1.2", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + } + }, + "errorproneJavac": { + "com.google.errorprone:javac": { + "locked": "9+181-r4173-1", + "requested": "9+181-r4173-1" + } + }, "runtime": { "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.10.2", @@ -2217,7 +4328,155 @@ ] } }, + "testAnnotationProcessor": { + "com.github.kevinstern:software-and-algorithms": { + "locked": "1.0", + "transitive": [ + "com.google.errorprone:error_prone_check_api" + ] + }, + "com.github.stephenc.jcip:jcip-annotations": { + "locked": "1.0-1", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + 
"com.google.auto:auto-common": { + "locked": "0.10", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.code.findbugs:jFormatString": { + "locked": "3.0.0", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.code.findbugs:jsr305": { + "locked": "3.0.2", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core", + "com.google.guava:guava" + ] + }, + "com.google.errorprone:error_prone_annotation": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core", + "com.google.guava:guava" + ] + }, + "com.google.errorprone:error_prone_check_api": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.errorprone:error_prone_core": { + "locked": "2.3.3", + "transitive": [ + "com.palantir.baseline:baseline-error-prone" + ] + }, + "com.google.errorprone:error_prone_type_annotations": { + "locked": "2.3.3", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.guava:failureaccess": { + "locked": "1.0.1", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.guava:guava": { + "locked": "27.0.1-jre", + "transitive": [ + "com.google.auto:auto-common", + "com.google.errorprone:error_prone_annotation", + "com.google.errorprone:error_prone_core" + ] + }, + "com.google.guava:listenablefuture": { + "locked": "9999.0-empty-to-avoid-conflict-with-guava", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.j2objc:j2objc-annotations": { + "locked": "1.1", + "transitive": [ + "com.google.guava:guava" + ] + }, + "com.google.protobuf:protobuf-java": { + "locked": "3.4.0", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + }, + "com.googlecode.java-diff-utils:diffutils": { + "locked": "1.3.0", + "transitive": [ + "com.google.errorprone:error_prone_check_api" + ] + }, + "com.palantir.baseline:baseline-error-prone": { + "locked": "0.55.0", + "requested": "0.55.0" + }, + "org.checkerframework:checker-qual": { + "locked": "2.5.3", + "transitive": [ + "com.google.guava:guava", + "org.checkerframework:dataflow", + "org.checkerframework:javacutil" + ] + }, + "org.checkerframework:dataflow": { + "locked": "2.5.3", + "transitive": [ + "com.google.errorprone:error_prone_check_api", + "com.google.errorprone:error_prone_core" + ] + }, + "org.checkerframework:javacutil": { + "locked": "2.5.3", + "transitive": [ + "org.checkerframework:dataflow" + ] + }, + "org.codehaus.mojo:animal-sniffer-annotations": { + "locked": "1.17", + "transitive": [ + "com.google.guava:guava" + ] + }, + "org.pcollections:pcollections": { + "locked": "2.1.2", + "transitive": [ + "com.google.errorprone:error_prone_core" + ] + } + }, "testCompile": { + "ant:ant": { + "locked": "1.6.5", + "transitive": [ + "tomcat:jasper-compiler" + ] + }, "aopalliance:aopalliance": { "locked": "1.0", "transitive": [ @@ -2227,14 +4486,83 @@ "asm:asm": { "locked": "3.1", "transitive": [ + "asm:asm-tree", "com.sun.jersey:jersey-server", "org.sonatype.sisu.inject:cglib" ] }, + "asm:asm-commons": { + "locked": "3.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "asm:asm-tree": { + "locked": "3.1", + "transitive": [ + "asm:asm-commons" + ] + }, + 
"ch.qos.logback:logback-classic": { + "locked": "1.0.9", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "ch.qos.logback:logback-core": { + "locked": "1.0.9", + "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "co.cask.tephra:tephra-api": { + "locked": "0.6.0", + "transitive": [ + "co.cask.tephra:tephra-core", + "co.cask.tephra:tephra-hbase-compat-1.0", + "org.apache.hive:hive-metastore" + ] + }, + "co.cask.tephra:tephra-core": { + "locked": "0.6.0", + "transitive": [ + "co.cask.tephra:tephra-hbase-compat-1.0", + "org.apache.hive:hive-metastore" + ] + }, + "co.cask.tephra:tephra-hbase-compat-1.0": { + "locked": "0.6.0", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "com.beust:jcommander": { + "locked": "1.30", + "transitive": [ + "org.apache.slider:slider-core" + ] + }, + "com.esotericsoftware:kryo-shaded": { + "locked": "4.0.2", + "requested": "4.0.2" + }, + "com.esotericsoftware:minlog": { + "locked": "1.3.0", + "transitive": [ + "com.esotericsoftware:kryo-shaded" + ] + }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.10.2", + "requested": "2.6.5", "transitive": [ - "com.fasterxml.jackson.core:jackson-databind" + "com.fasterxml.jackson.core:jackson-databind", + "org.apache.calcite.avatica:avatica" ] }, "com.fasterxml.jackson.core:jackson-core": { @@ -2242,13 +4570,17 @@ "transitive": [ "com.fasterxml.jackson.core:jackson-databind", "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", "org.apache.iceberg:iceberg-core" ] }, "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.10.2", "transitive": [ + "io.dropwizard.metrics:metrics-json", "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", + "org.apache.hive:hive-common", "org.apache.iceberg:iceberg-core" ] }, @@ -2258,9 +4590,24 @@ "org.apache.iceberg:iceberg-core" ] }, + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter": { + "locked": "0.1.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "com.github.stephenc.findbugs:findbugs-annotations": { "locked": "1.3.9-1", "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", "org.apache.iceberg:iceberg-api", "org.apache.iceberg:iceberg-common", "org.apache.iceberg:iceberg-core", @@ -2272,13 +4619,26 @@ "com.google.code.findbugs:jsr305": { "locked": "3.0.0", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.calcite:calcite-core", + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-serde", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-internals", + "org.apache.twill:twill-api", + "org.apache.twill:twill-common", + "org.apache.twill:twill-zookeeper" ] }, "com.google.code.gson:gson": { "locked": "2.2.4", "transitive": [ - "org.apache.hadoop:hadoop-common" + "co.cask.tephra:tephra-core", + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" ] }, "com.google.errorprone:error_prone_annotations": { @@ -2288,8 +4648,13 @@ ] }, 
"com.google.guava:guava": { - "locked": "16.0.1", + "locked": "18.0", "transitive": [ + "co.cask.tephra:tephra-core", + "com.jolbox:bonecp", + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-linq4j", + "org.apache.curator:apache-curator", "org.apache.curator:curator-client", "org.apache.curator:curator-framework", "org.apache.curator:curator-recipes", @@ -2298,21 +4663,67 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-metastore", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper", + "org.reflections:reflections" + ] + }, + "com.google.inject.extensions:guice-assistedinject": { + "locked": "3.0", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "com.google.inject.extensions:guice-servlet": { + "locked": "3.0", + "transitive": [ + "com.sun.jersey.contribs:jersey-guice", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.slider:slider-core" ] }, "com.google.inject:guice": { "locked": "3.0", "transitive": [ + "co.cask.tephra:tephra-core", + "com.google.inject.extensions:guice-assistedinject", + "com.google.inject.extensions:guice-servlet", "com.sun.jersey.contribs:jersey-guice", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.google.protobuf:protobuf-java": { - "locked": "2.5.0", + "locked": "3.0.0-beta-1", "transitive": [ + "org.apache.calcite.avatica:avatica", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-mapreduce-client-app", @@ -2322,22 +4733,77 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle", "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-metastore", + "org.apache.orc:orc-core", + 
"org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "com.jamesmurty.utils:java-xmlbuilder": { + "locked": "0.4", + "transitive": [ + "net.java.dev.jets3t:jets3t" + ] + }, + "com.jcraft:jsch": { + "locked": "0.1.42", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "com.jolbox:bonecp": { + "locked": "0.8.0.RELEASE", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "com.klarna:hiverunner": { + "locked": "5.2.1", + "requested": "5.2.1" + }, + "com.lmax:disruptor": { + "locked": "3.3.0", + "transitive": [ + "org.apache.hbase:hbase-server" + ] + }, + "com.ning:async-http-client": { + "locked": "1.8.16", + "transitive": [ + "org.apache.tez:tez-runtime-library" ] }, "com.sun.jersey.contribs:jersey-guice": { "locked": "1.9", "transitive": [ "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.sun.jersey:jersey-client": { "locked": "1.9", "transitive": [ "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "com.sun.jersey:jersey-core": { @@ -2346,22 +4812,36 @@ "com.sun.jersey:jersey-client", "com.sun.jersey:jersey-json", "com.sun.jersey:jersey-server", - "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-server" ] }, "com.sun.jersey:jersey-json": { "locked": "1.9", "transitive": [ + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "com.sun.jersey:jersey-server": { "locked": "1.9", "transitive": [ "com.sun.jersey.contribs:jersey-guice", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" ] }, "com.sun.xml.bind:jaxb-impl": { @@ -2370,6 +4850,26 @@ "com.sun.jersey:jersey-json" ] }, + "com.tdunning:json": { + "locked": "1.8", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server" + ] + }, + "com.yammer.metrics:metrics-core": { + "locked": "2.2.0", + "transitive": [ + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server" + ] + }, + "com.zaxxer:HikariCP": { + "locked": "2.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "commons-beanutils:commons-beanutils": { 
"locked": "1.7.0", "transitive": [ @@ -2388,26 +4888,48 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.tez:tez-dag" ] }, "commons-codec:commons-codec": { - "locked": "1.6", + "locked": "1.9", "transitive": [ "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", "org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", - "org.apache.httpcomponents:httpclient" + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.httpcomponents:httpclient", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-library" ] }, "commons-collections:commons-collections": { "locked": "3.2.2", "transitive": [ "commons-configuration:commons-configuration", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.tez:tez-mapreduce" ] }, "commons-configuration:commons-configuration": { @@ -2416,16 +4938,38 @@ "org.apache.hadoop:hadoop-common" ] }, + "commons-daemon:commons-daemon": { + "locked": "1.0.13", + "transitive": [ + "org.apache.hadoop:hadoop-hdfs" + ] + }, + "commons-dbcp:commons-dbcp": { + "locked": "1.4", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-metastore" + ] + }, "commons-digester:commons-digester": { "locked": "1.8", "transitive": [ "commons-configuration:commons-configuration" ] }, + "commons-el:commons-el": { + "locked": "1.0", + "transitive": [ + "tomcat:jasper-runtime" + ] + }, "commons-httpclient:commons-httpclient": { "locked": "3.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core" ] }, "commons-io:commons-io": { @@ -2433,7 +4977,14 @@ "transitive": [ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "commons-lang:commons-lang": { @@ -2445,25 +4996,62 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server", + "org.apache.hive.shims:hive-shims-0.23", + 
"org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", + "org.apache.orc:orc-core", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-library", + "org.apache.velocity:velocity" ] }, "commons-logging:commons-logging": { - "locked": "1.1.3", + "locked": "1.2", "transitive": [ "commons-beanutils:commons-beanutils", "commons-beanutils:commons-beanutils-core", "commons-configuration:commons-configuration", "commons-digester:commons-digester", + "commons-el:commons-el", "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", + "net.sf.jpam:jpam", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", - "org.apache.httpcomponents:httpclient" + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.httpcomponents:httpclient", + "org.apache.slider:slider-core" ] }, "commons-net:commons-net": { @@ -2475,26 +5063,88 @@ "commons-pool:commons-pool": { "locked": "1.6", "transitive": [ + "commons-dbcp:commons-dbcp", + "org.apache.hive:hive-metastore", "org.apache.parquet:parquet-hadoop" ] }, + "dom4j:dom4j": { + "locked": "1.6.1", + "transitive": [ + "org.reflections:reflections" + ] + }, "io.airlift:aircompressor": { "locked": "0.15", "transitive": [ "org.apache.orc:orc-core" ] }, + "io.dropwizard.metrics:metrics-core": { + "locked": "3.1.2", + "transitive": [ + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-json": { + "locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-jvm": { + "locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "io.netty:netty": { - "locked": "3.7.0.Final", + "locked": "3.9.2.Final", "transitive": [ + "com.ning:async-http-client", "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hive:hive-llap-server", "org.apache.zookeeper:zookeeper" ] }, "io.netty:netty-all": { "locked": "4.0.23.Final", "transitive": [ - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-server" + ] + }, + "it.unimi.dsi:fastutil": { + "locked": "6.5.6", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "jakarta.jms:jakarta.jms-api": { + "locked": "2.0.2", + 
"transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions" + ] + }, + "javassist:javassist": { + "locked": "3.12.1.GA", + "transitive": [ + "org.reflections:reflections" + ] + }, + "javax.activation:activation": { + "locked": "1.1", + "transitive": [ + "javax.mail:mail", + "org.eclipse.jetty.aggregate:jetty-all" ] }, "javax.annotation:javax.annotation-api": { @@ -2510,17 +5160,56 @@ "com.sun.jersey.contribs:jersey-guice" ] }, + "javax.jdo:jdo-api": { + "locked": "3.0.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "javax.mail:mail": { + "locked": "1.4.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, "javax.servlet.jsp:jsp-api": { "locked": "2.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.slider:slider-core" + ] + }, + "javax.servlet:jsp-api": { + "locked": "2.0", + "transitive": [ + "tomcat:jasper-compiler" ] }, "javax.servlet:servlet-api": { "locked": "2.5", "transitive": [ + "javax.servlet:jsp-api", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.slider:slider-core", + "org.apache.tez:tez-dag", + "org.eclipse.jetty.aggregate:jetty-all", + "tomcat:jasper-runtime" + ] + }, + "javax.transaction:jta": { + "locked": "1.1", + "transitive": [ + "javax.jdo:jdo-api" + ] + }, + "javax.transaction:transaction-api": { + "locked": "1.1", + "transitive": [ + "org.datanucleus:javax.jdo" ] }, "javax.xml.bind:jaxb-api": { @@ -2528,18 +5217,46 @@ "transitive": [ "com.sun.xml.bind:jaxb-impl", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", "org.apache.orc:orc-core" ] }, + "javolution:javolution": { + "locked": "5.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "jline:jline": { - "locked": "0.9.94", + "locked": "2.12", "transitive": [ + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", "org.apache.zookeeper:zookeeper" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-common" + ] + }, "junit:junit": { - "locked": "4.12" + "locked": "4.12", + "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server" + ] }, "log4j:log4j": { "locked": "1.2.17", @@ -2549,13 +5266,121 @@ "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core", "org.apache.zookeeper:zookeeper" ] }, + "net.hydromatic:eigenbase-properties": { + "locked": "1.1.5", + 
"transitive": [ + "org.apache.calcite:calcite-core" + ] + }, + "net.java.dev.jets3t:jets3t": { + "locked": "0.9.0", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "net.sf.jpam:jpam": { + "locked": "1.1", + "transitive": [ + "org.apache.hive:hive-service" + ] + }, + "net.sf.opencsv:opencsv": { + "locked": "2.3", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, + "org.antlr:ST4": { + "locked": "4.0.4", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.antlr:antlr-runtime": { + "locked": "3.5.2", + "transitive": [ + "org.antlr:ST4", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.ant:ant": { + "locked": "1.9.1", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-vector-code-gen" + ] + }, + "org.apache.ant:ant-launcher": { + "locked": "1.9.1", + "transitive": [ + "org.apache.ant:ant" + ] + }, "org.apache.avro:avro": { "locked": "1.9.2", + "requested": "1.9.2", "transitive": [ - "org.apache.iceberg:iceberg-core" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-serde", + "org.apache.iceberg:iceberg-core", + "org.apache.slider:slider-core" + ] + }, + "org.apache.calcite.avatica:avatica": { + "locked": "1.8.0", + "transitive": [ + "org.apache.calcite:calcite-core" + ] + }, + "org.apache.calcite.avatica:avatica-metrics": { + "locked": "1.8.0", + "transitive": [ + "org.apache.calcite.avatica:avatica" + ] + }, + "org.apache.calcite:calcite-core": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-druid": { + "locked": "1.10.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-linq4j": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid" + ] + }, + "org.apache.commons:commons-collections4": { + "locked": "4.1", + "transitive": [ + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag" ] }, "org.apache.commons:commons-compress": { @@ -2563,33 +5388,78 @@ "transitive": [ "org.apache.avro:avro", "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core" + ] + }, + "org.apache.commons:commons-lang3": { + "locked": "3.2", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-tez" + ] + }, + "org.apache.commons:commons-math": { + "locked": "2.2", + "transitive": [ + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server" ] }, "org.apache.commons:commons-math3": { "locked": "3.1.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.tez:tez-dag" + ] + }, + "org.apache.curator:apache-curator": { + "locked": "2.7.1", + "transitive": [ + "org.apache.hive:hive-exec", + "org.apache.hive:hive-llap-client" ] }, "org.apache.curator:curator-client": { "locked": "2.7.1", "transitive": [ "org.apache.curator:curator-framework", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.slider:slider-core" ] }, 
"org.apache.curator:curator-framework": { "locked": "2.7.1", "transitive": [ "org.apache.curator:curator-recipes", - "org.apache.hadoop:hadoop-auth" + "org.apache.hadoop:hadoop-auth", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-service", + "org.apache.slider:slider-core" ] }, "org.apache.curator:curator-recipes": { "locked": "2.7.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-service", + "org.apache.slider:slider-core" + ] + }, + "org.apache.derby:derby": { + "locked": "10.10.2.0", + "transitive": [ + "org.apache.hive:hive-metastore" ] }, "org.apache.directory.api:api-asn1-api": { @@ -2616,32 +5486,100 @@ "org.apache.hadoop:hadoop-auth" ] }, + "org.apache.geronimo.specs:geronimo-annotation_1.0_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jaspic_1.0_spec": { + "locked": "1.0", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jta_1.1_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, "org.apache.hadoop:hadoop-annotations": { "locked": "2.7.3", "transitive": [ "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-api", + "org.apache.hadoop:hadoop-yarn-client", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-archives": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core" ] }, "org.apache.hadoop:hadoop-auth": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.tez:tez-api", + "org.apache.tez:tez-runtime-library" ] }, "org.apache.hadoop:hadoop-client": { - "locked": "2.7.3" + "locked": "2.7.3", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" + ] }, "org.apache.hadoop:hadoop-common": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-client" + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "org.apache.hadoop:hadoop-client", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + 
"org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" ] }, "org.apache.hadoop:hadoop-hdfs": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-client" + "org.apache.hadoop:hadoop-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "org.apache.hadoop:hadoop-mapreduce-client-app": { @@ -2655,14 +5593,22 @@ "transitive": [ "org.apache.hadoop:hadoop-mapreduce-client-app", "org.apache.hadoop:hadoop-mapreduce-client-jobclient", - "org.apache.hadoop:hadoop-mapreduce-client-shuffle" + "org.apache.hadoop:hadoop-mapreduce-client-shuffle", + "org.apache.tez:tez-mapreduce" ] }, "org.apache.hadoop:hadoop-mapreduce-client-core": { "locked": "2.7.3", "transitive": [ "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-mapreduce-client-common" + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.tez:tez-mapreduce" ] }, "org.apache.hadoop:hadoop-mapreduce-client-jobclient": { @@ -2684,14 +5630,28 @@ "org.apache.hadoop:hadoop-client", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" ] }, "org.apache.hadoop:hadoop-yarn-client": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-mapreduce-client-common" + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce" ] }, "org.apache.hadoop:hadoop-yarn-common": { @@ -2700,8 +5660,30 @@ "org.apache.hadoop:hadoop-mapreduce-client-common", "org.apache.hadoop:hadoop-mapreduce-client-core", "org.apache.hadoop:hadoop-yarn-client", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-yarn-registry": { + "locked": "2.7.1", + "transitive": [ + "org.apache.slider:slider-core" + ] + }, + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "org.apache.hadoop:hadoop-yarn-server-common": { @@ -2709,7 +5691,10 @@ "transitive": [ 
"org.apache.hadoop:hadoop-mapreduce-client-common", "org.apache.hadoop:hadoop-mapreduce-client-shuffle", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy" ] }, "org.apache.hadoop:hadoop-yarn-server-nodemanager": { @@ -2718,42 +5703,292 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle" ] }, - "org.apache.htrace:htrace-core": { - "locked": "3.1.0-incubating", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager": { + "locked": "2.7.2", "transitive": [ - "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hive.shims:hive-shims-0.23" ] }, - "org.apache.httpcomponents:httpclient": { - "locked": "4.2.5", + "org.apache.hadoop:hadoop-yarn-server-web-proxy": { + "locked": "2.7.2", "transitive": [ - "org.apache.hadoop:hadoop-auth" + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.tez:tez-dag" ] }, - "org.apache.httpcomponents:httpcore": { - "locked": "4.2.4", + "org.apache.hbase:hbase-annotations": { + "locked": "1.1.1", "transitive": [ - "org.apache.httpcomponents:httpclient" + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-protocol" ] }, - "org.apache.iceberg:iceberg-api": { - "project": true, + "org.apache.hbase:hbase-client": { + "locked": "1.1.1", "transitive": [ - "org.apache.iceberg:iceberg-core", - "org.apache.iceberg:iceberg-data", - "org.apache.iceberg:iceberg-orc", - "org.apache.iceberg:iceberg-parquet" + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore" ] }, - "org.apache.iceberg:iceberg-bundled-guava": { - "project": true, + "org.apache.hbase:hbase-common": { + "locked": "1.1.1", "transitive": [ - "org.apache.iceberg:iceberg-api", - "org.apache.iceberg:iceberg-common" + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" ] }, - "org.apache.iceberg:iceberg-common": { + "org.apache.hbase:hbase-hadoop-compat": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hbase:hbase-hadoop2-compat": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hbase:hbase-prefix-tree": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-server" + ] + }, + "org.apache.hbase:hbase-procedure": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-server" + ] + }, + "org.apache.hbase:hbase-protocol": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server" + ] + }, + "org.apache.hbase:hbase-server": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hive.hcatalog:hive-hcatalog-core": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client" + ] + }, + 
"org.apache.hive.hcatalog:hive-hcatalog-server-extensions": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-webhcat-java-client" + ] + }, + "org.apache.hive.hcatalog:hive-webhcat-java-client": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.hive.shims:hive-shims-0.23": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive.shims:hive-shims-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.shims:hive-shims-0.23", + "org.apache.hive.shims:hive-shims-scheduler", + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive.shims:hive-shims-scheduler": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive:hive-cli": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core" + ] + }, + "org.apache.hive:hive-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez", + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-exec": { + "locked": "2.3.7" + }, + "org.apache.hive:hive-jdbc": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.hive:hive-llap-client": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez" + ] + }, + "org.apache.hive:hive-llap-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hive:hive-llap-server": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-service" + ] + }, + "org.apache.hive:hive-llap-tez": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hive:hive-metastore": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-service" + ] + }, + "org.apache.hive:hive-serde": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.hive:hive-service": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc" + ] + }, + "org.apache.hive:hive-service-rpc": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service" + ] + }, + "org.apache.hive:hive-shims": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-storage-api": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.hive:hive-vector-code-gen": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.htrace:htrace-core": { + "locked": "3.1.0-incubating", + "transitive": [ + "org.apache.hadoop:hadoop-common", + 
"org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server" + ] + }, + "org.apache.httpcomponents:httpclient": { + "locked": "4.5.2", + "transitive": [ + "net.java.dev.jets3t:jets3t", + "org.apache.calcite.avatica:avatica", + "org.apache.hadoop:hadoop-auth", + "org.apache.hive:hive-jdbc", + "org.apache.slider:slider-core", + "org.apache.thrift:libthrift" + ] + }, + "org.apache.httpcomponents:httpcore": { + "locked": "4.4.4", + "transitive": [ + "net.java.dev.jets3t:jets3t", + "org.apache.calcite.avatica:avatica", + "org.apache.hive:hive-jdbc", + "org.apache.httpcomponents:httpclient", + "org.apache.slider:slider-core", + "org.apache.thrift:libthrift" + ] + }, + "org.apache.iceberg:iceberg-api": { + "project": true, + "transitive": [ + "org.apache.iceberg:iceberg-core", + "org.apache.iceberg:iceberg-data", + "org.apache.iceberg:iceberg-orc", + "org.apache.iceberg:iceberg-parquet" + ] + }, + "org.apache.iceberg:iceberg-bundled-guava": { + "project": true, + "transitive": [ + "org.apache.iceberg:iceberg-api", + "org.apache.iceberg:iceberg-common" + ] + }, + "org.apache.iceberg:iceberg-common": { "project": true, "transitive": [ "org.apache.iceberg:iceberg-core" @@ -2776,9 +6011,52 @@ "org.apache.iceberg:iceberg-parquet": { "project": true }, + "org.apache.ivy:ivy": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.logging.log4j:log4j-1.2-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-core", + "org.apache.logging.log4j:log4j-slf4j-impl", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-core": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-slf4j-impl": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-web": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "org.apache.orc:orc-core": { "locked": "1.6.3", "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server", "org.apache.iceberg:iceberg-orc" ] }, @@ -2828,12 +6106,151 @@ "org.apache.parquet:parquet-avro" ] }, + "org.apache.parquet:parquet-hadoop-bundle": { + "locked": "1.8.1", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, "org.apache.parquet:parquet-jackson": { "locked": "1.11.0", "transitive": [ "org.apache.parquet:parquet-hadoop" ] }, + "org.apache.slider:slider-core": { + "locked": "0.90.2-incubating", + "transitive": [ + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.tez:hadoop-shim": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-internals" + ] + }, + "org.apache.tez:tez-api": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.tez:tez-common": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + 
"org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.tez:tez-dag": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.tez:tez-mapreduce": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.tez:tez-runtime-internals": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-dag" + ] + }, + "org.apache.tez:tez-runtime-library": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce" + ] + }, + "org.apache.thrift:libfb303": { + "locked": "0.9.3", + "transitive": [ + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, + "org.apache.thrift:libthrift": { + "locked": "0.9.3", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.thrift:libfb303" + ] + }, + "org.apache.twill:twill-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-common": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-api", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "org.apache.twill:twill-discovery-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.twill:twill-discovery-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core" + ] + }, + "org.apache.twill:twill-zookeeper": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.velocity:velocity": { + "locked": "1.5", + "transitive": [ + "org.apache.hive:hive-vector-code-gen" + ] + }, "org.apache.yetus:audience-annotations": { "locked": "0.11.0", "transitive": [ @@ -2843,12 +6260,32 @@ "org.apache.zookeeper:zookeeper": { "locked": "3.4.6", "transitive": [ + "org.apache.curator:apache-curator", "org.apache.curator:curator-client", "org.apache.curator:curator-framework", "org.apache.curator:curator-recipes", "org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-server-common" + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.slider:slider-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apiguardian:apiguardian-api": { + "locked": "1.1.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-api", + "org.junit.jupiter:junit-jupiter-engine", + "org.junit.jupiter:junit-jupiter-params", + "org.junit.platform:junit-platform-commons", + "org.junit.platform:junit-platform-engine" ] }, 
"org.checkerframework:checker-qual": { @@ -2857,6 +6294,12 @@ "com.github.ben-manes.caffeine:caffeine" ] }, + "org.codehaus.groovy:groovy-all": { + "locked": "2.4.4", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, "org.codehaus.jackson:jackson-core-asl": { "locked": "1.9.13", "transitive": [ @@ -2864,6 +6307,9 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core", "org.codehaus.jackson:jackson-jaxrs", "org.codehaus.jackson:jackson-mapper-asl", "org.codehaus.jackson:jackson-xc" @@ -2873,7 +6319,9 @@ "locked": "1.9.13", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" ] }, "org.codehaus.jackson:jackson-mapper-asl": { @@ -2883,6 +6331,12 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.slider:slider-core", "org.codehaus.jackson:jackson-jaxrs", "org.codehaus.jackson:jackson-xc" ] @@ -2891,14 +6345,68 @@ "locked": "1.9.13", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.slider:slider-core" + ] + }, + "org.codehaus.janino:commons-compiler": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.codehaus.janino:janino" + ] + }, + "org.codehaus.janino:janino": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core" ] }, "org.codehaus.jettison:jettison": { "locked": "1.1", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" + ] + }, + "org.datanucleus:datanucleus-api-jdo": { + "locked": "4.2.4", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-core": { + "locked": "4.1.17", + "transitive": [ + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-rdbms": { + "locked": "4.1.19", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:javax.jdo": { + "locked": "3.2.0-m3", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.eclipse.jetty.aggregate:jetty-all": { + "locked": "7.6.0.v20120127", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-service" + ] + }, + "org.eclipse.jetty.orbit:javax.servlet": { + "locked": "3.0.0.v201112011016", + "transitive": [ + "org.apache.hive:hive-common" ] }, "org.fusesource.leveldbjni:leveldbjni-all": { @@ -2906,8 +6414,10 @@ "transitive": [ "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-mapreduce-client-shuffle", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, 
"org.hamcrest:hamcrest-core": { @@ -2917,26 +6427,125 @@ "org.mockito:mockito-core" ] }, + "org.jamon:jamon-runtime": { + "locked": "2.3.1", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service" + ] + }, "org.jetbrains:annotations": { "locked": "17.0.0", "transitive": [ "org.apache.orc:orc-core" ] }, + "org.jruby.jcodings:jcodings": { + "locked": "1.0.8", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.jruby.joni:joni" + ] + }, + "org.jruby.joni:joni": { + "locked": "2.1.2", + "transitive": [ + "org.apache.hbase:hbase-client" + ] + }, + "org.junit.jupiter:junit-jupiter": { + "locked": "5.6.0", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.junit.jupiter:junit-jupiter-api": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter", + "org.junit.jupiter:junit-jupiter-engine", + "org.junit.jupiter:junit-jupiter-params" + ] + }, + "org.junit.jupiter:junit-jupiter-engine": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter" + ] + }, + "org.junit.jupiter:junit-jupiter-params": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter" + ] + }, + "org.junit.platform:junit-platform-commons": { + "locked": "1.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-api", + "org.junit.platform:junit-platform-engine" + ] + }, + "org.junit.platform:junit-platform-engine": { + "locked": "1.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-engine" + ] + }, "org.mockito:mockito-core": { "locked": "1.10.19" }, "org.objenesis:objenesis": { - "locked": "2.1", + "locked": "2.5.1", "transitive": [ + "com.esotericsoftware:kryo-shaded", "org.mockito:mockito-core" ] }, - "org.slf4j:slf4j-api": { - "locked": "1.7.25", + "org.opentest4j:opentest4j": { + "locked": "1.2.0", "transitive": [ - "org.apache.avro:avro", - "org.apache.curator:curator-client", + "org.junit.jupiter:junit-jupiter-api", + "org.junit.platform:junit-platform-engine" + ] + }, + "org.ow2.asm:asm-all": { + "locked": "5.0.2", + "transitive": [ + "org.apache.twill:twill-core" + ] + }, + "org.reflections:reflections": { + "locked": "0.9.8", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.roaringbitmap:RoaringBitmap": { + "locked": "0.4.9", + "transitive": [ + "org.apache.tez:tez-runtime-library" + ] + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.25", + "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "com.jolbox:bonecp", + "com.ning:async-http-client", + "com.yammer.metrics:metrics-core", + "com.zaxxer:HikariCP", + "io.dropwizard.metrics:metrics-core", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", + "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", + "org.apache.calcite.avatica:avatica-metrics", + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid", + "org.apache.curator:curator-client", "org.apache.directory.api:api-asn1-api", "org.apache.directory.api:api-util", "org.apache.directory.server:apacheds-i18n", @@ -2950,16 +6559,49 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.hive.shims:hive-shims-0.23", + 
"org.apache.hive.shims:hive-shims-common", + "org.apache.hive.shims:hive-shims-scheduler", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.hive:hive-shims", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", "org.apache.iceberg:iceberg-api", "org.apache.iceberg:iceberg-common", "org.apache.iceberg:iceberg-core", "org.apache.iceberg:iceberg-data", "org.apache.iceberg:iceberg-orc", "org.apache.iceberg:iceberg-parquet", + "org.apache.logging.log4j:log4j-slf4j-impl", "org.apache.orc:orc-core", "org.apache.orc:orc-shims", "org.apache.parquet:parquet-common", "org.apache.parquet:parquet-format-structures", + "org.apache.slider:slider-core", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-library", + "org.apache.thrift:libthrift", + "org.apache.twill:twill-common", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper", "org.apache.zookeeper:zookeeper", "org.slf4j:slf4j-simple" ] @@ -2985,6 +6627,35 @@ "org.apache.parquet:parquet-hadoop" ] }, + "oro:oro": { + "locked": "2.0.8", + "transitive": [ + "org.apache.velocity:velocity" + ] + }, + "stax:stax-api": { + "locked": "1.0.1", + "transitive": [ + "org.apache.hive:hive-exec", + "org.codehaus.jettison:jettison" + ] + }, + "tomcat:jasper-compiler": { + "locked": "5.5.23", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, + "tomcat:jasper-runtime": { + "locked": "5.5.23", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, "xerces:xercesImpl": { "locked": "2.9.1", "transitive": [ @@ -2994,6 +6665,7 @@ "xml-apis:xml-apis": { "locked": "1.3.04", "transitive": [ + "dom4j:dom4j", "xerces:xercesImpl" ] }, @@ -3006,6 +6678,12 @@ } }, "testCompileClasspath": { + "ant:ant": { + "locked": "1.6.5", + "transitive": [ + "tomcat:jasper-compiler" + ] + }, "aopalliance:aopalliance": { "locked": "1.0", "transitive": [ @@ -3015,14 +6693,83 @@ "asm:asm": { "locked": "3.1", "transitive": [ + "asm:asm-tree", "com.sun.jersey:jersey-server", "org.sonatype.sisu.inject:cglib" ] }, + "asm:asm-commons": { + "locked": "3.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "asm:asm-tree": { + "locked": "3.1", + "transitive": [ + "asm:asm-commons" + ] + }, + "ch.qos.logback:logback-classic": { + "locked": "1.0.9", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "ch.qos.logback:logback-core": { + "locked": "1.0.9", + "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "co.cask.tephra:tephra-api": { + "locked": "0.6.0", + "transitive": [ + "co.cask.tephra:tephra-core", + "co.cask.tephra:tephra-hbase-compat-1.0", + "org.apache.hive:hive-metastore" + ] + }, + "co.cask.tephra:tephra-core": { + "locked": "0.6.0", + "transitive": [ + "co.cask.tephra:tephra-hbase-compat-1.0", + 
"org.apache.hive:hive-metastore" + ] + }, + "co.cask.tephra:tephra-hbase-compat-1.0": { + "locked": "0.6.0", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "com.beust:jcommander": { + "locked": "1.30", + "transitive": [ + "org.apache.slider:slider-core" + ] + }, + "com.esotericsoftware:kryo-shaded": { + "locked": "4.0.2", + "requested": "4.0.2" + }, + "com.esotericsoftware:minlog": { + "locked": "1.3.0", + "transitive": [ + "com.esotericsoftware:kryo-shaded" + ] + }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.10.2", + "requested": "2.6.5", "transitive": [ - "com.fasterxml.jackson.core:jackson-databind" + "com.fasterxml.jackson.core:jackson-databind", + "org.apache.calcite.avatica:avatica" ] }, "com.fasterxml.jackson.core:jackson-core": { @@ -3030,13 +6777,17 @@ "transitive": [ "com.fasterxml.jackson.core:jackson-databind", "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", "org.apache.iceberg:iceberg-core" ] }, "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.10.2", "transitive": [ + "io.dropwizard.metrics:metrics-json", "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", + "org.apache.hive:hive-common", "org.apache.iceberg:iceberg-core" ] }, @@ -3046,9 +6797,23 @@ "org.apache.iceberg:iceberg-core" ] }, + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter": { + "locked": "0.1.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "com.github.stephenc.findbugs:findbugs-annotations": { "locked": "1.3.9-1", "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", "org.apache.iceberg:iceberg-api", "org.apache.iceberg:iceberg-common", "org.apache.iceberg:iceberg-core", @@ -3060,13 +6825,26 @@ "com.google.code.findbugs:jsr305": { "locked": "3.0.0", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.calcite:calcite-core", + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-serde", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-internals", + "org.apache.twill:twill-api", + "org.apache.twill:twill-common", + "org.apache.twill:twill-zookeeper" ] }, "com.google.code.gson:gson": { "locked": "2.2.4", "transitive": [ - "org.apache.hadoop:hadoop-common" + "co.cask.tephra:tephra-core", + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" ] }, "com.google.errorprone:error_prone_annotations": { @@ -3076,8 +6854,13 @@ ] }, "com.google.guava:guava": { - "locked": "16.0.1", + "locked": "18.0", "transitive": [ + "co.cask.tephra:tephra-core", + "com.jolbox:bonecp", + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-linq4j", + "org.apache.curator:apache-curator", "org.apache.curator:curator-client", "org.apache.curator:curator-framework", "org.apache.curator:curator-recipes", @@ -3087,20 +6870,59 @@ "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + 
"org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-metastore", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper", + "org.reflections:reflections" + ] + }, + "com.google.inject.extensions:guice-assistedinject": { + "locked": "3.0", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "com.google.inject.extensions:guice-servlet": { + "locked": "3.0", + "transitive": [ + "com.sun.jersey.contribs:jersey-guice", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.slider:slider-core" ] }, "com.google.inject:guice": { "locked": "3.0", "transitive": [ + "co.cask.tephra:tephra-core", + "com.google.inject.extensions:guice-assistedinject", + "com.google.inject.extensions:guice-servlet", "com.sun.jersey.contribs:jersey-guice", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager" ] }, "com.google.protobuf:protobuf-java": { - "locked": "2.5.0", + "locked": "3.0.0-beta-1", "transitive": [ + "org.apache.calcite.avatica:avatica", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-mapreduce-client-app", @@ -3111,7 +6933,54 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-metastore", + "org.apache.orc:orc-core", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "com.jamesmurty.utils:java-xmlbuilder": { + "locked": "0.4", + "transitive": [ + "net.java.dev.jets3t:jets3t" + ] + }, + "com.jcraft:jsch": { + "locked": "0.1.42", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "com.jolbox:bonecp": { + "locked": "0.8.0.RELEASE", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "com.klarna:hiverunner": { + "locked": "5.2.1", + "requested": "5.2.1" + }, + "com.lmax:disruptor": { + "locked": "3.3.0", + "transitive": [ + "org.apache.hbase:hbase-server" + ] + }, + "com.ning:async-http-client": { + "locked": "1.8.16", + "transitive": [ + "org.apache.tez:tez-runtime-library" ] }, "com.sun.jersey.contribs:jersey-guice": { @@ -3125,7 +6994,9 @@ "locked": "1.9", "transitive": [ "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "com.sun.jersey:jersey-core": { @@ -3134,22 +7005,32 @@ "com.sun.jersey:jersey-client", "com.sun.jersey:jersey-json", "com.sun.jersey:jersey-server", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", 
"org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hbase:hbase-server" ] }, "com.sun.jersey:jersey-json": { "locked": "1.9", "transitive": [ + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "com.sun.jersey:jersey-server": { "locked": "1.9", "transitive": [ "com.sun.jersey.contribs:jersey-guice", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" ] }, "com.sun.xml.bind:jaxb-impl": { @@ -3158,6 +7039,26 @@ "com.sun.jersey:jersey-json" ] }, + "com.tdunning:json": { + "locked": "1.8", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server" + ] + }, + "com.yammer.metrics:metrics-core": { + "locked": "2.2.0", + "transitive": [ + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server" + ] + }, + "com.zaxxer:HikariCP": { + "locked": "2.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "commons-beanutils:commons-beanutils": { "locked": "1.7.0", "transitive": [ @@ -3176,26 +7077,47 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.tez:tez-dag" ] }, "commons-codec:commons-codec": { - "locked": "1.6", + "locked": "1.9", "transitive": [ "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", "org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", - "org.apache.httpcomponents:httpclient" + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.httpcomponents:httpclient", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-library" ] }, "commons-collections:commons-collections": { "locked": "3.2.2", "transitive": [ "commons-configuration:commons-configuration", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.tez:tez-mapreduce" ] }, "commons-configuration:commons-configuration": { @@ -3204,16 +7126,38 @@ "org.apache.hadoop:hadoop-common" ] }, + "commons-daemon:commons-daemon": { + "locked": "1.0.13", + "transitive": [ + "org.apache.hadoop:hadoop-hdfs" + ] + }, + "commons-dbcp:commons-dbcp": { + "locked": "1.4", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-metastore" + ] + }, "commons-digester:commons-digester": { "locked": "1.8", "transitive": [ "commons-configuration:commons-configuration" ] }, + "commons-el:commons-el": { + "locked": "1.0", + "transitive": [ + 
"tomcat:jasper-runtime" + ] + }, "commons-httpclient:commons-httpclient": { "locked": "3.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core" ] }, "commons-io:commons-io": { @@ -3221,7 +7165,13 @@ "transitive": [ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "commons-lang:commons-lang": { @@ -3233,17 +7183,40 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", + "org.apache.orc:orc-core", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-library", + "org.apache.velocity:velocity" ] }, "commons-logging:commons-logging": { - "locked": "1.1.3", + "locked": "1.2", "transitive": [ "commons-beanutils:commons-beanutils", "commons-beanutils:commons-beanutils-core", "commons-configuration:commons-configuration", "commons-digester:commons-digester", + "commons-el:commons-el", "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", + "net.sf.jpam:jpam", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-api", @@ -3251,7 +7224,16 @@ "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", - "org.apache.httpcomponents:httpclient" + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.httpcomponents:httpclient", + "org.apache.slider:slider-core" ] }, "commons-net:commons-net": { @@ -3263,26 +7245,87 @@ "commons-pool:commons-pool": { "locked": "1.6", "transitive": [ + "commons-dbcp:commons-dbcp", + "org.apache.hive:hive-metastore", "org.apache.parquet:parquet-hadoop" ] }, + "dom4j:dom4j": { + "locked": "1.6.1", + "transitive": [ + "org.reflections:reflections" + ] + }, "io.airlift:aircompressor": { "locked": "0.15", "transitive": [ "org.apache.orc:orc-core" ] }, + "io.dropwizard.metrics:metrics-core": { + "locked": "3.1.2", + "transitive": [ + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-json": { + 
"locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-jvm": { + "locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "io.netty:netty": { - "locked": "3.7.0.Final", + "locked": "3.9.2.Final", "transitive": [ + "com.ning:async-http-client", "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hive:hive-llap-server", "org.apache.zookeeper:zookeeper" ] }, "io.netty:netty-all": { "locked": "4.0.23.Final", "transitive": [ - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server" + ] + }, + "it.unimi.dsi:fastutil": { + "locked": "6.5.6", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "jakarta.jms:jakarta.jms-api": { + "locked": "2.0.2", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions" + ] + }, + "javassist:javassist": { + "locked": "3.12.1.GA", + "transitive": [ + "org.reflections:reflections" + ] + }, + "javax.activation:activation": { + "locked": "1.1", + "transitive": [ + "javax.mail:mail", + "org.eclipse.jetty.aggregate:jetty-all" ] }, "javax.annotation:javax.annotation-api": { @@ -3298,11 +7341,49 @@ "com.sun.jersey.contribs:jersey-guice" ] }, + "javax.jdo:jdo-api": { + "locked": "3.0.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "javax.mail:mail": { + "locked": "1.4.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "javax.servlet:jsp-api": { + "locked": "2.0", + "transitive": [ + "tomcat:jasper-compiler" + ] + }, "javax.servlet:servlet-api": { "locked": "2.5", "transitive": [ + "javax.servlet:jsp-api", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.slider:slider-core", + "org.apache.tez:tez-dag", + "org.eclipse.jetty.aggregate:jetty-all", + "tomcat:jasper-runtime" + ] + }, + "javax.transaction:jta": { + "locked": "1.1", + "transitive": [ + "javax.jdo:jdo-api" + ] + }, + "javax.transaction:transaction-api": { + "locked": "1.1", + "transitive": [ + "org.datanucleus:javax.jdo" ] }, "javax.xml.bind:jaxb-api": { @@ -3314,14 +7395,39 @@ "org.apache.orc:orc-core" ] }, + "javolution:javolution": { + "locked": "5.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "jline:jline": { - "locked": "0.9.94", + "locked": "2.12", "transitive": [ + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", "org.apache.zookeeper:zookeeper" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-common" + ] + }, "junit:junit": { - "locked": "4.12" + "locked": "4.12", + "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server" + ] }, "log4j:log4j": { "locked": "1.2.17", @@ -3330,99 +7436,313 @@ "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-common", + 
"org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", "org.apache.zookeeper:zookeeper" ] }, - "org.apache.avro:avro": { - "locked": "1.9.2", + "net.hydromatic:eigenbase-properties": { + "locked": "1.1.5", "transitive": [ - "org.apache.iceberg:iceberg-core" + "org.apache.calcite:calcite-core" ] }, - "org.apache.commons:commons-compress": { - "locked": "1.19", + "net.java.dev.jets3t:jets3t": { + "locked": "0.9.0", "transitive": [ - "org.apache.avro:avro", - "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-common" ] }, - "org.apache.commons:commons-math3": { - "locked": "3.1.1", + "net.sf.jpam:jpam": { + "locked": "1.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hive:hive-service" ] }, - "org.apache.curator:curator-client": { - "locked": "2.7.1", + "net.sf.opencsv:opencsv": { + "locked": "2.3", "transitive": [ - "org.apache.curator:curator-framework", - "org.apache.hadoop:hadoop-common" + "org.apache.hive:hive-serde" ] }, - "org.apache.curator:curator-framework": { - "locked": "2.7.1", + "org.antlr:ST4": { + "locked": "4.0.4", "transitive": [ - "org.apache.curator:curator-recipes", - "org.apache.hadoop:hadoop-auth" + "org.apache.hive:hive-exec" ] }, - "org.apache.curator:curator-recipes": { - "locked": "2.7.1", + "org.antlr:antlr-runtime": { + "locked": "3.5.2", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.antlr:ST4", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" ] }, - "org.apache.directory.api:api-asn1-api": { - "locked": "1.0.0-M20", + "org.apache.ant:ant": { + "locked": "1.9.1", "transitive": [ - "org.apache.directory.server:apacheds-kerberos-codec" + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-vector-code-gen" ] }, - "org.apache.directory.api:api-util": { - "locked": "1.0.0-M20", + "org.apache.ant:ant-launcher": { + "locked": "1.9.1", "transitive": [ - "org.apache.directory.server:apacheds-kerberos-codec" + "org.apache.ant:ant" ] }, - "org.apache.directory.server:apacheds-i18n": { - "locked": "2.0.0-M15", + "org.apache.avro:avro": { + "locked": "1.9.2", + "requested": "1.9.2", "transitive": [ - "org.apache.directory.server:apacheds-kerberos-codec" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-serde", + "org.apache.iceberg:iceberg-core", + "org.apache.slider:slider-core" ] }, - "org.apache.directory.server:apacheds-kerberos-codec": { - "locked": "2.0.0-M15", + "org.apache.calcite.avatica:avatica": { + "locked": "1.8.0", + "transitive": [ + "org.apache.calcite:calcite-core" + ] + }, + "org.apache.calcite.avatica:avatica-metrics": { + "locked": "1.8.0", + "transitive": [ + "org.apache.calcite.avatica:avatica" + ] + }, + "org.apache.calcite:calcite-core": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-druid": { + "locked": "1.10.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-linq4j": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid" + ] + }, + "org.apache.commons:commons-collections4": { + "locked": "4.1", + "transitive": [ + 
"org.apache.tez:tez-api", + "org.apache.tez:tez-dag" + ] + }, + "org.apache.commons:commons-compress": { + "locked": "1.19", + "transitive": [ + "org.apache.avro:avro", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core" + ] + }, + "org.apache.commons:commons-lang3": { + "locked": "3.2", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-tez" + ] + }, + "org.apache.commons:commons-math": { + "locked": "2.2", + "transitive": [ + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server" + ] + }, + "org.apache.commons:commons-math3": { + "locked": "3.1.1", + "transitive": [ + "org.apache.hadoop:hadoop-common", + "org.apache.tez:tez-dag" + ] + }, + "org.apache.curator:apache-curator": { + "locked": "2.7.1", + "transitive": [ + "org.apache.hive:hive-exec", + "org.apache.hive:hive-llap-client" + ] + }, + "org.apache.curator:curator-client": { + "locked": "2.7.1", + "transitive": [ + "org.apache.curator:curator-framework", + "org.apache.hadoop:hadoop-common", + "org.apache.slider:slider-core" + ] + }, + "org.apache.curator:curator-framework": { + "locked": "2.7.1", + "transitive": [ + "org.apache.curator:curator-recipes", + "org.apache.hadoop:hadoop-auth", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-service", + "org.apache.slider:slider-core" + ] + }, + "org.apache.curator:curator-recipes": { + "locked": "2.7.1", + "transitive": [ + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-service", + "org.apache.slider:slider-core" + ] + }, + "org.apache.derby:derby": { + "locked": "10.10.2.0", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.directory.api:api-asn1-api": { + "locked": "1.0.0-M20", + "transitive": [ + "org.apache.directory.server:apacheds-kerberos-codec" + ] + }, + "org.apache.directory.api:api-util": { + "locked": "1.0.0-M20", + "transitive": [ + "org.apache.directory.server:apacheds-kerberos-codec" + ] + }, + "org.apache.directory.server:apacheds-i18n": { + "locked": "2.0.0-M15", + "transitive": [ + "org.apache.directory.server:apacheds-kerberos-codec" + ] + }, + "org.apache.directory.server:apacheds-kerberos-codec": { + "locked": "2.0.0-M15", "transitive": [ "org.apache.hadoop:hadoop-auth" ] }, + "org.apache.geronimo.specs:geronimo-annotation_1.0_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jaspic_1.0_spec": { + "locked": "1.0", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jta_1.1_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, "org.apache.hadoop:hadoop-annotations": { "locked": "2.7.3", "transitive": [ "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-api", + "org.apache.hadoop:hadoop-yarn-client", + "org.apache.hadoop:hadoop-yarn-common", + 
"org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-archives": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core" ] }, "org.apache.hadoop:hadoop-auth": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.tez:tez-api", + "org.apache.tez:tez-runtime-library" ] }, "org.apache.hadoop:hadoop-client": { - "locked": "2.7.3" + "locked": "2.7.3", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" + ] }, "org.apache.hadoop:hadoop-common": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-client" + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "org.apache.hadoop:hadoop-client", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" ] }, "org.apache.hadoop:hadoop-hdfs": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-client" + "org.apache.hadoop:hadoop-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.slider:slider-core" ] }, "org.apache.hadoop:hadoop-mapreduce-client-app": { @@ -3436,14 +7756,22 @@ "transitive": [ "org.apache.hadoop:hadoop-mapreduce-client-app", "org.apache.hadoop:hadoop-mapreduce-client-jobclient", - "org.apache.hadoop:hadoop-mapreduce-client-shuffle" + "org.apache.hadoop:hadoop-mapreduce-client-shuffle", + "org.apache.tez:tez-mapreduce" ] }, "org.apache.hadoop:hadoop-mapreduce-client-core": { "locked": "2.7.3", "transitive": [ "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-mapreduce-client-common" + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.tez:tez-mapreduce" ] }, "org.apache.hadoop:hadoop-mapreduce-client-jobclient": { @@ -3465,14 +7793,26 @@ "org.apache.hadoop:hadoop-client", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" ] }, "org.apache.hadoop:hadoop-yarn-client": 
{ "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-mapreduce-client-common" + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce" ] }, "org.apache.hadoop:hadoop-yarn-common": { @@ -3481,8 +7821,22 @@ "org.apache.hadoop:hadoop-mapreduce-client-common", "org.apache.hadoop:hadoop-mapreduce-client-core", "org.apache.hadoop:hadoop-yarn-client", + "org.apache.hadoop:hadoop-yarn-registry", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-yarn-registry": { + "locked": "2.7.1", + "transitive": [ + "org.apache.slider:slider-core" ] }, "org.apache.hadoop:hadoop-yarn-server-common": { @@ -3490,7 +7844,8 @@ "transitive": [ "org.apache.hadoop:hadoop-mapreduce-client-common", "org.apache.hadoop:hadoop-mapreduce-client-shuffle", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy" ] }, "org.apache.hadoop:hadoop-yarn-server-nodemanager": { @@ -3499,23 +7854,243 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle" ] }, + "org.apache.hadoop:hadoop-yarn-server-web-proxy": { + "locked": "2.7.0", + "transitive": [ + "org.apache.tez:tez-dag" + ] + }, + "org.apache.hbase:hbase-annotations": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-protocol" + ] + }, + "org.apache.hbase:hbase-client": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.hbase:hbase-common": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hbase:hbase-hadoop-compat": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hbase:hbase-hadoop2-compat": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hbase:hbase-procedure": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-server" + ] + }, + "org.apache.hbase:hbase-protocol": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server" + ] + }, + "org.apache.hbase:hbase-server": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hive.hcatalog:hive-hcatalog-core": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client" + ] + }, + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-webhcat-java-client" + ] + }, + 
"org.apache.hive.hcatalog:hive-webhcat-java-client": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.hive.shims:hive-shims-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive:hive-cli": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core" + ] + }, + "org.apache.hive:hive-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez", + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-exec": { + "locked": "2.3.7" + }, + "org.apache.hive:hive-jdbc": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.hive:hive-llap-client": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez" + ] + }, + "org.apache.hive:hive-llap-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hive:hive-llap-server": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-service" + ] + }, + "org.apache.hive:hive-llap-tez": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hive:hive-metastore": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-service" + ] + }, + "org.apache.hive:hive-serde": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.hive:hive-service": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc" + ] + }, + "org.apache.hive:hive-service-rpc": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service" + ] + }, + "org.apache.hive:hive-shims": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-storage-api": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.hive:hive-vector-code-gen": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, "org.apache.htrace:htrace-core": { "locked": "3.1.0-incubating", "transitive": [ "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server" ] }, "org.apache.httpcomponents:httpclient": { - "locked": "4.2.5", + "locked": "4.5.2", "transitive": [ - "org.apache.hadoop:hadoop-auth" + "net.java.dev.jets3t:jets3t", + "org.apache.calcite.avatica:avatica", + "org.apache.hadoop:hadoop-auth", + "org.apache.hive:hive-jdbc", + "org.apache.slider:slider-core", + "org.apache.thrift:libthrift" ] }, 
"org.apache.httpcomponents:httpcore": { - "locked": "4.2.4", + "locked": "4.4.4", "transitive": [ - "org.apache.httpcomponents:httpclient" + "net.java.dev.jets3t:jets3t", + "org.apache.calcite.avatica:avatica", + "org.apache.hive:hive-jdbc", + "org.apache.httpcomponents:httpclient", + "org.apache.slider:slider-core", + "org.apache.thrift:libthrift" ] }, "org.apache.iceberg:iceberg-api": { @@ -3557,9 +8132,52 @@ "org.apache.iceberg:iceberg-parquet": { "project": true }, + "org.apache.ivy:ivy": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.logging.log4j:log4j-1.2-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-core", + "org.apache.logging.log4j:log4j-slf4j-impl", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-core": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-slf4j-impl": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-web": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "org.apache.orc:orc-core": { "locked": "1.6.3", "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server", "org.apache.iceberg:iceberg-orc" ] }, @@ -3609,35 +8227,197 @@ "org.apache.parquet:parquet-avro" ] }, + "org.apache.parquet:parquet-hadoop-bundle": { + "locked": "1.8.1", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, "org.apache.parquet:parquet-jackson": { "locked": "1.11.0", "transitive": [ "org.apache.parquet:parquet-hadoop" ] }, - "org.apache.yetus:audience-annotations": { - "locked": "0.11.0", + "org.apache.slider:slider-core": { + "locked": "0.90.2-incubating", "transitive": [ - "org.apache.parquet:parquet-common" + "org.apache.hive:hive-llap-server" ] }, - "org.apache.zookeeper:zookeeper": { - "locked": "3.4.6", + "org.apache.tez:hadoop-shim": { + "locked": "0.9.1", "transitive": [ - "org.apache.curator:curator-client", - "org.apache.curator:curator-framework", - "org.apache.curator:curator-recipes", - "org.apache.hadoop:hadoop-auth", - "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-server-common" + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-internals" ] }, - "org.checkerframework:checker-qual": { - "locked": "2.6.0", + "org.apache.tez:tez-api": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.tez:tez-common": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.tez:tez-dag": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.tez:tez-mapreduce": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.tez:tez-runtime-internals": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-dag" + ] + }, + 
"org.apache.tez:tez-runtime-library": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce" + ] + }, + "org.apache.thrift:libfb303": { + "locked": "0.9.3", + "transitive": [ + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, + "org.apache.thrift:libthrift": { + "locked": "0.9.3", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.thrift:libfb303" + ] + }, + "org.apache.twill:twill-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-common": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-api", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "org.apache.twill:twill-discovery-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.twill:twill-discovery-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core" + ] + }, + "org.apache.twill:twill-zookeeper": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.velocity:velocity": { + "locked": "1.5", + "transitive": [ + "org.apache.hive:hive-vector-code-gen" + ] + }, + "org.apache.yetus:audience-annotations": { + "locked": "0.11.0", + "transitive": [ + "org.apache.parquet:parquet-common" + ] + }, + "org.apache.zookeeper:zookeeper": { + "locked": "3.4.6", + "transitive": [ + "org.apache.curator:apache-curator", + "org.apache.curator:curator-client", + "org.apache.curator:curator-framework", + "org.apache.curator:curator-recipes", + "org.apache.hadoop:hadoop-auth", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.slider:slider-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apiguardian:apiguardian-api": { + "locked": "1.1.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-api", + "org.junit.jupiter:junit-jupiter-params", + "org.junit.platform:junit-platform-commons" + ] + }, + "org.checkerframework:checker-qual": { + "locked": "2.6.0", "transitive": [ "com.github.ben-manes.caffeine:caffeine" ] }, + "org.codehaus.groovy:groovy-all": { + "locked": "2.4.4", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, "org.codehaus.jackson:jackson-core-asl": { "locked": "1.9.13", "transitive": [ @@ -3645,6 +8425,9 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hbase:hbase-server", + 
"org.apache.slider:slider-core", "org.codehaus.jackson:jackson-jaxrs", "org.codehaus.jackson:jackson-mapper-asl", "org.codehaus.jackson:jackson-xc" @@ -3654,7 +8437,9 @@ "locked": "1.9.13", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" ] }, "org.codehaus.jackson:jackson-mapper-asl": { @@ -3664,6 +8449,12 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.slider:slider-core", "org.codehaus.jackson:jackson-jaxrs", "org.codehaus.jackson:jackson-xc" ] @@ -3672,7 +8463,21 @@ "locked": "1.9.13", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.slider:slider-core" + ] + }, + "org.codehaus.janino:commons-compiler": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.codehaus.janino:janino" + ] + }, + "org.codehaus.janino:janino": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core" ] }, "org.codehaus.jettison:jettison": { @@ -3682,6 +8487,44 @@ "org.apache.hadoop:hadoop-yarn-server-nodemanager" ] }, + "org.datanucleus:datanucleus-api-jdo": { + "locked": "4.2.4", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-core": { + "locked": "4.1.17", + "transitive": [ + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-rdbms": { + "locked": "4.1.19", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:javax.jdo": { + "locked": "3.2.0-m3", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.eclipse.jetty.aggregate:jetty-all": { + "locked": "7.6.0.v20120127", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-service" + ] + }, + "org.eclipse.jetty.orbit:javax.servlet": { + "locked": "3.0.0.v201112011016", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "org.fusesource.leveldbjni:leveldbjni-all": { "locked": "1.8", "transitive": [ @@ -3697,19 +8540,108 @@ "junit:junit" ] }, + "org.jamon:jamon-runtime": { + "locked": "2.3.1", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service" + ] + }, "org.jetbrains:annotations": { "locked": "17.0.0", "transitive": [ "org.apache.orc:orc-core" ] }, + "org.jruby.jcodings:jcodings": { + "locked": "1.0.8", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.jruby.joni:joni" + ] + }, + "org.jruby.joni:joni": { + "locked": "2.1.2", + "transitive": [ + "org.apache.hbase:hbase-client" + ] + }, + "org.junit.jupiter:junit-jupiter": { + "locked": "5.6.0", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.junit.jupiter:junit-jupiter-api": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter", + "org.junit.jupiter:junit-jupiter-params" + ] + }, + "org.junit.jupiter:junit-jupiter-params": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter" + ] + }, + "org.junit.platform:junit-platform-commons": { + "locked": "1.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-api" + ] + }, "org.mockito:mockito-core": { "locked": 
"1.10.19" }, + "org.objenesis:objenesis": { + "locked": "2.5.1", + "transitive": [ + "com.esotericsoftware:kryo-shaded" + ] + }, + "org.opentest4j:opentest4j": { + "locked": "1.2.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-api" + ] + }, + "org.ow2.asm:asm-all": { + "locked": "5.0.2", + "transitive": [ + "org.apache.twill:twill-core" + ] + }, + "org.reflections:reflections": { + "locked": "0.9.8", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.roaringbitmap:RoaringBitmap": { + "locked": "0.4.9", + "transitive": [ + "org.apache.tez:tez-runtime-library" + ] + }, "org.slf4j:slf4j-api": { "locked": "1.7.25", "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "com.jolbox:bonecp", + "com.ning:async-http-client", + "com.yammer.metrics:metrics-core", + "com.zaxxer:HikariCP", + "io.dropwizard.metrics:metrics-core", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", + "org.apache.calcite.avatica:avatica-metrics", + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid", "org.apache.curator:curator-client", "org.apache.directory.api:api-asn1-api", "org.apache.directory.api:api-util", @@ -3724,16 +8656,46 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.hive:hive-shims", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", "org.apache.iceberg:iceberg-api", "org.apache.iceberg:iceberg-common", "org.apache.iceberg:iceberg-core", "org.apache.iceberg:iceberg-data", "org.apache.iceberg:iceberg-orc", "org.apache.iceberg:iceberg-parquet", + "org.apache.logging.log4j:log4j-slf4j-impl", "org.apache.orc:orc-core", "org.apache.orc:orc-shims", "org.apache.parquet:parquet-common", "org.apache.parquet:parquet-format-structures", + "org.apache.slider:slider-core", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-library", + "org.apache.thrift:libthrift", + "org.apache.twill:twill-common", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper", "org.apache.zookeeper:zookeeper", "org.slf4j:slf4j-simple" ] @@ -3759,6 +8721,34 @@ "org.apache.parquet:parquet-hadoop" ] }, + "oro:oro": { + "locked": "2.0.8", + "transitive": [ + "org.apache.velocity:velocity" + ] + }, + "stax:stax-api": { + "locked": "1.0.1", + "transitive": [ + "org.apache.hive:hive-exec", + "org.codehaus.jettison:jettison" + ] + }, + "tomcat:jasper-compiler": { + "locked": "5.5.23", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, + "tomcat:jasper-runtime": { + "locked": 
"5.5.23", + "transitive": [ + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, "xerces:xercesImpl": { "locked": "2.9.1", "transitive": [ @@ -3768,6 +8758,7 @@ "xml-apis:xml-apis": { "locked": "1.3.04", "transitive": [ + "dom4j:dom4j", "xerces:xercesImpl" ] }, @@ -3780,6 +8771,12 @@ } }, "testRuntime": { + "ant:ant": { + "locked": "1.6.5", + "transitive": [ + "tomcat:jasper-compiler" + ] + }, "aopalliance:aopalliance": { "locked": "1.0", "transitive": [ @@ -3789,14 +8786,83 @@ "asm:asm": { "locked": "3.1", "transitive": [ + "asm:asm-tree", "com.sun.jersey:jersey-server", "org.sonatype.sisu.inject:cglib" ] }, + "asm:asm-commons": { + "locked": "3.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "asm:asm-tree": { + "locked": "3.1", + "transitive": [ + "asm:asm-commons" + ] + }, + "ch.qos.logback:logback-classic": { + "locked": "1.0.9", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "ch.qos.logback:logback-core": { + "locked": "1.0.9", + "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "co.cask.tephra:tephra-api": { + "locked": "0.6.0", + "transitive": [ + "co.cask.tephra:tephra-core", + "co.cask.tephra:tephra-hbase-compat-1.0", + "org.apache.hive:hive-metastore" + ] + }, + "co.cask.tephra:tephra-core": { + "locked": "0.6.0", + "transitive": [ + "co.cask.tephra:tephra-hbase-compat-1.0", + "org.apache.hive:hive-metastore" + ] + }, + "co.cask.tephra:tephra-hbase-compat-1.0": { + "locked": "0.6.0", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "com.beust:jcommander": { + "locked": "1.30", + "transitive": [ + "org.apache.slider:slider-core" + ] + }, + "com.esotericsoftware:kryo-shaded": { + "locked": "4.0.2", + "requested": "4.0.2" + }, + "com.esotericsoftware:minlog": { + "locked": "1.3.0", + "transitive": [ + "com.esotericsoftware:kryo-shaded" + ] + }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.10.2", + "requested": "2.6.5", "transitive": [ - "com.fasterxml.jackson.core:jackson-databind" + "com.fasterxml.jackson.core:jackson-databind", + "org.apache.calcite.avatica:avatica" ] }, "com.fasterxml.jackson.core:jackson-core": { @@ -3804,13 +8870,17 @@ "transitive": [ "com.fasterxml.jackson.core:jackson-databind", "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", "org.apache.iceberg:iceberg-core" ] }, "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.10.2", "transitive": [ + "io.dropwizard.metrics:metrics-json", "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", + "org.apache.hive:hive-common", "org.apache.iceberg:iceberg-core" ] }, @@ -3820,9 +8890,24 @@ "org.apache.iceberg:iceberg-core" ] }, + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter": { + "locked": "0.1.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "com.github.stephenc.findbugs:findbugs-annotations": { "locked": "1.3.9-1", "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", "org.apache.iceberg:iceberg-api", "org.apache.iceberg:iceberg-common", 
"org.apache.iceberg:iceberg-core", @@ -3834,13 +8919,26 @@ "com.google.code.findbugs:jsr305": { "locked": "3.0.0", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.calcite:calcite-core", + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-serde", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-internals", + "org.apache.twill:twill-api", + "org.apache.twill:twill-common", + "org.apache.twill:twill-zookeeper" ] }, "com.google.code.gson:gson": { "locked": "2.2.4", "transitive": [ - "org.apache.hadoop:hadoop-common" + "co.cask.tephra:tephra-core", + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" ] }, "com.google.errorprone:error_prone_annotations": { @@ -3850,8 +8948,13 @@ ] }, "com.google.guava:guava": { - "locked": "16.0.1", + "locked": "18.0", "transitive": [ + "co.cask.tephra:tephra-core", + "com.jolbox:bonecp", + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-linq4j", + "org.apache.curator:apache-curator", "org.apache.curator:curator-client", "org.apache.curator:curator-framework", "org.apache.curator:curator-recipes", @@ -3860,21 +8963,67 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-metastore", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper", + "org.reflections:reflections" + ] + }, + "com.google.inject.extensions:guice-assistedinject": { + "locked": "3.0", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "com.google.inject.extensions:guice-servlet": { + "locked": "3.0", + "transitive": [ + "com.sun.jersey.contribs:jersey-guice", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.slider:slider-core" ] }, "com.google.inject:guice": { "locked": "3.0", "transitive": [ + "co.cask.tephra:tephra-core", + "com.google.inject.extensions:guice-assistedinject", + "com.google.inject.extensions:guice-servlet", "com.sun.jersey.contribs:jersey-guice", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.google.protobuf:protobuf-java": { 
- "locked": "2.5.0", + "locked": "3.0.0-beta-1", "transitive": [ + "org.apache.calcite.avatica:avatica", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-mapreduce-client-app", @@ -3884,22 +9033,77 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle", "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-metastore", + "org.apache.orc:orc-core", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "com.jamesmurty.utils:java-xmlbuilder": { + "locked": "0.4", + "transitive": [ + "net.java.dev.jets3t:jets3t" + ] + }, + "com.jcraft:jsch": { + "locked": "0.1.42", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "com.jolbox:bonecp": { + "locked": "0.8.0.RELEASE", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "com.klarna:hiverunner": { + "locked": "5.2.1", + "requested": "5.2.1" + }, + "com.lmax:disruptor": { + "locked": "3.3.0", + "transitive": [ + "org.apache.hbase:hbase-server" + ] + }, + "com.ning:async-http-client": { + "locked": "1.8.16", + "transitive": [ + "org.apache.tez:tez-runtime-library" ] }, "com.sun.jersey.contribs:jersey-guice": { "locked": "1.9", "transitive": [ "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.sun.jersey:jersey-client": { "locked": "1.9", "transitive": [ "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "com.sun.jersey:jersey-core": { @@ -3908,22 +9112,36 @@ "com.sun.jersey:jersey-client", "com.sun.jersey:jersey-json", "com.sun.jersey:jersey-server", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-server" ] }, "com.sun.jersey:jersey-json": { "locked": "1.9", "transitive": [ + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "com.sun.jersey:jersey-server": { "locked": "1.9", 
"transitive": [ "com.sun.jersey.contribs:jersey-guice", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" ] }, "com.sun.xml.bind:jaxb-impl": { @@ -3932,6 +9150,26 @@ "com.sun.jersey:jersey-json" ] }, + "com.tdunning:json": { + "locked": "1.8", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server" + ] + }, + "com.yammer.metrics:metrics-core": { + "locked": "2.2.0", + "transitive": [ + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server" + ] + }, + "com.zaxxer:HikariCP": { + "locked": "2.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "commons-beanutils:commons-beanutils": { "locked": "1.7.0", "transitive": [ @@ -3950,26 +9188,48 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.tez:tez-dag" ] }, "commons-codec:commons-codec": { - "locked": "1.6", + "locked": "1.9", "transitive": [ "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", "org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", - "org.apache.httpcomponents:httpclient" + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.httpcomponents:httpclient", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-library" ] }, "commons-collections:commons-collections": { "locked": "3.2.2", "transitive": [ "commons-configuration:commons-configuration", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.tez:tez-mapreduce" ] }, "commons-configuration:commons-configuration": { @@ -3978,16 +9238,38 @@ "org.apache.hadoop:hadoop-common" ] }, + "commons-daemon:commons-daemon": { + "locked": "1.0.13", + "transitive": [ + "org.apache.hadoop:hadoop-hdfs" + ] + }, + "commons-dbcp:commons-dbcp": { + "locked": "1.4", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-metastore" + ] + }, "commons-digester:commons-digester": { "locked": "1.8", "transitive": [ "commons-configuration:commons-configuration" ] }, + "commons-el:commons-el": { + "locked": "1.0", + "transitive": [ + "tomcat:jasper-runtime" + ] + }, "commons-httpclient:commons-httpclient": { "locked": "3.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core" ] }, "commons-io:commons-io": { @@ -3995,7 +9277,14 @@ "transitive": [ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", - "org.apache.hadoop:hadoop-yarn-common" + 
"org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "commons-lang:commons-lang": { @@ -4007,25 +9296,62 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server", + "org.apache.hive.shims:hive-shims-0.23", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", + "org.apache.orc:orc-core", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-library", + "org.apache.velocity:velocity" ] }, "commons-logging:commons-logging": { - "locked": "1.1.3", + "locked": "1.2", "transitive": [ "commons-beanutils:commons-beanutils", "commons-beanutils:commons-beanutils-core", "commons-configuration:commons-configuration", "commons-digester:commons-digester", + "commons-el:commons-el", "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", + "net.sf.jpam:jpam", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", - "org.apache.httpcomponents:httpclient" + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.httpcomponents:httpclient", + "org.apache.slider:slider-core" ] }, "commons-net:commons-net": { @@ -4037,26 +9363,88 @@ "commons-pool:commons-pool": { "locked": "1.6", "transitive": [ + "commons-dbcp:commons-dbcp", + "org.apache.hive:hive-metastore", "org.apache.parquet:parquet-hadoop" ] }, + "dom4j:dom4j": { + "locked": "1.6.1", + "transitive": [ + "org.reflections:reflections" + ] + }, "io.airlift:aircompressor": { "locked": "0.15", "transitive": [ "org.apache.orc:orc-core" ] }, + "io.dropwizard.metrics:metrics-core": { + "locked": "3.1.2", + "transitive": [ + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-json": { + "locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-jvm": { + 
"locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "io.netty:netty": { - "locked": "3.7.0.Final", + "locked": "3.9.2.Final", "transitive": [ + "com.ning:async-http-client", "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hive:hive-llap-server", "org.apache.zookeeper:zookeeper" ] }, "io.netty:netty-all": { "locked": "4.0.23.Final", "transitive": [ - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-server" + ] + }, + "it.unimi.dsi:fastutil": { + "locked": "6.5.6", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "jakarta.jms:jakarta.jms-api": { + "locked": "2.0.2", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions" + ] + }, + "javassist:javassist": { + "locked": "3.12.1.GA", + "transitive": [ + "org.reflections:reflections" + ] + }, + "javax.activation:activation": { + "locked": "1.1", + "transitive": [ + "javax.mail:mail", + "org.eclipse.jetty.aggregate:jetty-all" ] }, "javax.annotation:javax.annotation-api": { @@ -4072,17 +9460,56 @@ "com.sun.jersey.contribs:jersey-guice" ] }, + "javax.jdo:jdo-api": { + "locked": "3.0.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "javax.mail:mail": { + "locked": "1.4.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, "javax.servlet.jsp:jsp-api": { "locked": "2.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.slider:slider-core" + ] + }, + "javax.servlet:jsp-api": { + "locked": "2.0", + "transitive": [ + "tomcat:jasper-compiler" ] }, "javax.servlet:servlet-api": { "locked": "2.5", "transitive": [ + "javax.servlet:jsp-api", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.slider:slider-core", + "org.apache.tez:tez-dag", + "org.eclipse.jetty.aggregate:jetty-all", + "tomcat:jasper-runtime" + ] + }, + "javax.transaction:jta": { + "locked": "1.1", + "transitive": [ + "javax.jdo:jdo-api" + ] + }, + "javax.transaction:transaction-api": { + "locked": "1.1", + "transitive": [ + "org.datanucleus:javax.jdo" ] }, "javax.xml.bind:jaxb-api": { @@ -4090,18 +9517,46 @@ "transitive": [ "com.sun.xml.bind:jaxb-impl", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", "org.apache.orc:orc-core" ] }, + "javolution:javolution": { + "locked": "5.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "jline:jline": { - "locked": "0.9.94", + "locked": "2.12", "transitive": [ + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", "org.apache.zookeeper:zookeeper" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-common" + ] + }, "junit:junit": { - "locked": "4.12" + "locked": "4.12", + "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + 
"org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server" + ] }, "log4j:log4j": { "locked": "1.2.17", @@ -4111,13 +9566,121 @@ "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core", "org.apache.zookeeper:zookeeper" ] }, + "net.hydromatic:eigenbase-properties": { + "locked": "1.1.5", + "transitive": [ + "org.apache.calcite:calcite-core" + ] + }, + "net.java.dev.jets3t:jets3t": { + "locked": "0.9.0", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "net.sf.jpam:jpam": { + "locked": "1.1", + "transitive": [ + "org.apache.hive:hive-service" + ] + }, + "net.sf.opencsv:opencsv": { + "locked": "2.3", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, + "org.antlr:ST4": { + "locked": "4.0.4", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.antlr:antlr-runtime": { + "locked": "3.5.2", + "transitive": [ + "org.antlr:ST4", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.ant:ant": { + "locked": "1.9.1", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-vector-code-gen" + ] + }, + "org.apache.ant:ant-launcher": { + "locked": "1.9.1", + "transitive": [ + "org.apache.ant:ant" + ] + }, "org.apache.avro:avro": { "locked": "1.9.2", + "requested": "1.9.2", "transitive": [ - "org.apache.iceberg:iceberg-core" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-serde", + "org.apache.iceberg:iceberg-core", + "org.apache.slider:slider-core" + ] + }, + "org.apache.calcite.avatica:avatica": { + "locked": "1.8.0", + "transitive": [ + "org.apache.calcite:calcite-core" + ] + }, + "org.apache.calcite.avatica:avatica-metrics": { + "locked": "1.8.0", + "transitive": [ + "org.apache.calcite.avatica:avatica" + ] + }, + "org.apache.calcite:calcite-core": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-druid": { + "locked": "1.10.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-linq4j": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid" + ] + }, + "org.apache.commons:commons-collections4": { + "locked": "4.1", + "transitive": [ + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag" ] }, "org.apache.commons:commons-compress": { @@ -4125,33 +9688,78 @@ "transitive": [ "org.apache.avro:avro", "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core" + ] + }, + "org.apache.commons:commons-lang3": { + "locked": "3.2", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-client", + 
"org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-tez" + ] + }, + "org.apache.commons:commons-math": { + "locked": "2.2", + "transitive": [ + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server" ] }, "org.apache.commons:commons-math3": { "locked": "3.1.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.tez:tez-dag" + ] + }, + "org.apache.curator:apache-curator": { + "locked": "2.7.1", + "transitive": [ + "org.apache.hive:hive-exec", + "org.apache.hive:hive-llap-client" ] }, "org.apache.curator:curator-client": { "locked": "2.7.1", "transitive": [ "org.apache.curator:curator-framework", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.slider:slider-core" ] }, "org.apache.curator:curator-framework": { "locked": "2.7.1", "transitive": [ "org.apache.curator:curator-recipes", - "org.apache.hadoop:hadoop-auth" + "org.apache.hadoop:hadoop-auth", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-service", + "org.apache.slider:slider-core" ] }, "org.apache.curator:curator-recipes": { "locked": "2.7.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-service", + "org.apache.slider:slider-core" + ] + }, + "org.apache.derby:derby": { + "locked": "10.10.2.0", + "transitive": [ + "org.apache.hive:hive-metastore" ] }, "org.apache.directory.api:api-asn1-api": { @@ -4178,32 +9786,100 @@ "org.apache.hadoop:hadoop-auth" ] }, + "org.apache.geronimo.specs:geronimo-annotation_1.0_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jaspic_1.0_spec": { + "locked": "1.0", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jta_1.1_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, "org.apache.hadoop:hadoop-annotations": { "locked": "2.7.3", "transitive": [ "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-api", + "org.apache.hadoop:hadoop-yarn-client", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-archives": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core" ] }, "org.apache.hadoop:hadoop-auth": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.tez:tez-api", + "org.apache.tez:tez-runtime-library" ] }, "org.apache.hadoop:hadoop-client": { - "locked": "2.7.3" + "locked": "2.7.3", + "transitive": [ + 
"org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" + ] }, "org.apache.hadoop:hadoop-common": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-client" + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "org.apache.hadoop:hadoop-client", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" ] }, "org.apache.hadoop:hadoop-hdfs": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-client" + "org.apache.hadoop:hadoop-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "org.apache.hadoop:hadoop-mapreduce-client-app": { @@ -4217,14 +9893,22 @@ "transitive": [ "org.apache.hadoop:hadoop-mapreduce-client-app", "org.apache.hadoop:hadoop-mapreduce-client-jobclient", - "org.apache.hadoop:hadoop-mapreduce-client-shuffle" + "org.apache.hadoop:hadoop-mapreduce-client-shuffle", + "org.apache.tez:tez-mapreduce" ] }, "org.apache.hadoop:hadoop-mapreduce-client-core": { "locked": "2.7.3", "transitive": [ "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-mapreduce-client-common" + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.tez:tez-mapreduce" ] }, "org.apache.hadoop:hadoop-mapreduce-client-jobclient": { @@ -4246,14 +9930,28 @@ "org.apache.hadoop:hadoop-client", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" ] }, "org.apache.hadoop:hadoop-yarn-client": { "locked": "2.7.3", "transitive": [ - "org.apache.hadoop:hadoop-mapreduce-client-common" + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce" ] }, "org.apache.hadoop:hadoop-yarn-common": { @@ -4262,8 +9960,30 @@ "org.apache.hadoop:hadoop-mapreduce-client-common", "org.apache.hadoop:hadoop-mapreduce-client-core", "org.apache.hadoop:hadoop-yarn-client", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - 
"org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-yarn-registry": { + "locked": "2.7.1", + "transitive": [ + "org.apache.slider:slider-core" + ] + }, + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "org.apache.hadoop:hadoop-yarn-server-common": { @@ -4271,7 +9991,10 @@ "transitive": [ "org.apache.hadoop:hadoop-mapreduce-client-common", "org.apache.hadoop:hadoop-mapreduce-client-shuffle", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy" ] }, "org.apache.hadoop:hadoop-yarn-server-nodemanager": { @@ -4280,80 +10003,373 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle" ] }, - "org.apache.htrace:htrace-core": { - "locked": "3.1.0-incubating", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager": { + "locked": "2.7.2", "transitive": [ - "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hive.shims:hive-shims-0.23" ] }, - "org.apache.httpcomponents:httpclient": { - "locked": "4.2.5", + "org.apache.hadoop:hadoop-yarn-server-web-proxy": { + "locked": "2.7.2", "transitive": [ - "org.apache.hadoop:hadoop-auth" + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.tez:tez-dag" ] }, - "org.apache.httpcomponents:httpcore": { - "locked": "4.2.4", + "org.apache.hbase:hbase-annotations": { + "locked": "1.1.1", "transitive": [ - "org.apache.httpcomponents:httpclient" + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-protocol" ] }, - "org.apache.iceberg:iceberg-api": { - "project": true, + "org.apache.hbase:hbase-client": { + "locked": "1.1.1", "transitive": [ - "org.apache.iceberg:iceberg-core", - "org.apache.iceberg:iceberg-data", - "org.apache.iceberg:iceberg-orc", - "org.apache.iceberg:iceberg-parquet" + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore" ] }, - "org.apache.iceberg:iceberg-bundled-guava": { - "project": true, + "org.apache.hbase:hbase-common": { + "locked": "1.1.1", "transitive": [ - "org.apache.iceberg:iceberg-api", - "org.apache.iceberg:iceberg-common" + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" ] }, - "org.apache.iceberg:iceberg-common": { - "project": true, + "org.apache.hbase:hbase-hadoop-compat": { + "locked": "1.1.1", "transitive": [ - "org.apache.iceberg:iceberg-core" + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" ] }, - "org.apache.iceberg:iceberg-core": { - "project": true, + "org.apache.hbase:hbase-hadoop2-compat": { + "locked": "1.1.1", "transitive": [ - 
"org.apache.iceberg:iceberg-data", - "org.apache.iceberg:iceberg-orc", - "org.apache.iceberg:iceberg-parquet" + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" ] }, - "org.apache.iceberg:iceberg-data": { - "project": true - }, - "org.apache.iceberg:iceberg-orc": { - "project": true + "org.apache.hbase:hbase-prefix-tree": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-server" + ] }, - "org.apache.iceberg:iceberg-parquet": { - "project": true + "org.apache.hbase:hbase-procedure": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-server" + ] }, - "org.apache.orc:orc-core": { - "locked": "1.6.3", + "org.apache.hbase:hbase-protocol": { + "locked": "1.1.1", "transitive": [ - "org.apache.iceberg:iceberg-orc" + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server" ] }, - "org.apache.orc:orc-shims": { - "locked": "1.6.3", + "org.apache.hbase:hbase-server": { + "locked": "1.1.1", "transitive": [ - "org.apache.orc:orc-core" + "org.apache.hive:hive-llap-server" ] }, - "org.apache.parquet:parquet-avro": { - "locked": "1.11.0", + "org.apache.hive.hcatalog:hive-hcatalog-core": { + "locked": "2.3.7", "transitive": [ - "org.apache.iceberg:iceberg-parquet" + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client" + ] + }, + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-webhcat-java-client" + ] + }, + "org.apache.hive.hcatalog:hive-webhcat-java-client": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.hive.shims:hive-shims-0.23": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive.shims:hive-shims-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.shims:hive-shims-0.23", + "org.apache.hive.shims:hive-shims-scheduler", + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive.shims:hive-shims-scheduler": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-shims" + ] + }, + "org.apache.hive:hive-cli": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core" + ] + }, + "org.apache.hive:hive-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez", + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-exec": { + "locked": "2.3.7" + }, + "org.apache.hive:hive-jdbc": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.hive:hive-llap-client": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez" + ] + }, + "org.apache.hive:hive-llap-common": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hive:hive-llap-server": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-service" + ] + }, + "org.apache.hive:hive-llap-tez": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hive:hive-metastore": { + "locked": "2.3.7", + "transitive": [ + 
"org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-service" + ] + }, + "org.apache.hive:hive-serde": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.hive:hive-service": { + "locked": "2.3.7", + "transitive": [ + "com.klarna:hiverunner", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc" + ] + }, + "org.apache.hive:hive-service-rpc": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service" + ] + }, + "org.apache.hive:hive-shims": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde" + ] + }, + "org.apache.hive:hive-storage-api": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.hive:hive-vector-code-gen": { + "locked": "2.3.7", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.htrace:htrace-core": { + "locked": "3.1.0-incubating", + "transitive": [ + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server" + ] + }, + "org.apache.httpcomponents:httpclient": { + "locked": "4.5.2", + "transitive": [ + "net.java.dev.jets3t:jets3t", + "org.apache.calcite.avatica:avatica", + "org.apache.hadoop:hadoop-auth", + "org.apache.hive:hive-jdbc", + "org.apache.slider:slider-core", + "org.apache.thrift:libthrift" + ] + }, + "org.apache.httpcomponents:httpcore": { + "locked": "4.4.4", + "transitive": [ + "net.java.dev.jets3t:jets3t", + "org.apache.calcite.avatica:avatica", + "org.apache.hive:hive-jdbc", + "org.apache.httpcomponents:httpclient", + "org.apache.slider:slider-core", + "org.apache.thrift:libthrift" + ] + }, + "org.apache.iceberg:iceberg-api": { + "project": true, + "transitive": [ + "org.apache.iceberg:iceberg-core", + "org.apache.iceberg:iceberg-data", + "org.apache.iceberg:iceberg-orc", + "org.apache.iceberg:iceberg-parquet" + ] + }, + "org.apache.iceberg:iceberg-bundled-guava": { + "project": true, + "transitive": [ + "org.apache.iceberg:iceberg-api", + "org.apache.iceberg:iceberg-common" + ] + }, + "org.apache.iceberg:iceberg-common": { + "project": true, + "transitive": [ + "org.apache.iceberg:iceberg-core" + ] + }, + "org.apache.iceberg:iceberg-core": { + "project": true, + "transitive": [ + "org.apache.iceberg:iceberg-data", + "org.apache.iceberg:iceberg-orc", + "org.apache.iceberg:iceberg-parquet" + ] + }, + "org.apache.iceberg:iceberg-data": { + "project": true + }, + "org.apache.iceberg:iceberg-orc": { + "project": true + }, + "org.apache.iceberg:iceberg-parquet": { + "project": true + }, + "org.apache.ivy:ivy": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.logging.log4j:log4j-1.2-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-core", + 
"org.apache.logging.log4j:log4j-slf4j-impl", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-core": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-slf4j-impl": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-web": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.orc:orc-core": { + "locked": "1.6.3", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server", + "org.apache.iceberg:iceberg-orc" + ] + }, + "org.apache.orc:orc-shims": { + "locked": "1.6.3", + "transitive": [ + "org.apache.orc:orc-core" + ] + }, + "org.apache.parquet:parquet-avro": { + "locked": "1.11.0", + "transitive": [ + "org.apache.iceberg:iceberg-parquet" ] }, "org.apache.parquet:parquet-column": { @@ -4390,12 +10406,151 @@ "org.apache.parquet:parquet-avro" ] }, + "org.apache.parquet:parquet-hadoop-bundle": { + "locked": "1.8.1", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, "org.apache.parquet:parquet-jackson": { "locked": "1.11.0", "transitive": [ "org.apache.parquet:parquet-hadoop" ] }, + "org.apache.slider:slider-core": { + "locked": "0.90.2-incubating", + "transitive": [ + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.tez:hadoop-shim": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-internals" + ] + }, + "org.apache.tez:tez-api": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.tez:tez-common": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.tez:tez-dag": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.tez:tez-mapreduce": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.tez:tez-runtime-internals": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-dag" + ] + }, + "org.apache.tez:tez-runtime-library": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce" + ] + }, + "org.apache.thrift:libfb303": { + "locked": "0.9.3", + "transitive": [ + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, + "org.apache.thrift:libthrift": { + "locked": "0.9.3", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.thrift:libfb303" + ] + }, + "org.apache.twill:twill-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-common": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + 
"org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-api", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "org.apache.twill:twill-discovery-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.twill:twill-discovery-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core" + ] + }, + "org.apache.twill:twill-zookeeper": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.velocity:velocity": { + "locked": "1.5", + "transitive": [ + "org.apache.hive:hive-vector-code-gen" + ] + }, "org.apache.yetus:audience-annotations": { "locked": "0.11.0", "transitive": [ @@ -4405,12 +10560,32 @@ "org.apache.zookeeper:zookeeper": { "locked": "3.4.6", "transitive": [ + "org.apache.curator:apache-curator", "org.apache.curator:curator-client", "org.apache.curator:curator-framework", "org.apache.curator:curator-recipes", "org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-server-common" + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.slider:slider-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apiguardian:apiguardian-api": { + "locked": "1.1.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-api", + "org.junit.jupiter:junit-jupiter-engine", + "org.junit.jupiter:junit-jupiter-params", + "org.junit.platform:junit-platform-commons", + "org.junit.platform:junit-platform-engine" ] }, "org.checkerframework:checker-qual": { @@ -4419,6 +10594,12 @@ "com.github.ben-manes.caffeine:caffeine" ] }, + "org.codehaus.groovy:groovy-all": { + "locked": "2.4.4", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, "org.codehaus.jackson:jackson-core-asl": { "locked": "1.9.13", "transitive": [ @@ -4426,6 +10607,9 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core", "org.codehaus.jackson:jackson-jaxrs", "org.codehaus.jackson:jackson-mapper-asl", "org.codehaus.jackson:jackson-xc" @@ -4435,7 +10619,9 @@ "locked": "1.9.13", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" ] }, "org.codehaus.jackson:jackson-mapper-asl": { @@ -4445,6 +10631,12 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.slider:slider-core", "org.codehaus.jackson:jackson-jaxrs", "org.codehaus.jackson:jackson-xc" ] @@ -4453,14 +10645,68 @@ 
"locked": "1.9.13", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.slider:slider-core" + ] + }, + "org.codehaus.janino:commons-compiler": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.codehaus.janino:janino" + ] + }, + "org.codehaus.janino:janino": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core" ] }, "org.codehaus.jettison:jettison": { "locked": "1.1", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" + ] + }, + "org.datanucleus:datanucleus-api-jdo": { + "locked": "4.2.4", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-core": { + "locked": "4.1.17", + "transitive": [ + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-rdbms": { + "locked": "4.1.19", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:javax.jdo": { + "locked": "3.2.0-m3", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.eclipse.jetty.aggregate:jetty-all": { + "locked": "7.6.0.v20120127", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-service" + ] + }, + "org.eclipse.jetty.orbit:javax.servlet": { + "locked": "3.0.0.v201112011016", + "transitive": [ + "org.apache.hive:hive-common" ] }, "org.fusesource.leveldbjni:leveldbjni-all": { @@ -4468,8 +10714,10 @@ "transitive": [ "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-mapreduce-client-shuffle", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "org.hamcrest:hamcrest-core": { @@ -4479,25 +10727,124 @@ "org.mockito:mockito-core" ] }, + "org.jamon:jamon-runtime": { + "locked": "2.3.1", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service" + ] + }, "org.jetbrains:annotations": { "locked": "17.0.0", "transitive": [ "org.apache.orc:orc-core" ] }, - "org.mockito:mockito-core": { - "locked": "1.10.19" + "org.jruby.jcodings:jcodings": { + "locked": "1.0.8", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.jruby.joni:joni" + ] }, - "org.objenesis:objenesis": { - "locked": "2.1", + "org.jruby.joni:joni": { + "locked": "2.1.2", "transitive": [ - "org.mockito:mockito-core" + "org.apache.hbase:hbase-client" ] }, - "org.slf4j:slf4j-api": { - "locked": "1.7.25", + "org.junit.jupiter:junit-jupiter": { + "locked": "5.6.0", "transitive": [ - "org.apache.avro:avro", + "com.klarna:hiverunner" + ] + }, + "org.junit.jupiter:junit-jupiter-api": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter", + "org.junit.jupiter:junit-jupiter-engine", + "org.junit.jupiter:junit-jupiter-params" + ] + }, + "org.junit.jupiter:junit-jupiter-engine": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter" + ] + }, + "org.junit.jupiter:junit-jupiter-params": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter" + ] + }, + "org.junit.platform:junit-platform-commons": { + "locked": "1.6.0", + "transitive": [ + 
"org.junit.jupiter:junit-jupiter-api", + "org.junit.platform:junit-platform-engine" + ] + }, + "org.junit.platform:junit-platform-engine": { + "locked": "1.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-engine" + ] + }, + "org.mockito:mockito-core": { + "locked": "1.10.19" + }, + "org.objenesis:objenesis": { + "locked": "2.5.1", + "transitive": [ + "com.esotericsoftware:kryo-shaded", + "org.mockito:mockito-core" + ] + }, + "org.opentest4j:opentest4j": { + "locked": "1.2.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-api", + "org.junit.platform:junit-platform-engine" + ] + }, + "org.ow2.asm:asm-all": { + "locked": "5.0.2", + "transitive": [ + "org.apache.twill:twill-core" + ] + }, + "org.reflections:reflections": { + "locked": "0.9.8", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.roaringbitmap:RoaringBitmap": { + "locked": "0.4.9", + "transitive": [ + "org.apache.tez:tez-runtime-library" + ] + }, + "org.slf4j:slf4j-api": { + "locked": "1.7.25", + "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "com.jolbox:bonecp", + "com.ning:async-http-client", + "com.yammer.metrics:metrics-core", + "com.zaxxer:HikariCP", + "io.dropwizard.metrics:metrics-core", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", + "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", + "org.apache.calcite.avatica:avatica-metrics", + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid", "org.apache.curator:curator-client", "org.apache.directory.api:api-asn1-api", "org.apache.directory.api:api-util", @@ -4512,16 +10859,49 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.hive.shims:hive-shims-0.23", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive.shims:hive-shims-scheduler", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.hive:hive-shims", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", "org.apache.iceberg:iceberg-api", "org.apache.iceberg:iceberg-common", "org.apache.iceberg:iceberg-core", "org.apache.iceberg:iceberg-data", "org.apache.iceberg:iceberg-orc", "org.apache.iceberg:iceberg-parquet", + "org.apache.logging.log4j:log4j-slf4j-impl", "org.apache.orc:orc-core", "org.apache.orc:orc-shims", "org.apache.parquet:parquet-common", "org.apache.parquet:parquet-format-structures", + "org.apache.slider:slider-core", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-library", + "org.apache.thrift:libthrift", + "org.apache.twill:twill-common", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper", "org.apache.zookeeper:zookeeper", "org.slf4j:slf4j-simple" ] @@ -4547,6 
+10927,35 @@ "org.apache.parquet:parquet-hadoop" ] }, + "oro:oro": { + "locked": "2.0.8", + "transitive": [ + "org.apache.velocity:velocity" + ] + }, + "stax:stax-api": { + "locked": "1.0.1", + "transitive": [ + "org.apache.hive:hive-exec", + "org.codehaus.jettison:jettison" + ] + }, + "tomcat:jasper-compiler": { + "locked": "5.5.23", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, + "tomcat:jasper-runtime": { + "locked": "5.5.23", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, "xerces:xercesImpl": { "locked": "2.9.1", "transitive": [ @@ -4556,6 +10965,7 @@ "xml-apis:xml-apis": { "locked": "1.3.04", "transitive": [ + "dom4j:dom4j", "xerces:xercesImpl" ] }, @@ -4568,6 +10978,12 @@ } }, "testRuntimeClasspath": { + "ant:ant": { + "locked": "1.6.5", + "transitive": [ + "tomcat:jasper-compiler" + ] + }, "aopalliance:aopalliance": { "locked": "1.0", "transitive": [ @@ -4577,14 +10993,83 @@ "asm:asm": { "locked": "3.1", "transitive": [ + "asm:asm-tree", "com.sun.jersey:jersey-server", "org.sonatype.sisu.inject:cglib" ] }, + "asm:asm-commons": { + "locked": "3.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "asm:asm-tree": { + "locked": "3.1", + "transitive": [ + "asm:asm-commons" + ] + }, + "ch.qos.logback:logback-classic": { + "locked": "1.0.9", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "ch.qos.logback:logback-core": { + "locked": "1.0.9", + "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "co.cask.tephra:tephra-api": { + "locked": "0.6.0", + "transitive": [ + "co.cask.tephra:tephra-core", + "co.cask.tephra:tephra-hbase-compat-1.0", + "org.apache.hive:hive-metastore" + ] + }, + "co.cask.tephra:tephra-core": { + "locked": "0.6.0", + "transitive": [ + "co.cask.tephra:tephra-hbase-compat-1.0", + "org.apache.hive:hive-metastore" + ] + }, + "co.cask.tephra:tephra-hbase-compat-1.0": { + "locked": "0.6.0", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "com.beust:jcommander": { + "locked": "1.30", + "transitive": [ + "org.apache.slider:slider-core" + ] + }, + "com.esotericsoftware:kryo-shaded": { + "locked": "4.0.2", + "requested": "4.0.2" + }, + "com.esotericsoftware:minlog": { + "locked": "1.3.0", + "transitive": [ + "com.esotericsoftware:kryo-shaded" + ] + }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.10.2", + "requested": "2.6.5", "transitive": [ - "com.fasterxml.jackson.core:jackson-databind" + "com.fasterxml.jackson.core:jackson-databind", + "org.apache.calcite.avatica:avatica" ] }, "com.fasterxml.jackson.core:jackson-core": { @@ -4592,13 +11077,17 @@ "transitive": [ "com.fasterxml.jackson.core:jackson-databind", "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", "org.apache.iceberg:iceberg-core" ] }, "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.10.2", "transitive": [ + "io.dropwizard.metrics:metrics-json", "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", + "org.apache.hive:hive-common", "org.apache.iceberg:iceberg-core" ] }, @@ -4608,9 +11097,24 @@ "org.apache.iceberg:iceberg-core" ] }, + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter": { + "locked": "0.1.2", + "transitive": [ + 
"org.apache.hive:hive-common" + ] + }, "com.github.stephenc.findbugs:findbugs-annotations": { "locked": "1.3.9-1", "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", "org.apache.iceberg:iceberg-api", "org.apache.iceberg:iceberg-common", "org.apache.iceberg:iceberg-core", @@ -4622,13 +11126,26 @@ "com.google.code.findbugs:jsr305": { "locked": "3.0.0", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.calcite:calcite-core", + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-serde", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-internals", + "org.apache.twill:twill-api", + "org.apache.twill:twill-common", + "org.apache.twill:twill-zookeeper" ] }, "com.google.code.gson:gson": { "locked": "2.2.4", "transitive": [ - "org.apache.hadoop:hadoop-common" + "co.cask.tephra:tephra-core", + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" ] }, "com.google.errorprone:error_prone_annotations": { @@ -4638,8 +11155,13 @@ ] }, "com.google.guava:guava": { - "locked": "16.0.1", + "locked": "18.0", "transitive": [ + "co.cask.tephra:tephra-core", + "com.jolbox:bonecp", + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-linq4j", + "org.apache.curator:apache-curator", "org.apache.curator:curator-client", "org.apache.curator:curator-framework", "org.apache.curator:curator-recipes", @@ -4648,21 +11170,67 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-metastore", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper", + "org.reflections:reflections" + ] + }, + "com.google.inject.extensions:guice-assistedinject": { + "locked": "3.0", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "com.google.inject.extensions:guice-servlet": { + "locked": "3.0", + "transitive": [ + "com.sun.jersey.contribs:jersey-guice", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.slider:slider-core" ] }, 
"com.google.inject:guice": { "locked": "3.0", "transitive": [ + "co.cask.tephra:tephra-core", + "com.google.inject.extensions:guice-assistedinject", + "com.google.inject.extensions:guice-servlet", "com.sun.jersey.contribs:jersey-guice", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.google.protobuf:protobuf-java": { - "locked": "2.5.0", + "locked": "3.0.0-beta-1", "transitive": [ + "org.apache.calcite.avatica:avatica", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-mapreduce-client-app", @@ -4672,22 +11240,77 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle", "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-metastore", + "org.apache.orc:orc-core", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "com.jamesmurty.utils:java-xmlbuilder": { + "locked": "0.4", + "transitive": [ + "net.java.dev.jets3t:jets3t" + ] + }, + "com.jcraft:jsch": { + "locked": "0.1.42", + "transitive": [ + "org.apache.hadoop:hadoop-common" + ] + }, + "com.jolbox:bonecp": { + "locked": "0.8.0.RELEASE", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "com.klarna:hiverunner": { + "locked": "5.2.1", + "requested": "5.2.1" + }, + "com.lmax:disruptor": { + "locked": "3.3.0", + "transitive": [ + "org.apache.hbase:hbase-server" + ] + }, + "com.ning:async-http-client": { + "locked": "1.8.16", + "transitive": [ + "org.apache.tez:tez-runtime-library" ] }, "com.sun.jersey.contribs:jersey-guice": { "locked": "1.9", "transitive": [ "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "com.sun.jersey:jersey-client": { "locked": "1.9", "transitive": [ "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "com.sun.jersey:jersey-core": { @@ -4696,22 +11319,36 @@ "com.sun.jersey:jersey-client", "com.sun.jersey:jersey-json", "com.sun.jersey:jersey-server", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + 
"org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-server" ] }, "com.sun.jersey:jersey-json": { "locked": "1.9", "transitive": [ + "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "com.sun.jersey:jersey-server": { "locked": "1.9", "transitive": [ "com.sun.jersey.contribs:jersey-guice", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" ] }, "com.sun.xml.bind:jaxb-impl": { @@ -4720,6 +11357,26 @@ "com.sun.jersey:jersey-json" ] }, + "com.tdunning:json": { + "locked": "1.8", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server" + ] + }, + "com.yammer.metrics:metrics-core": { + "locked": "2.2.0", + "transitive": [ + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server" + ] + }, + "com.zaxxer:HikariCP": { + "locked": "2.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, "commons-beanutils:commons-beanutils": { "locked": "1.7.0", "transitive": [ @@ -4738,26 +11395,48 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.tez:tez-dag" ] }, "commons-codec:commons-codec": { - "locked": "1.6", + "locked": "1.9", "transitive": [ "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", "org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", - "org.apache.httpcomponents:httpclient" + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.httpcomponents:httpclient", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-library" ] }, "commons-collections:commons-collections": { "locked": "3.2.2", "transitive": [ "commons-configuration:commons-configuration", - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.tez:tez-mapreduce" ] }, "commons-configuration:commons-configuration": { @@ -4766,16 +11445,38 @@ "org.apache.hadoop:hadoop-common" ] }, + "commons-daemon:commons-daemon": { + "locked": "1.0.13", + "transitive": [ + "org.apache.hadoop:hadoop-hdfs" + ] + }, + "commons-dbcp:commons-dbcp": { + "locked": "1.4", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-metastore" + ] + }, "commons-digester:commons-digester": { "locked": "1.8", "transitive": [ 
"commons-configuration:commons-configuration" ] }, + "commons-el:commons-el": { + "locked": "1.0", + "transitive": [ + "tomcat:jasper-runtime" + ] + }, "commons-httpclient:commons-httpclient": { "locked": "3.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core" ] }, "commons-io:commons-io": { @@ -4783,7 +11484,14 @@ "transitive": [ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" ] }, "commons-lang:commons-lang": { @@ -4795,25 +11503,62 @@ "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server", + "org.apache.hive.shims:hive-shims-0.23", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", + "org.apache.orc:orc-core", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-library", + "org.apache.velocity:velocity" ] }, "commons-logging:commons-logging": { - "locked": "1.1.3", + "locked": "1.2", "transitive": [ "commons-beanutils:commons-beanutils", "commons-beanutils:commons-beanutils-core", "commons-configuration:commons-configuration", "commons-digester:commons-digester", + "commons-el:commons-el", "commons-httpclient:commons-httpclient", + "net.java.dev.jets3t:jets3t", + "net.sf.jpam:jpam", "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-api", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", - "org.apache.httpcomponents:httpclient" + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.httpcomponents:httpclient", + "org.apache.slider:slider-core" ] }, "commons-net:commons-net": { @@ -4825,26 +11570,88 @@ "commons-pool:commons-pool": { "locked": "1.6", "transitive": [ + "commons-dbcp:commons-dbcp", + "org.apache.hive:hive-metastore", "org.apache.parquet:parquet-hadoop" ] }, + "dom4j:dom4j": { + "locked": "1.6.1", + "transitive": [ + 
"org.reflections:reflections" + ] + }, "io.airlift:aircompressor": { "locked": "0.15", "transitive": [ "org.apache.orc:orc-core" ] }, + "io.dropwizard.metrics:metrics-core": { + "locked": "3.1.2", + "transitive": [ + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-json": { + "locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "io.dropwizard.metrics:metrics-jvm": { + "locked": "3.1.0", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "io.netty:netty": { - "locked": "3.7.0.Final", + "locked": "3.9.2.Final", "transitive": [ + "com.ning:async-http-client", "org.apache.hadoop:hadoop-hdfs", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hive:hive-llap-server", "org.apache.zookeeper:zookeeper" ] }, "io.netty:netty-all": { "locked": "4.0.23.Final", "transitive": [ - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-server" + ] + }, + "it.unimi.dsi:fastutil": { + "locked": "6.5.6", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "jakarta.jms:jakarta.jms-api": { + "locked": "2.0.2", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions" + ] + }, + "javassist:javassist": { + "locked": "3.12.1.GA", + "transitive": [ + "org.reflections:reflections" + ] + }, + "javax.activation:activation": { + "locked": "1.1", + "transitive": [ + "javax.mail:mail", + "org.eclipse.jetty.aggregate:jetty-all" ] }, "javax.annotation:javax.annotation-api": { @@ -4860,17 +11667,56 @@ "com.sun.jersey.contribs:jersey-guice" ] }, + "javax.jdo:jdo-api": { + "locked": "3.0.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "javax.mail:mail": { + "locked": "1.4.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, "javax.servlet.jsp:jsp-api": { "locked": "2.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hadoop:hadoop-common", + "org.apache.slider:slider-core" + ] + }, + "javax.servlet:jsp-api": { + "locked": "2.0", + "transitive": [ + "tomcat:jasper-compiler" ] }, "javax.servlet:servlet-api": { "locked": "2.5", "transitive": [ + "javax.servlet:jsp-api", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.slider:slider-core", + "org.apache.tez:tez-dag", + "org.eclipse.jetty.aggregate:jetty-all", + "tomcat:jasper-runtime" + ] + }, + "javax.transaction:jta": { + "locked": "1.1", + "transitive": [ + "javax.jdo:jdo-api" + ] + }, + "javax.transaction:transaction-api": { + "locked": "1.1", + "transitive": [ + "org.datanucleus:javax.jdo" ] }, "javax.xml.bind:jaxb-api": { @@ -4878,18 +11724,46 @@ "transitive": [ "com.sun.xml.bind:jaxb-impl", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", "org.apache.orc:orc-core" ] }, + "javolution:javolution": { + "locked": "5.5.1", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + 
}, "jline:jline": { - "locked": "0.9.94", + "locked": "2.12", "transitive": [ + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", "org.apache.zookeeper:zookeeper" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-common" + ] + }, "junit:junit": { - "locked": "4.12" + "locked": "4.12", + "transitive": [ + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server" + ] }, "log4j:log4j": { "locked": "1.2.17", @@ -4899,192 +11773,710 @@ "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-client", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-annotations", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-protocol", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core", "org.apache.zookeeper:zookeeper" ] }, - "org.apache.avro:avro": { - "locked": "1.9.2", + "net.hydromatic:eigenbase-properties": { + "locked": "1.1.5", "transitive": [ - "org.apache.iceberg:iceberg-core" + "org.apache.calcite:calcite-core" ] }, - "org.apache.commons:commons-compress": { - "locked": "1.19", + "net.java.dev.jets3t:jets3t": { + "locked": "0.9.0", "transitive": [ - "org.apache.avro:avro", - "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-common" + ] + }, + "net.sf.jpam:jpam": { + "locked": "1.1", + "transitive": [ + "org.apache.hive:hive-service" + ] + }, + "net.sf.opencsv:opencsv": { + "locked": "2.3", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, + "org.antlr:ST4": { + "locked": "4.0.4", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.antlr:antlr-runtime": { + "locked": "3.5.2", + "transitive": [ + "org.antlr:ST4", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.ant:ant": { + "locked": "1.9.1", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-vector-code-gen" + ] + }, + "org.apache.ant:ant-launcher": { + "locked": "1.9.1", + "transitive": [ + "org.apache.ant:ant" + ] + }, + "org.apache.avro:avro": { + "locked": "1.9.2", + "requested": "1.9.2", + "transitive": [ + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-serde", + "org.apache.iceberg:iceberg-core", + "org.apache.slider:slider-core" + ] + }, + "org.apache.calcite.avatica:avatica": { + "locked": "1.8.0", + "transitive": [ + "org.apache.calcite:calcite-core" + ] + }, + "org.apache.calcite.avatica:avatica-metrics": { + "locked": "1.8.0", + "transitive": [ + "org.apache.calcite.avatica:avatica" + ] + }, + "org.apache.calcite:calcite-core": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-druid", + "org.apache.hive:hive-exec" + ] + }, + "org.apache.calcite:calcite-druid": { + "locked": "1.10.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + 
"org.apache.calcite:calcite-linq4j": { + "locked": "1.10.0", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid" + ] + }, + "org.apache.commons:commons-collections4": { + "locked": "4.1", + "transitive": [ + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag" + ] + }, + "org.apache.commons:commons-compress": { + "locked": "1.19", + "transitive": [ + "org.apache.avro:avro", + "org.apache.hadoop:hadoop-common", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.slider:slider-core" + ] + }, + "org.apache.commons:commons-lang3": { + "locked": "3.2", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-tez" + ] + }, + "org.apache.commons:commons-math": { + "locked": "2.2", + "transitive": [ + "org.apache.hbase:hbase-hadoop-compat", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server" + ] + }, + "org.apache.commons:commons-math3": { + "locked": "3.1.1", + "transitive": [ + "org.apache.hadoop:hadoop-common", + "org.apache.tez:tez-dag" + ] + }, + "org.apache.curator:apache-curator": { + "locked": "2.7.1", + "transitive": [ + "org.apache.hive:hive-exec", + "org.apache.hive:hive-llap-client" + ] + }, + "org.apache.curator:curator-client": { + "locked": "2.7.1", + "transitive": [ + "org.apache.curator:curator-framework", + "org.apache.hadoop:hadoop-common", + "org.apache.slider:slider-core" + ] + }, + "org.apache.curator:curator-framework": { + "locked": "2.7.1", + "transitive": [ + "org.apache.curator:curator-recipes", + "org.apache.hadoop:hadoop-auth", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-service", + "org.apache.slider:slider-core" + ] + }, + "org.apache.curator:curator-recipes": { + "locked": "2.7.1", + "transitive": [ + "org.apache.hadoop:hadoop-common", + "org.apache.hive:hive-service", + "org.apache.slider:slider-core" + ] + }, + "org.apache.derby:derby": { + "locked": "10.10.2.0", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.directory.api:api-asn1-api": { + "locked": "1.0.0-M20", + "transitive": [ + "org.apache.directory.server:apacheds-kerberos-codec" + ] + }, + "org.apache.directory.api:api-util": { + "locked": "1.0.0-M20", + "transitive": [ + "org.apache.directory.server:apacheds-kerberos-codec" + ] + }, + "org.apache.directory.server:apacheds-i18n": { + "locked": "2.0.0-M15", + "transitive": [ + "org.apache.directory.server:apacheds-kerberos-codec" + ] + }, + "org.apache.directory.server:apacheds-kerberos-codec": { + "locked": "2.0.0-M15", + "transitive": [ + "org.apache.hadoop:hadoop-auth" + ] + }, + "org.apache.geronimo.specs:geronimo-annotation_1.0_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jaspic_1.0_spec": { + "locked": "1.0", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.geronimo.specs:geronimo-jta_1.1_spec": { + "locked": "1.1.1", + "transitive": [ + "org.eclipse.jetty.aggregate:jetty-all" + ] + }, + "org.apache.hadoop:hadoop-annotations": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-client", + "org.apache.hadoop:hadoop-common", + 
"org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-api", + "org.apache.hadoop:hadoop-yarn-client", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-archives": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hive.hcatalog:hive-hcatalog-core" + ] + }, + "org.apache.hadoop:hadoop-auth": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.tez:tez-api", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-client": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" + ] + }, + "org.apache.hadoop:hadoop-common": { + "locked": "2.7.3", + "transitive": [ + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "org.apache.hadoop:hadoop-client", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-hdfs": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.slider:slider-core", + "org.apache.tez:tez-api" + ] + }, + "org.apache.hadoop:hadoop-mapreduce-client-app": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-client" + ] + }, + "org.apache.hadoop:hadoop-mapreduce-client-common": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-mapreduce-client-app", + "org.apache.hadoop:hadoop-mapreduce-client-jobclient", + "org.apache.hadoop:hadoop-mapreduce-client-shuffle", + "org.apache.tez:tez-mapreduce" + ] + }, + "org.apache.hadoop:hadoop-mapreduce-client-core": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-client", + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.tez:tez-mapreduce" + ] + }, + "org.apache.hadoop:hadoop-mapreduce-client-jobclient": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-client" + ] + }, + "org.apache.hadoop:hadoop-mapreduce-client-shuffle": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-mapreduce-client-app", + 
"org.apache.hadoop:hadoop-mapreduce-client-jobclient" + ] + }, + "org.apache.hadoop:hadoop-yarn-api": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-client", + "org.apache.hadoop:hadoop-yarn-client", + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-yarn-client": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce" + ] + }, + "org.apache.hadoop:hadoop-yarn-common": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-core", + "org.apache.hadoop:hadoop-yarn-client", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy", + "org.apache.tez:tez-api", + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.hadoop:hadoop-yarn-registry": { + "locked": "2.7.1", + "transitive": [ + "org.apache.slider:slider-core" + ] + }, + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" + ] + }, + "org.apache.hadoop:hadoop-yarn-server-common": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-mapreduce-client-common", + "org.apache.hadoop:hadoop-mapreduce-client-shuffle", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hadoop:hadoop-yarn-server-web-proxy" + ] + }, + "org.apache.hadoop:hadoop-yarn-server-nodemanager": { + "locked": "2.7.3", + "transitive": [ + "org.apache.hadoop:hadoop-mapreduce-client-shuffle" + ] + }, + "org.apache.hadoop:hadoop-yarn-server-resourcemanager": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hive.shims:hive-shims-0.23" + ] + }, + "org.apache.hadoop:hadoop-yarn-server-web-proxy": { + "locked": "2.7.2", + "transitive": [ + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.tez:tez-dag" + ] + }, + "org.apache.hbase:hbase-annotations": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-protocol" + ] + }, + "org.apache.hbase:hbase-client": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore" + ] + }, + "org.apache.hbase:hbase-common": { + "locked": 
"1.1.1", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hbase:hbase-hadoop-compat": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-hadoop2-compat", + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hbase:hbase-hadoop2-compat": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-prefix-tree", + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.hbase:hbase-prefix-tree": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-server" + ] + }, + "org.apache.hbase:hbase-procedure": { + "locked": "1.1.1", + "transitive": [ + "org.apache.hbase:hbase-server" ] }, - "org.apache.commons:commons-math3": { - "locked": "3.1.1", + "org.apache.hbase:hbase-protocol": { + "locked": "1.1.1", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-procedure", + "org.apache.hbase:hbase-server" ] }, - "org.apache.curator:curator-client": { - "locked": "2.7.1", + "org.apache.hbase:hbase-server": { + "locked": "1.1.1", "transitive": [ - "org.apache.curator:curator-framework", - "org.apache.hadoop:hadoop-common" + "org.apache.hive:hive-llap-server" ] }, - "org.apache.curator:curator-framework": { - "locked": "2.7.1", + "org.apache.hive.hcatalog:hive-hcatalog-core": { + "locked": "2.3.7", "transitive": [ - "org.apache.curator:curator-recipes", - "org.apache.hadoop:hadoop-auth" + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client" ] }, - "org.apache.curator:curator-recipes": { - "locked": "2.7.1", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hive.hcatalog:hive-webhcat-java-client" ] }, - "org.apache.directory.api:api-asn1-api": { - "locked": "1.0.0-M20", + "org.apache.hive.hcatalog:hive-webhcat-java-client": { + "locked": "2.3.7", "transitive": [ - "org.apache.directory.server:apacheds-kerberos-codec" + "com.klarna:hiverunner" ] }, - "org.apache.directory.api:api-util": { - "locked": "1.0.0-M20", + "org.apache.hive.shims:hive-shims-0.23": { + "locked": "2.3.7", "transitive": [ - "org.apache.directory.server:apacheds-kerberos-codec" + "org.apache.hive:hive-shims" ] }, - "org.apache.directory.server:apacheds-i18n": { - "locked": "2.0.0-M15", + "org.apache.hive.shims:hive-shims-common": { + "locked": "2.3.7", "transitive": [ - "org.apache.directory.server:apacheds-kerberos-codec" + "org.apache.hive.shims:hive-shims-0.23", + "org.apache.hive.shims:hive-shims-scheduler", + "org.apache.hive:hive-shims" ] }, - "org.apache.directory.server:apacheds-kerberos-codec": { - "locked": "2.0.0-M15", + "org.apache.hive.shims:hive-shims-scheduler": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-auth" + "org.apache.hive:hive-shims" ] }, - "org.apache.hadoop:hadoop-annotations": { - "locked": "2.7.3", + "org.apache.hive:hive-cli": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-common" + "org.apache.hive.hcatalog:hive-hcatalog-core" ] }, - "org.apache.hadoop:hadoop-auth": { - "locked": "2.7.3", + "org.apache.hive:hive-common": { + "locked": "2.3.7", 
"transitive": [ - "org.apache.hadoop:hadoop-common" + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez", + "org.apache.hive:hive-serde" ] }, - "org.apache.hadoop:hadoop-client": { - "locked": "2.7.3" + "org.apache.hive:hive-exec": { + "locked": "2.3.7" }, - "org.apache.hadoop:hadoop-common": { - "locked": "2.7.3", + "org.apache.hive:hive-jdbc": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-client" + "com.klarna:hiverunner" ] }, - "org.apache.hadoop:hadoop-hdfs": { - "locked": "2.7.3", + "org.apache.hive:hive-llap-client": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-client" + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez" ] }, - "org.apache.hadoop:hadoop-mapreduce-client-app": { - "locked": "2.7.3", + "org.apache.hive:hive-llap-common": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-client" + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-server" ] }, - "org.apache.hadoop:hadoop-mapreduce-client-common": { - "locked": "2.7.3", + "org.apache.hive:hive-llap-server": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-mapreduce-client-app", - "org.apache.hadoop:hadoop-mapreduce-client-jobclient", - "org.apache.hadoop:hadoop-mapreduce-client-shuffle" + "org.apache.hive:hive-service" ] }, - "org.apache.hadoop:hadoop-mapreduce-client-core": { - "locked": "2.7.3", + "org.apache.hive:hive-llap-tez": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-mapreduce-client-common" + "org.apache.hive:hive-llap-server" ] }, - "org.apache.hadoop:hadoop-mapreduce-client-jobclient": { - "locked": "2.7.3", + "org.apache.hive:hive-metastore": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-client" + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-service" ] }, - "org.apache.hadoop:hadoop-mapreduce-client-shuffle": { - "locked": "2.7.3", + "org.apache.hive:hive-serde": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-mapreduce-client-app", - "org.apache.hadoop:hadoop-mapreduce-client-jobclient" + "com.klarna:hiverunner", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore" ] }, - "org.apache.hadoop:hadoop-yarn-api": { - "locked": "2.7.3", + "org.apache.hive:hive-service": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-client", - "org.apache.hadoop:hadoop-yarn-client", - "org.apache.hadoop:hadoop-yarn-common", - "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "com.klarna:hiverunner", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc" ] }, - "org.apache.hadoop:hadoop-yarn-client": { - "locked": "2.7.3", + "org.apache.hive:hive-service-rpc": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-mapreduce-client-common" + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service" ] }, - "org.apache.hadoop:hadoop-yarn-common": { - "locked": "2.7.3", + "org.apache.hive:hive-shims": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-mapreduce-client-common", - 
"org.apache.hadoop:hadoop-mapreduce-client-core", - "org.apache.hadoop:hadoop-yarn-client", - "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde" ] }, - "org.apache.hadoop:hadoop-yarn-server-common": { - "locked": "2.7.3", + "org.apache.hive:hive-storage-api": { + "locked": "2.4.0", "transitive": [ - "org.apache.hadoop:hadoop-mapreduce-client-common", - "org.apache.hadoop:hadoop-mapreduce-client-shuffle", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hive:hive-common" ] }, - "org.apache.hadoop:hadoop-yarn-server-nodemanager": { - "locked": "2.7.3", + "org.apache.hive:hive-vector-code-gen": { + "locked": "2.3.7", "transitive": [ - "org.apache.hadoop:hadoop-mapreduce-client-shuffle" + "org.apache.hive:hive-exec" ] }, "org.apache.htrace:htrace-core": { "locked": "3.1.0-incubating", "transitive": [ "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-hdfs" + "org.apache.hadoop:hadoop-hdfs", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-common", + "org.apache.hbase:hbase-server" ] }, "org.apache.httpcomponents:httpclient": { - "locked": "4.2.5", + "locked": "4.5.2", "transitive": [ - "org.apache.hadoop:hadoop-auth" + "net.java.dev.jets3t:jets3t", + "org.apache.calcite.avatica:avatica", + "org.apache.hadoop:hadoop-auth", + "org.apache.hive:hive-jdbc", + "org.apache.slider:slider-core", + "org.apache.thrift:libthrift" ] }, "org.apache.httpcomponents:httpcore": { - "locked": "4.2.4", + "locked": "4.4.4", "transitive": [ - "org.apache.httpcomponents:httpclient" + "net.java.dev.jets3t:jets3t", + "org.apache.calcite.avatica:avatica", + "org.apache.hive:hive-jdbc", + "org.apache.httpcomponents:httpclient", + "org.apache.slider:slider-core", + "org.apache.thrift:libthrift" ] }, "org.apache.iceberg:iceberg-api": { @@ -5126,9 +12518,52 @@ "org.apache.iceberg:iceberg-parquet": { "project": true }, + "org.apache.ivy:ivy": { + "locked": "2.4.0", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, + "org.apache.logging.log4j:log4j-1.2-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-api": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-core", + "org.apache.logging.log4j:log4j-slf4j-impl", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-core": { + "locked": "2.6.2", + "transitive": [ + "org.apache.logging.log4j:log4j-1.2-api", + "org.apache.logging.log4j:log4j-web" + ] + }, + "org.apache.logging.log4j:log4j-slf4j-impl": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-common" + ] + }, + "org.apache.logging.log4j:log4j-web": { + "locked": "2.6.2", + "transitive": [ + "org.apache.hive:hive-common" + ] + }, "org.apache.orc:orc-core": { "locked": "1.6.3", "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-llap-server", "org.apache.iceberg:iceberg-orc" ] }, @@ -5178,12 +12613,151 @@ "org.apache.parquet:parquet-avro" ] }, + "org.apache.parquet:parquet-hadoop-bundle": { + "locked": "1.8.1", + "transitive": [ + "org.apache.hive:hive-serde" + ] + }, "org.apache.parquet:parquet-jackson": { "locked": "1.11.0", "transitive": [ 
"org.apache.parquet:parquet-hadoop" ] }, + "org.apache.slider:slider-core": { + "locked": "0.90.2-incubating", + "transitive": [ + "org.apache.hive:hive-llap-server" + ] + }, + "org.apache.tez:hadoop-shim": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-runtime-internals" + ] + }, + "org.apache.tez:tez-api": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-common", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.tez:tez-common": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-internals", + "org.apache.tez:tez-runtime-library" + ] + }, + "org.apache.tez:tez-dag": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.tez:tez-mapreduce": { + "locked": "0.9.1", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.apache.tez:tez-runtime-internals": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-dag" + ] + }, + "org.apache.tez:tez-runtime-library": { + "locked": "0.9.1", + "transitive": [ + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce" + ] + }, + "org.apache.thrift:libfb303": { + "locked": "0.9.3", + "transitive": [ + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, + "org.apache.thrift:libthrift": { + "locked": "0.9.3", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.thrift:libfb303" + ] + }, + "org.apache.twill:twill-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-common": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-api", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apache.twill:twill-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core" + ] + }, + "org.apache.twill:twill-discovery-api": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-api", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.twill:twill-discovery-core": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core" + ] + }, + "org.apache.twill:twill-zookeeper": { + "locked": "0.6.0-incubating", + "transitive": [ + "co.cask.tephra:tephra-core", + "org.apache.twill:twill-core", + "org.apache.twill:twill-discovery-core" + ] + }, + "org.apache.velocity:velocity": { + "locked": "1.5", + "transitive": [ + "org.apache.hive:hive-vector-code-gen" + ] + }, "org.apache.yetus:audience-annotations": { "locked": "0.11.0", "transitive": [ @@ -5193,12 +12767,32 @@ "org.apache.zookeeper:zookeeper": { "locked": "3.4.6", "transitive": [ + "org.apache.curator:apache-curator", "org.apache.curator:curator-client", "org.apache.curator:curator-framework", "org.apache.curator:curator-recipes", 
"org.apache.hadoop:hadoop-auth", "org.apache.hadoop:hadoop-common", - "org.apache.hadoop:hadoop-yarn-server-common" + "org.apache.hadoop:hadoop-yarn-server-common", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.slider:slider-core", + "org.apache.twill:twill-zookeeper" + ] + }, + "org.apiguardian:apiguardian-api": { + "locked": "1.1.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-api", + "org.junit.jupiter:junit-jupiter-engine", + "org.junit.jupiter:junit-jupiter-params", + "org.junit.platform:junit-platform-commons", + "org.junit.platform:junit-platform-engine" ] }, "org.checkerframework:checker-qual": { @@ -5207,6 +12801,12 @@ "com.github.ben-manes.caffeine:caffeine" ] }, + "org.codehaus.groovy:groovy-all": { + "locked": "2.4.4", + "transitive": [ + "org.apache.hive:hive-exec" + ] + }, "org.codehaus.jackson:jackson-core-asl": { "locked": "1.9.13", "transitive": [ @@ -5214,6 +12814,9 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core", "org.codehaus.jackson:jackson-jaxrs", "org.codehaus.jackson:jackson-mapper-asl", "org.codehaus.jackson:jackson-xc" @@ -5223,7 +12826,9 @@ "locked": "1.9.13", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hbase:hbase-server", + "org.apache.slider:slider-core" ] }, "org.codehaus.jackson:jackson-mapper-asl": { @@ -5233,6 +12838,12 @@ "org.apache.hadoop:hadoop-common", "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-yarn-common", + "org.apache.hadoop:hadoop-yarn-registry", + "org.apache.hbase:hbase-client", + "org.apache.hbase:hbase-server", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.slider:slider-core", "org.codehaus.jackson:jackson-jaxrs", "org.codehaus.jackson:jackson-xc" ] @@ -5241,14 +12852,68 @@ "locked": "1.9.13", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-common" + "org.apache.hadoop:hadoop-yarn-common", + "org.apache.slider:slider-core" + ] + }, + "org.codehaus.janino:commons-compiler": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core", + "org.codehaus.janino:janino" + ] + }, + "org.codehaus.janino:janino": { + "locked": "2.7.6", + "transitive": [ + "org.apache.calcite:calcite-core" ] }, "org.codehaus.jettison:jettison": { "locked": "1.1", "transitive": [ "com.sun.jersey:jersey-json", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" + ] + }, + "org.datanucleus:datanucleus-api-jdo": { + "locked": "4.2.4", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-core": { + "locked": "4.1.17", + "transitive": [ + "org.apache.hive:hive-exec", + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:datanucleus-rdbms": { + "locked": "4.1.19", + "transitive": [ + "org.apache.hive:hive-metastore" + ] + }, + "org.datanucleus:javax.jdo": { + "locked": "3.2.0-m3", + "transitive": 
[ + "org.apache.hive:hive-metastore" + ] + }, + "org.eclipse.jetty.aggregate:jetty-all": { + "locked": "7.6.0.v20120127", + "transitive": [ + "org.apache.hive:hive-common", + "org.apache.hive:hive-service" + ] + }, + "org.eclipse.jetty.orbit:javax.servlet": { + "locked": "3.0.0.v201112011016", + "transitive": [ + "org.apache.hive:hive-common" ] }, "org.fusesource.leveldbjni:leveldbjni-all": { @@ -5256,8 +12921,10 @@ "transitive": [ "org.apache.hadoop:hadoop-hdfs", "org.apache.hadoop:hadoop-mapreduce-client-shuffle", + "org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice", "org.apache.hadoop:hadoop-yarn-server-common", - "org.apache.hadoop:hadoop-yarn-server-nodemanager" + "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager" ] }, "org.hamcrest:hamcrest-core": { @@ -5267,25 +12934,124 @@ "org.mockito:mockito-core" ] }, + "org.jamon:jamon-runtime": { + "locked": "2.3.1", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service" + ] + }, "org.jetbrains:annotations": { "locked": "17.0.0", "transitive": [ "org.apache.orc:orc-core" ] }, + "org.jruby.jcodings:jcodings": { + "locked": "1.0.8", + "transitive": [ + "org.apache.hbase:hbase-client", + "org.jruby.joni:joni" + ] + }, + "org.jruby.joni:joni": { + "locked": "2.1.2", + "transitive": [ + "org.apache.hbase:hbase-client" + ] + }, + "org.junit.jupiter:junit-jupiter": { + "locked": "5.6.0", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.junit.jupiter:junit-jupiter-api": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter", + "org.junit.jupiter:junit-jupiter-engine", + "org.junit.jupiter:junit-jupiter-params" + ] + }, + "org.junit.jupiter:junit-jupiter-engine": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter" + ] + }, + "org.junit.jupiter:junit-jupiter-params": { + "locked": "5.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter" + ] + }, + "org.junit.platform:junit-platform-commons": { + "locked": "1.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-api", + "org.junit.platform:junit-platform-engine" + ] + }, + "org.junit.platform:junit-platform-engine": { + "locked": "1.6.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-engine" + ] + }, "org.mockito:mockito-core": { "locked": "1.10.19" }, "org.objenesis:objenesis": { - "locked": "2.1", + "locked": "2.5.1", "transitive": [ + "com.esotericsoftware:kryo-shaded", "org.mockito:mockito-core" ] }, + "org.opentest4j:opentest4j": { + "locked": "1.2.0", + "transitive": [ + "org.junit.jupiter:junit-jupiter-api", + "org.junit.platform:junit-platform-engine" + ] + }, + "org.ow2.asm:asm-all": { + "locked": "5.0.2", + "transitive": [ + "org.apache.twill:twill-core" + ] + }, + "org.reflections:reflections": { + "locked": "0.9.8", + "transitive": [ + "com.klarna:hiverunner" + ] + }, + "org.roaringbitmap:RoaringBitmap": { + "locked": "0.4.9", + "transitive": [ + "org.apache.tez:tez-runtime-library" + ] + }, "org.slf4j:slf4j-api": { "locked": "1.7.25", "transitive": [ + "ch.qos.logback:logback-classic", + "co.cask.tephra:tephra-core", + "com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter", + "com.jolbox:bonecp", + "com.ning:async-http-client", + "com.yammer.metrics:metrics-core", + "com.zaxxer:HikariCP", + "io.dropwizard.metrics:metrics-core", + "io.dropwizard.metrics:metrics-json", + "io.dropwizard.metrics:metrics-jvm", "org.apache.avro:avro", + "org.apache.calcite.avatica:avatica", + 
"org.apache.calcite.avatica:avatica-metrics", + "org.apache.calcite:calcite-core", + "org.apache.calcite:calcite-druid", "org.apache.curator:curator-client", "org.apache.directory.api:api-asn1-api", "org.apache.directory.api:api-util", @@ -5300,16 +13066,49 @@ "org.apache.hadoop:hadoop-mapreduce-client-shuffle", "org.apache.hadoop:hadoop-yarn-common", "org.apache.hadoop:hadoop-yarn-server-nodemanager", + "org.apache.hadoop:hadoop-yarn-server-resourcemanager", + "org.apache.hive.hcatalog:hive-hcatalog-core", + "org.apache.hive.hcatalog:hive-hcatalog-server-extensions", + "org.apache.hive.hcatalog:hive-webhcat-java-client", + "org.apache.hive.shims:hive-shims-0.23", + "org.apache.hive.shims:hive-shims-common", + "org.apache.hive.shims:hive-shims-scheduler", + "org.apache.hive:hive-cli", + "org.apache.hive:hive-common", + "org.apache.hive:hive-exec", + "org.apache.hive:hive-jdbc", + "org.apache.hive:hive-llap-client", + "org.apache.hive:hive-llap-common", + "org.apache.hive:hive-llap-server", + "org.apache.hive:hive-llap-tez", + "org.apache.hive:hive-metastore", + "org.apache.hive:hive-serde", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc", + "org.apache.hive:hive-shims", + "org.apache.hive:hive-storage-api", + "org.apache.hive:hive-vector-code-gen", "org.apache.iceberg:iceberg-api", "org.apache.iceberg:iceberg-common", "org.apache.iceberg:iceberg-core", "org.apache.iceberg:iceberg-data", "org.apache.iceberg:iceberg-orc", "org.apache.iceberg:iceberg-parquet", + "org.apache.logging.log4j:log4j-slf4j-impl", "org.apache.orc:orc-core", "org.apache.orc:orc-shims", "org.apache.parquet:parquet-common", "org.apache.parquet:parquet-format-structures", + "org.apache.slider:slider-core", + "org.apache.tez:hadoop-shim", + "org.apache.tez:tez-api", + "org.apache.tez:tez-dag", + "org.apache.tez:tez-mapreduce", + "org.apache.tez:tez-runtime-library", + "org.apache.thrift:libthrift", + "org.apache.twill:twill-common", + "org.apache.twill:twill-core", + "org.apache.twill:twill-zookeeper", "org.apache.zookeeper:zookeeper", "org.slf4j:slf4j-simple" ] @@ -5335,6 +13134,35 @@ "org.apache.parquet:parquet-hadoop" ] }, + "oro:oro": { + "locked": "2.0.8", + "transitive": [ + "org.apache.velocity:velocity" + ] + }, + "stax:stax-api": { + "locked": "1.0.1", + "transitive": [ + "org.apache.hive:hive-exec", + "org.codehaus.jettison:jettison" + ] + }, + "tomcat:jasper-compiler": { + "locked": "5.5.23", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, + "tomcat:jasper-runtime": { + "locked": "5.5.23", + "transitive": [ + "org.apache.hbase:hbase-server", + "org.apache.hive:hive-service", + "org.apache.hive:hive-service-rpc" + ] + }, "xerces:xercesImpl": { "locked": "2.9.1", "transitive": [ @@ -5344,6 +13172,7 @@ "xml-apis:xml-apis": { "locked": "1.3.04", "transitive": [ + "dom4j:dom4j", "xerces:xercesImpl" ] }, diff --git a/mr/src/main/java/org/apache/iceberg/mr/IcebergRecordReader.java b/mr/src/main/java/org/apache/iceberg/mr/IcebergRecordReader.java new file mode 100644 index 000000000000..32e5229fded8 --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/IcebergRecordReader.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iceberg.mr; + + import org.apache.hadoop.conf.Configuration; + import org.apache.iceberg.DataFile; + import org.apache.iceberg.DataTask; + import org.apache.iceberg.FileScanTask; + import org.apache.iceberg.Schema; + import org.apache.iceberg.StructLike; + import org.apache.iceberg.avro.Avro; + import org.apache.iceberg.data.GenericRecord; + import org.apache.iceberg.data.Record; + import org.apache.iceberg.data.avro.DataReader; + import org.apache.iceberg.data.orc.GenericOrcReader; + import org.apache.iceberg.data.parquet.GenericParquetReaders; + import org.apache.iceberg.expressions.Evaluator; + import org.apache.iceberg.expressions.Expression; + import org.apache.iceberg.expressions.Expressions; + import org.apache.iceberg.hadoop.HadoopInputFile; + import org.apache.iceberg.io.CloseableIterable; + import org.apache.iceberg.io.InputFile; + import org.apache.iceberg.orc.ORC; + import org.apache.iceberg.parquet.Parquet; + import org.apache.iceberg.types.Type; + import org.apache.iceberg.types.Types; + import org.apache.iceberg.util.DateTimeUtil; + + import java.util.List; + import java.util.Map; + + public class IcebergRecordReader { + + private boolean applyResidual; + private boolean caseSensitive; + private boolean reuseContainers; + + private void initialize(Configuration conf) { + this.applyResidual = !conf.getBoolean(InputFormatConfig.SKIP_RESIDUAL_FILTERING, false); + this.caseSensitive = conf.getBoolean(InputFormatConfig.CASE_SENSITIVE, true); + this.reuseContainers = conf.getBoolean(InputFormatConfig.REUSE_CONTAINERS, false); + } + + public CloseableIterable<Record> createReader(Configuration config, FileScanTask currentTask, Schema readSchema) { + initialize(config); + DataFile file = currentTask.file(); + // TODO we should make use of FileIO to create inputFile + InputFile inputFile = HadoopInputFile.fromLocation(file.path(), config); + switch (file.format()) { + case AVRO: + return newAvroIterable(inputFile, currentTask, readSchema); + case ORC: + return newOrcIterable(inputFile, currentTask, readSchema); + case PARQUET: + return newParquetIterable(inputFile, currentTask, readSchema); + case METADATA: + return newMetadataIterable(currentTask.asDataTask(), readSchema); + default: + throw new UnsupportedOperationException( + String.format("Cannot read %s file: %s", file.format().name(), file.path())); + } + } + + private CloseableIterable<Record> newAvroIterable(InputFile inputFile, FileScanTask task, Schema readSchema) { + Avro.ReadBuilder avroReadBuilder = Avro.read(inputFile).project(readSchema).split(task.start(), task.length()); + if (reuseContainers) { + avroReadBuilder.reuseContainers(); + } + avroReadBuilder.createReaderFunc(DataReader::create); + return applyResidualFiltering(avroReadBuilder.build(), task.residual(), readSchema); + } + + private CloseableIterable<Record> newParquetIterable(InputFile inputFile, FileScanTask task, Schema readSchema) { + Parquet.ReadBuilder parquetReadBuilder = Parquet + .read(inputFile) + .project(readSchema) + .filter(task.residual()) + .caseSensitive(caseSensitive) + .split(task.start(), task.length()); + if (reuseContainers) {
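+ // Container reuse hands back the same row object for each record, so callers that retain rows must copy them.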
parquetReadBuilder.reuseContainers(); + } + + parquetReadBuilder.createReaderFunc(fileSchema -> GenericParquetReaders.buildReader(readSchema, fileSchema)); + + return applyResidualFiltering(parquetReadBuilder.build(), task.residual(), readSchema); + } + + private CloseableIterable<Record> newOrcIterable(InputFile inputFile, FileScanTask task, Schema readSchema) { + ORC.ReadBuilder orcReadBuilder = ORC + .read(inputFile) + .project(readSchema) + .caseSensitive(caseSensitive) + .split(task.start(), task.length()); + // ORC does not support reusing containers yet + orcReadBuilder.createReaderFunc(fileSchema -> GenericOrcReader.buildReader(readSchema, fileSchema)); + return applyResidualFiltering(orcReadBuilder.build(), task.residual(), readSchema); + } + + private CloseableIterable<Record> newMetadataIterable(DataTask task, Schema readSchema) { + CloseableIterable<StructLike> asStructLikeRows = task.rows(); + return CloseableIterable.transform(asStructLikeRows, row -> convertToRecord(row, readSchema)); + } + + private CloseableIterable<Record> applyResidualFiltering(CloseableIterable<Record> iter, Expression residual, Schema readSchema) { + if (applyResidual && residual != null && residual != Expressions.alwaysTrue()) { + Evaluator filter = new Evaluator(readSchema.asStruct(), residual, caseSensitive); + return CloseableIterable.filter(iter, record -> filter.eval(record)); + } else { + return iter; + } + } + + private Record convertToRecord(StructLike structLike, Schema readSchema) { + Record record = GenericRecord.create(readSchema); + for (int i = 0; i < readSchema.columns().size(); i++) { + Type type = readSchema.findType(readSchema.columns().get(i).name()); + record.set(i, fieldValue(type, structLike, i, readSchema)); + } + return record; + } + + private Class<?> javaType(Schema readSchema, int column) { + Type type = readSchema.findType(readSchema.columns().get(column).name()); + if (type.isMapType()) { + return Map.class; + } else if (type.isListType()) { + return List.class; + } else { + return type.typeId().javaClass(); + } + } + + private Object fieldValue(Type type, StructLike structLike, int column, Schema readSchema) { + if (type instanceof Types.TimestampType) { + Long value = (Long) structLike.get(column, javaType(readSchema, column)); + return ((Types.TimestampType) type).shouldAdjustToUTC() ? DateTimeUtil.timestamptzFromMicros(value) : DateTimeUtil.timestampFromMicros(value); + } else { + return structLike.get(column, javaType(readSchema, column)); + } + } +}
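For orientation, a rough sketch of how a caller drives this reader; `table`, `task`, and the consumer are assumptions of the example, standing in for a resolved Iceberg table and a planned FileScanTask, and are not part of this patch:

// Hypothetical usage of org.apache.iceberg.mr.IcebergRecordReader.
Configuration conf = new Configuration();
IcebergRecordReader factory = new IcebergRecordReader();
try (CloseableIterable<Record> records = factory.createReader(conf, task, table.schema())) {
  for (Record record : records) {
    System.out.println(record); // consume rows
  }
}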
diff --git a/mr/src/main/java/org/apache/iceberg/mr/InputFormatConfig.java b/mr/src/main/java/org/apache/iceberg/mr/InputFormatConfig.java new file mode 100644 index 000000000000..f4b9aae0f55a --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/InputFormatConfig.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iceberg.mr; + + import java.util.function.Function; + import org.apache.hadoop.conf.Configuration; + import org.apache.iceberg.Schema; + import org.apache.iceberg.SchemaParser; + import org.apache.iceberg.catalog.Catalog; + import org.apache.iceberg.expressions.Expression; + + public class InputFormatConfig { + + private InputFormatConfig() {} + + // configuration values for Iceberg input formats + public static final String REUSE_CONTAINERS = "iceberg.mr.reuse.containers"; + public static final String CASE_SENSITIVE = "iceberg.mr.case.sensitive"; + public static final String SKIP_RESIDUAL_FILTERING = "skip.residual.filtering"; + public static final String AS_OF_TIMESTAMP = "iceberg.mr.as.of.time"; + public static final String FILTER_EXPRESSION = "iceberg.mr.filter.expression"; + public static final String IN_MEMORY_DATA_MODEL = "iceberg.mr.in.memory.data.model"; + public static final String READ_SCHEMA = "iceberg.mr.read.schema"; + public static final String SNAPSHOT_ID = "iceberg.mr.snapshot.id"; + public static final String SPLIT_SIZE = "iceberg.mr.split.size"; + public static final String TABLE_PATH = "iceberg.mr.table.path"; + public static final String TABLE_SCHEMA = "iceberg.mr.table.schema"; + public static final String LOCALITY = "iceberg.mr.locality"; + public static final String CATALOG = "iceberg.mr.catalog"; + + public static final String CATALOG_NAME = "iceberg.catalog"; + public static final String HADOOP_CATALOG = "hadoop.catalog"; + public static final String HADOOP_TABLES = "hadoop.tables"; + public static final String HIVE_CATALOG = "hive.catalog"; + public static final String ICEBERG_SNAPSHOTS_TABLE_SUFFIX = ".snapshots"; + public static final String SNAPSHOT_TABLE = "iceberg.snapshots.table"; + public static final String SNAPSHOT_TABLE_SUFFIX = "__snapshots"; + public static final String TABLE_LOCATION = "location"; + public static final String TABLE_NAME = "name"; + + public enum InMemoryDataModel { + PIG, + HIVE, + GENERIC // the default data model is Iceberg generics + } + + public static class ConfigBuilder { + private final Configuration conf; + + public ConfigBuilder(Configuration conf) { + this.conf = conf; + // defaults + conf.setBoolean(SKIP_RESIDUAL_FILTERING, false); + conf.setBoolean(CASE_SENSITIVE, true); + conf.setBoolean(REUSE_CONTAINERS, false); + conf.setBoolean(LOCALITY, false); + } + + public ConfigBuilder filter(Expression expression) { + conf.set(FILTER_EXPRESSION, SerializationUtil.serializeToBase64(expression)); + return this; + } + + public ConfigBuilder project(Schema schema) { + conf.set(READ_SCHEMA, SchemaParser.toJson(schema)); + return this; + } + + public ConfigBuilder schema(Schema schema) { + conf.set(TABLE_SCHEMA, SchemaParser.toJson(schema)); + return this; + } + + public ConfigBuilder readFrom(String path) { + conf.set(TABLE_PATH, path); + return this; + } + + public ConfigBuilder reuseContainers(boolean reuse) { + conf.setBoolean(InputFormatConfig.REUSE_CONTAINERS, reuse); + return this; + } + + public ConfigBuilder caseSensitive(boolean caseSensitive) { + conf.setBoolean(InputFormatConfig.CASE_SENSITIVE, caseSensitive); + return this; + } + + public ConfigBuilder snapshotId(long snapshotId) { + conf.setLong(SNAPSHOT_ID, snapshotId); + return this; + } + + public ConfigBuilder asOfTime(long asOfTime) { + conf.setLong(AS_OF_TIMESTAMP, asOfTime); + return this; + } + + public ConfigBuilder splitSize(long
splitSize) { + conf.setLong(SPLIT_SIZE, splitSize); + return this; + } + + /** + * If this API is called, the input splits constructed will have host location information. + */ + public ConfigBuilder preferLocality() { + conf.setBoolean(LOCALITY, true); + return this; + } + + public ConfigBuilder catalogFunc(Class<? extends Function<Configuration, Catalog>> catalogFuncClass) { + conf.setClass(CATALOG, catalogFuncClass, Function.class); + return this; + } + + public ConfigBuilder useHiveRows() { + conf.set(IN_MEMORY_DATA_MODEL, InMemoryDataModel.HIVE.name()); + return this; + } + + public ConfigBuilder usePigTuples() { + conf.set(IN_MEMORY_DATA_MODEL, InMemoryDataModel.PIG.name()); + return this; + } + + /** + * Compute platforms pass down filters to data sources. If the data source cannot apply some filters, or only + * partially applies the filter, it will return the residual filter back. If the platform can correctly apply the + * residual filters, then it should call this API. Otherwise this API will throw an exception if the passed-in + * filter is not completely satisfied. + */ + public ConfigBuilder skipResidualFiltering() { + conf.setBoolean(InputFormatConfig.SKIP_RESIDUAL_FILTERING, true); + return this; + } + } + +}
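A minimal sketch of wiring these keys up through the builder; the table path is hypothetical:

// Hypothetical job setup with InputFormatConfig.ConfigBuilder.
Configuration conf = new Configuration();
new InputFormatConfig.ConfigBuilder(conf)
    .readFrom("hdfs://namenode:8020/warehouse/db/t") // hypothetical table path
    .filter(Expressions.equal("id", 42L))            // stored base64-serialized under FILTER_EXPRESSION
    .caseSensitive(false)
    .preferLocality();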
diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergFilterFactory.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergFilterFactory.java new file mode 100644 index 000000000000..ada7b78fa94a --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergFilterFactory.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iceberg.mr.mapred; + + import java.math.BigDecimal; + import java.sql.Date; + import java.sql.Timestamp; + import java.util.List; + import org.apache.hadoop.hive.ql.io.sarg.ExpressionTree; + import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf; + import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; + import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; + import org.apache.iceberg.expressions.Expression; + import org.apache.iceberg.expressions.Expressions; + + import static org.apache.iceberg.expressions.Expressions.and; + import static org.apache.iceberg.expressions.Expressions.equal; + import static org.apache.iceberg.expressions.Expressions.greaterThanOrEqual; + import static org.apache.iceberg.expressions.Expressions.in; + import static org.apache.iceberg.expressions.Expressions.isNull; + import static org.apache.iceberg.expressions.Expressions.lessThan; + import static org.apache.iceberg.expressions.Expressions.lessThanOrEqual; + import static org.apache.iceberg.expressions.Expressions.not; + import static org.apache.iceberg.expressions.Expressions.notNull; + import static org.apache.iceberg.expressions.Expressions.or; + + public class IcebergFilterFactory { + + private IcebergFilterFactory() {} + + public static Expression generateFilterExpression(SearchArgument sarg) { + return translate(sarg.getExpression(), sarg.getLeaves()); + } + + /** + * Recursive method to traverse down the ExpressionTree to evaluate each expression and its leaf nodes. + * @param tree Current ExpressionTree where the 'top' node is being evaluated. + * @param leaves List of all leaf nodes within the tree. + * @return Expression that is translated from the Hive SearchArgument. + */ + private static Expression translate(ExpressionTree tree, List<PredicateLeaf> leaves) { + List<ExpressionTree> childNodes = tree.getChildren(); + switch (tree.getOperator()) { + case OR: + Expression orResult = Expressions.alwaysFalse(); + for (ExpressionTree child : childNodes) { + orResult = or(orResult, translate(child, leaves)); + } + return orResult; + case AND: + Expression result = Expressions.alwaysTrue(); + for (ExpressionTree child : childNodes) { + result = and(result, translate(child, leaves)); + } + return result; + case NOT: + return not(translate(tree.getChildren().get(0), leaves)); + case LEAF: + return translateLeaf(leaves.get(tree.getLeaf())); + case CONSTANT: + // We are unsure of how the CONSTANT case works, so using the approach of: + // https://github.com/apache/hive/blob/master/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ + // ParquetFilterPredicateConverter.java#L116 + throw new UnsupportedOperationException("CONSTANT operator is not supported"); + default: + throw new IllegalStateException("Unknown operator: " + tree.getOperator()); + } + } + + /** + * Translate leaf nodes from Hive operator to Iceberg operator. + * @param leaf Leaf node + * @return Expression fully translated from Hive PredicateLeaf + */ + private static Expression translateLeaf(PredicateLeaf leaf) { + String column = leaf.getColumnName(); + switch (leaf.getOperator()) { + case EQUALS: + return equal(column, leafToIcebergType(leaf)); + case NULL_SAFE_EQUALS: + return equal(notNull(column).ref().name(), leafToIcebergType(leaf)); //TODO: Unsure..
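+ // Iceberg has no null-safe equality predicate; notNull(column).ref().name() only recovers the column name, so this branch currently behaves like a plain EQUALS.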
case LESS_THAN: + return lessThan(column, leafToIcebergType(leaf)); + case LESS_THAN_EQUALS: + return lessThanOrEqual(column, leafToIcebergType(leaf)); + case IN: + return in(column, (List) leafToIcebergType(leaf)); + case BETWEEN: + List<Object> icebergLiterals = leaf.getLiteralList(); + return and(greaterThanOrEqual(column, icebergLiterals.get(0)), + lessThanOrEqual(column, icebergLiterals.get(1))); + case IS_NULL: + return isNull(column); + default: + throw new IllegalStateException("Unknown operator: " + leaf.getOperator()); + } + } + + private static Object leafToIcebergType(PredicateLeaf leaf) { + switch (leaf.getType()) { + case LONG: + return leaf.getLiteral() != null ? leaf.getLiteral() : leaf.getLiteralList(); + case FLOAT: + return leaf.getLiteral() != null ? leaf.getLiteral() : leaf.getLiteralList(); + case STRING: + return leaf.getLiteral() != null ? leaf.getLiteral() : leaf.getLiteralList(); + case DATE: + // Hive converts a Date type to a Timestamp internally when retrieving the literal, + if (leaf.getLiteral() != null) { + return ((Timestamp) leaf.getLiteral()).toLocalDateTime().toLocalDate().toEpochDay(); + } else { + // but not when retrieving the literal list + List<Object> icebergValues = leaf.getLiteralList(); + icebergValues.replaceAll(value -> ((Date) value).toLocalDate().toEpochDay()); + return icebergValues; + } + case DECIMAL: + if (leaf.getLiteral() != null) { + return BigDecimal.valueOf(((HiveDecimalWritable) leaf.getLiteral()).doubleValue()); + } else { + List<Object> icebergValues = leaf.getLiteralList(); + icebergValues.replaceAll(value -> BigDecimal.valueOf(((HiveDecimalWritable) value).doubleValue())); + return icebergValues; + } + case TIMESTAMP: + if (leaf.getLiteral() != null) { + Timestamp timestamp = (Timestamp) leaf.getLiteral(); + return timestamp.toInstant().getEpochSecond() * 1000000 + timestamp.getNanos() / 1000; + } else { + List<Object> icebergValues = leaf.getLiteralList(); + icebergValues.replaceAll(value -> + ((Timestamp) value).toInstant().getEpochSecond() * 1000000 + ((Timestamp) value).getNanos() / 1000); + return icebergValues; + } + case BOOLEAN: + return leaf.getLiteral() != null ? leaf.getLiteral() : leaf.getLiteralList(); + default: + throw new IllegalStateException("Unknown type: " + leaf.getType()); + } + } +}
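For reference, a sketch of the translation this factory performs, built with Hive's SearchArgumentFactory builder (the column names are hypothetical):

// Hypothetical SearchArgument -> Expression translation.
SearchArgument sarg = SearchArgumentFactory.newBuilder()
    .startAnd()
      .equals("id", PredicateLeaf.Type.LONG, 42L)
      .lessThan("price", PredicateLeaf.Type.FLOAT, 10.0)
    .end()
    .build();
Expression expr = IcebergFilterFactory.generateFilterExpression(sarg);
// expr is roughly and(equal("id", 42), lessThan("price", 10.0)),
// with timestamps converted to microseconds and dates to epoch days as above.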
diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergInputFormat.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergInputFormat.java new file mode 100644 index 000000000000..0e294200c26e --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergInputFormat.java @@ -0,0 +1,254 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iceberg.mr.mapred; + + import java.io.DataInput; + import java.io.DataOutput; + import java.io.IOException; + import java.util.Iterator; + import java.util.List; + import org.apache.hadoop.conf.Configuration; + import org.apache.hadoop.fs.Path; + import org.apache.hadoop.hive.ql.exec.SerializationUtilities; + import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; + import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg; + import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; + import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; + import org.apache.hadoop.hive.ql.plan.TableScanDesc; + import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; + import org.apache.hadoop.mapred.FileSplit; + import org.apache.hadoop.mapred.InputFormat; + import org.apache.hadoop.mapred.InputSplit; + import org.apache.hadoop.mapred.JobConf; + import org.apache.hadoop.mapred.RecordReader; + import org.apache.hadoop.mapred.Reporter; + import org.apache.iceberg.CombinedScanTask; + import org.apache.iceberg.FileScanTask; + import org.apache.iceberg.Schema; + import org.apache.iceberg.Table; + import org.apache.iceberg.data.Record; + import org.apache.iceberg.expressions.Expression; + import org.apache.iceberg.io.CloseableIterable; + import org.apache.iceberg.mr.InputFormatConfig; + import org.apache.iceberg.mr.SerializationUtil; + import org.apache.iceberg.relocated.com.google.common.collect.Lists; + import org.slf4j.Logger; + import org.slf4j.LoggerFactory; + + /** + * CombineHiveInputFormat.AvoidSplitCombination is implemented to correctly delegate InputSplit + * creation to this class. See: https://stackoverflow.com/questions/29133275/ + * custom-inputformat-getsplits-never-called-in-hive + */ + public class IcebergInputFormat implements InputFormat<Void, IcebergWritable>, CombineHiveInputFormat.AvoidSplitCombination { + private static final Logger LOG = LoggerFactory.getLogger(IcebergInputFormat.class); + + private Table table; + + @Override + public InputSplit[] getSplits(JobConf conf, int numSplits) throws IOException { + table = TableResolver.resolveTableFromConfiguration(conf); + String location = conf.get(InputFormatConfig.TABLE_LOCATION); + List<CombinedScanTask> tasks = planTasks(conf); + return createSplits(tasks, location); + } + + private List<CombinedScanTask> planTasks(JobConf conf) { + String[] readColumns = ColumnProjectionUtils.getReadColumnNames(conf); + List<CombinedScanTask> tasks; + if (conf.get(TableScanDesc.FILTER_EXPR_CONF_STR) == null) { + tasks = Lists.newArrayList(table + .newScan() + .select(readColumns) + .planTasks()); + } else { + ExprNodeGenericFuncDesc exprNodeDesc = SerializationUtilities + .deserializeObject(conf.get(TableScanDesc.FILTER_EXPR_CONF_STR), ExprNodeGenericFuncDesc.class); + SearchArgument sarg = ConvertAstToSearchArg.create(conf, exprNodeDesc); + Expression filter = IcebergFilterFactory.generateFilterExpression(sarg); + + tasks = Lists.newArrayList(table + .newScan() + .select(readColumns) + .filter(filter) + .planTasks()); + } + return tasks; + } + + private InputSplit[] createSplits(List<CombinedScanTask> tasks, String name) { + InputSplit[] splits = new InputSplit[tasks.size()]; + for (int i = 0; i < tasks.size(); i++) { + splits[i] = new IcebergSplit(tasks.get(i), name); + } + return splits; + } + + @Override + public RecordReader<Void, IcebergWritable> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException { + return new IcebergRecordReader(split, job); + } + + @Override + public boolean shouldSkipCombine(Path path, Configuration conf) throws IOException { + return true; + } + + public class IcebergRecordReader implements
RecordReader<Void, IcebergWritable> { + private JobConf conf; + private IcebergSplit split; + + private Iterator<FileScanTask> tasks; + private CloseableIterable<Record> reader; + private Iterator<Record> recordIterator; + private Record currentRecord; + + public IcebergRecordReader(InputSplit split, JobConf conf) throws IOException { + this.split = (IcebergSplit) split; + this.conf = conf; + initialise(); + } + + private void initialise() { + tasks = split.getTask().files().iterator(); + nextTask(); + } + + private void nextTask() { + FileScanTask currentTask = tasks.next(); + Schema tableSchema = table.schema(); + org.apache.iceberg.mr.IcebergRecordReader wrappedReader = + new org.apache.iceberg.mr.IcebergRecordReader(); + reader = wrappedReader.createReader(conf, currentTask, tableSchema); + recordIterator = reader.iterator(); + } + + @Override + public boolean next(Void key, IcebergWritable value) { + if (recordIterator.hasNext()) { + currentRecord = recordIterator.next(); + value.setRecord(currentRecord); + return true; + } + + if (tasks.hasNext()) { + nextTask(); + currentRecord = recordIterator.next(); + value.setRecord(currentRecord); + return true; + } + return false; + } + + @Override + public Void createKey() { + return null; + } + + @Override + public IcebergWritable createValue() { + IcebergWritable record = new IcebergWritable(); + record.setRecord(currentRecord); + record.setSchema(table.schema()); + return record; + } + + @Override + public long getPos() throws IOException { + return 0; + } + + @Override + public void close() throws IOException { + reader.close(); + } + + @Override + public float getProgress() throws IOException { + return 0; + } + } + + /** + * FileSplit is extended rather than implementing the InputSplit interface due to Hive's HiveInputFormat + * expecting a split which is an instance of FileSplit.
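+ * The split also carries the serialized CombinedScanTask, so a record reader can be rebuilt from it without re-planning the scan.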
+ */ + private static class IcebergSplit extends FileSplit { + + private CombinedScanTask task; + private String partitionLocation; + + IcebergSplit() { + } + + IcebergSplit(CombinedScanTask task, String partitionLocation) { + this.task = task; + this.partitionLocation = partitionLocation; + } + + @Override + public long getLength() { + return task.files().stream().mapToLong(FileScanTask::length).sum(); + } + + @Override + public String[] getLocations() throws IOException { + return new String[0]; + } + + @Override + public Path getPath() { + return new Path(partitionLocation); + } + + @Override + public long getStart() { + return 0L; + } + + @Override + public void write(DataOutput out) throws IOException { + byte[] dataTask = SerializationUtil.serializeToBytes(this.task); + out.writeInt(dataTask.length); + out.write(dataTask); + + byte[] location = SerializationUtil.serializeToBytes(this.partitionLocation); + out.writeInt(location.length); + out.write(location); + } + + @Override + public void readFields(DataInput in) throws IOException { + byte[] data = new byte[in.readInt()]; + in.readFully(data); + this.task = SerializationUtil.deserializeFromBytes(data); + + byte[] location = new byte[in.readInt()]; + in.readFully(location); + this.partitionLocation = SerializationUtil.deserializeFromBytes(location); + } + + public CombinedScanTask getTask() { + return task; + } + } + +}
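A sketch of exercising the mapred path end to end; the configuration keys mirror InputFormatConfig and the table location is hypothetical:

// Hypothetical read through the mapred API.
JobConf conf = new JobConf();
conf.set(InputFormatConfig.CATALOG_NAME, InputFormatConfig.HADOOP_TABLES);
conf.set(InputFormatConfig.TABLE_LOCATION, "file:///tmp/warehouse/db/t"); // hypothetical location
IcebergInputFormat inputFormat = new IcebergInputFormat();
InputSplit[] splits = inputFormat.getSplits(conf, 1);
RecordReader<Void, IcebergWritable> reader = inputFormat.getRecordReader(splits[0], conf, Reporter.NULL);
IcebergWritable value = reader.createValue();
while (reader.next(null, value)) {
  Record record = value.record(); // current row
}
reader.close();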
diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergSerDe.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergSerDe.java new file mode 100644 index 000000000000..871e0e677835 --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergSerDe.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iceberg.mr.mapred; + + import java.io.IOException; + import java.io.UncheckedIOException; + import java.util.Properties; + import javax.annotation.Nullable; + import org.apache.hadoop.conf.Configuration; + import org.apache.hadoop.hive.serde2.AbstractSerDe; + import org.apache.hadoop.hive.serde2.SerDeException; + import org.apache.hadoop.hive.serde2.SerDeStats; + import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; + import org.apache.hadoop.io.Writable; + import org.apache.iceberg.Table; + import org.apache.iceberg.mr.mapred.serde.objectinspector.IcebergObjectInspector; + + public class IcebergSerDe extends AbstractSerDe { + + private ObjectInspector inspector; + + @Override + public void initialize(@Nullable Configuration configuration, Properties serDeProperties) throws SerDeException { + final Table table; + + try { + table = TableResolver.resolveTableFromConfiguration(configuration, serDeProperties); + } catch (IOException e) { + throw new UncheckedIOException("Unable to resolve table from configuration", e); + } + + try { + this.inspector = IcebergObjectInspector.create(table.schema()); + } catch (Exception e) { + throw new SerDeException(e); + } + } + + @Override + public Class<? extends Writable> getSerializedClass() { + return IcebergWritable.class; + } + + @Override + public Writable serialize(Object o, ObjectInspector objectInspector) { + throw new UnsupportedOperationException("Serialization is not supported."); + } + + @Override + public SerDeStats getSerDeStats() { + return null; + } + + @Override + public Object deserialize(Writable writable) { + return ((IcebergWritable) writable).record(); + } + + @Override + public ObjectInspector getObjectInspector() { + return inspector; + } +} diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergStorageHandler.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergStorageHandler.java new file mode 100644 index 000000000000..1536d989f73a --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergStorageHandler.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + + package org.apache.iceberg.mr.mapred; + + import java.util.Map; + import org.apache.hadoop.conf.Configuration; + import org.apache.hadoop.hive.metastore.HiveMetaHook; + import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; + import org.apache.hadoop.hive.ql.metadata.HiveException; + import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; + import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler; + import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; + import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; + import org.apache.hadoop.hive.ql.plan.TableDesc; + import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider; + import org.apache.hadoop.hive.serde2.AbstractSerDe; + import org.apache.hadoop.hive.serde2.Deserializer; + import org.apache.hadoop.mapred.InputFormat; + import org.apache.hadoop.mapred.JobConf; + import org.apache.hadoop.mapred.OutputFormat; + + public class IcebergStorageHandler implements HiveStoragePredicateHandler, HiveStorageHandler { + + private Configuration conf; + + @Override + public Class<? extends InputFormat> getInputFormatClass() { + return IcebergInputFormat.class; + } + + @Override + public Class<? extends OutputFormat> getOutputFormatClass() { + return HiveIgnoreKeyTextOutputFormat.class; + } + + @Override + public Class<? extends AbstractSerDe> getSerDeClass() { + return IcebergSerDe.class; + } + + @Override + public HiveMetaHook getMetaHook() { + return null; + } + + @Override + public HiveAuthorizationProvider getAuthorizationProvider() throws HiveException { + return null; + } + + @Override + public void configureInputJobProperties(TableDesc tableDesc, Map<String, String> map) { + + } + + @Override + public void configureOutputJobProperties(TableDesc tableDesc, Map<String, String> map) { + + } + + @Override + public void configureTableJobProperties(TableDesc tableDesc, Map<String, String> map) { + + } + + @Override + public void configureJobConf(TableDesc tableDesc, JobConf jobConf) { + + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + @Override + public String toString() { + return this.getClass().getName(); + } + + /** + * @param jobConf Job configuration for InputFormat to access + * @param deserializer Deserializer + * @param exprNodeDesc Filter expression extracted by Hive + * @return Entire filter to take advantage of Hive's pruning as well as Iceberg's pruning. + */ + @Override + public DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer, ExprNodeDesc exprNodeDesc) { + DecomposedPredicate predicate = new DecomposedPredicate(); + predicate.residualPredicate = (ExprNodeGenericFuncDesc) exprNodeDesc; + predicate.pushedPredicate = (ExprNodeGenericFuncDesc) exprNodeDesc; + return predicate; + } +} diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergWritable.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergWritable.java new file mode 100644 index 000000000000..3e191ab200e5 --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/IcebergWritable.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr.mapred; + +import java.io.DataInput; +import java.io.DataOutput; +import org.apache.hadoop.io.Writable; +import org.apache.iceberg.Schema; +import org.apache.iceberg.data.Record; + +/** + * Wraps an Iceberg Record in a Writable which Hive can use in the SerDe. + */ +public class IcebergWritable implements Writable { + + private Record record; + private Schema schema; + + public IcebergWritable() { + } + + public IcebergWritable(Record record, Schema schema) { + this.record = record; + this.schema = schema; + } + + @SuppressWarnings("checkstyle:HiddenField") + public void wrapRecord(Record record) { + this.record = record; + } + + public Record record() { + return record; + } + + public void setRecord(Record record) { + this.record = record; + } + + public Schema schema() { + return schema; + } + + public void setSchema(Schema schema) { + this.schema = schema; + } + + @Override + public void write(DataOutput dataOutput) { + throw new UnsupportedOperationException("write is not supported."); + } + + @Override + public void readFields(DataInput dataInput) { + throw new UnsupportedOperationException("readFields is not supported."); + } +} diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/TableResolver.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/TableResolver.java new file mode 100644 index 000000000000..58b0e367009c --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/TableResolver.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + + package org.apache.iceberg.mr.mapred; + + import java.io.IOException; + import java.util.Map; + import java.util.Properties; + import org.apache.hadoop.conf.Configuration; + import org.apache.iceberg.Table; + import org.apache.iceberg.exceptions.NoSuchNamespaceException; + import org.apache.iceberg.hadoop.HadoopTables; + import org.apache.iceberg.mr.InputFormatConfig; + import org.apache.iceberg.relocated.com.google.common.base.Preconditions; + + final class TableResolver { + + private TableResolver() { + } + + static Table resolveTableFromConfiguration(Configuration conf, Properties properties) throws IOException { + Configuration configuration = new Configuration(conf); + for (Map.Entry<Object, Object> entry : properties.entrySet()) { + configuration.set(entry.getKey().toString(), entry.getValue().toString()); + } + return resolveTableFromConfiguration(configuration); + } + + static Table resolveTableFromConfiguration(Configuration conf) throws IOException { + // default to HadoopTables + String catalogName = conf.get(InputFormatConfig.CATALOG_NAME, InputFormatConfig.HADOOP_TABLES); + + switch (catalogName) { + case InputFormatConfig.HADOOP_TABLES: + String tableLocation = conf.get(InputFormatConfig.TABLE_LOCATION); + Preconditions.checkNotNull(tableLocation, InputFormatConfig.TABLE_LOCATION + " is not set."); + HadoopTables tables = new HadoopTables(conf); + return tables.load(tableLocation); + + case InputFormatConfig.HIVE_CATALOG: + String tableName = conf.get(InputFormatConfig.TABLE_NAME); + Preconditions.checkNotNull(tableName, InputFormatConfig.TABLE_NAME + " is not set."); + // TODO Implement HiveCatalog + return null; + default: + throw new NoSuchNamespaceException("Catalog " + catalogName + " not supported."); + } + } + +}
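In practice both the SerDe and the InputFormat resolve the table the same way; a minimal sketch (TableResolver is package-private, so this only runs from inside org.apache.iceberg.mr.mapred, and the location is hypothetical):

// Hypothetical table resolution from job configuration.
Configuration conf = new Configuration();
conf.set(InputFormatConfig.CATALOG_NAME, InputFormatConfig.HADOOP_TABLES); // "iceberg.catalog"
conf.set(InputFormatConfig.TABLE_LOCATION, "file:///tmp/warehouse/db/t");  // "location"
Table table = TableResolver.resolveTableFromConfiguration(conf);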
diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergBinaryObjectInspector.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergBinaryObjectInspector.java new file mode 100644 index 000000000000..85103c65307c --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergBinaryObjectInspector.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iceberg.mr.mapred.serde.objectinspector; + + import java.nio.ByteBuffer; + import java.util.Arrays; + import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector; + import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; + import org.apache.hadoop.io.BytesWritable; + + public final class IcebergBinaryObjectInspector extends IcebergPrimitiveObjectInspector + implements BinaryObjectInspector { + + private static final IcebergBinaryObjectInspector INSTANCE = new IcebergBinaryObjectInspector(); + + public static IcebergBinaryObjectInspector get() { + return INSTANCE; + } + + private IcebergBinaryObjectInspector() { + super(TypeInfoFactory.binaryTypeInfo); + } + + @Override + public byte[] getPrimitiveJavaObject(Object o) { + return o == null ? null : ((ByteBuffer) o).array(); + } + + @Override + public BytesWritable getPrimitiveWritableObject(Object o) { + return o == null ? null : new BytesWritable(getPrimitiveJavaObject(o)); + } + + @Override + public Object copyObject(Object o) { + if (o == null) { + return null; + } + + byte[] bytes = (byte[]) o; + return Arrays.copyOf(bytes, bytes.length); + } + +} diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergDateObjectInspector.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergDateObjectInspector.java new file mode 100644 index 000000000000..2991540437c7 --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergDateObjectInspector.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iceberg.mr.mapred.serde.objectinspector; + + import java.sql.Date; + import java.time.LocalDate; + import org.apache.hadoop.hive.serde2.io.DateWritable; + import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector; + import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; + + public final class IcebergDateObjectInspector extends IcebergPrimitiveObjectInspector implements DateObjectInspector { + + private static final IcebergDateObjectInspector INSTANCE = new IcebergDateObjectInspector(); + + public static IcebergDateObjectInspector get() { + return INSTANCE; + } + + private IcebergDateObjectInspector() { + super(TypeInfoFactory.dateTypeInfo); + } + + @Override + public Date getPrimitiveJavaObject(Object o) { + return o == null ? null : Date.valueOf((LocalDate) o); + } + + @Override + public DateWritable getPrimitiveWritableObject(Object o) { + Date date = getPrimitiveJavaObject(o); + return date == null ? null : new DateWritable(date); + } + + @Override + public Object copyObject(Object o) { + return o == null ?
null : new Date(((Date) o).getTime()); + } + +} diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergDecimalObjectInspector.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergDecimalObjectInspector.java new file mode 100644 index 000000000000..5d31ce814509 --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergDecimalObjectInspector.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iceberg.mr.mapred.serde.objectinspector; + + import com.github.benmanes.caffeine.cache.Cache; + import com.github.benmanes.caffeine.cache.Caffeine; + import java.math.BigDecimal; + import java.util.concurrent.TimeUnit; + import org.apache.hadoop.hive.common.type.HiveDecimal; + import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; + import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector; + import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; + import org.apache.iceberg.relocated.com.google.common.base.Preconditions; + + public final class IcebergDecimalObjectInspector extends IcebergPrimitiveObjectInspector + implements HiveDecimalObjectInspector { + + private static final Cache<Integer, IcebergDecimalObjectInspector> CACHE = Caffeine.newBuilder() + .expireAfterAccess(10, TimeUnit.MINUTES) + .build(); + + public static IcebergDecimalObjectInspector get(int precision, int scale) { + Preconditions.checkArgument(scale < precision); + Preconditions.checkArgument(precision <= HiveDecimal.MAX_PRECISION); + Preconditions.checkArgument(scale <= HiveDecimal.MAX_SCALE); + + Integer key = precision << 8 | scale; + return CACHE.get(key, k -> new IcebergDecimalObjectInspector(precision, scale)); + } + + private IcebergDecimalObjectInspector(int precision, int scale) { + super(new DecimalTypeInfo(precision, scale)); + } + + @Override + public int precision() { + return ((DecimalTypeInfo) getTypeInfo()).precision(); + } + + @Override + public int scale() { + return ((DecimalTypeInfo) getTypeInfo()).scale(); + } + + @Override + public HiveDecimal getPrimitiveJavaObject(Object o) { + return o == null ? null : HiveDecimal.create((BigDecimal) o); + } + + @Override + public HiveDecimalWritable getPrimitiveWritableObject(Object o) { + HiveDecimal decimal = getPrimitiveJavaObject(o); + return decimal == null ?
null : new HiveDecimalWritable(decimal); + } + + @Override + public Object copyObject(Object o) { + if (o == null) { + return null; + } + + HiveDecimal decimal = (HiveDecimal) o; + return HiveDecimal.create(decimal.bigDecimalValue()); + } + +} diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergObjectInspector.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergObjectInspector.java new file mode 100644 index 000000000000..ca4875649415 --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergObjectInspector.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iceberg.mr.mapred.serde.objectinspector; + + import java.util.List; + import javax.annotation.Nullable; + import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; + import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; + import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; + import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; + import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; + import org.apache.iceberg.Schema; + import org.apache.iceberg.types.Type; + import org.apache.iceberg.types.TypeUtil; + import org.apache.iceberg.types.Types; + + public final class IcebergObjectInspector extends TypeUtil.SchemaVisitor<ObjectInspector> { + + public static ObjectInspector create(@Nullable Schema schema) { + if (schema == null) { + return IcebergRecordObjectInspector.empty(); + } + + return TypeUtil.visit(schema, new IcebergObjectInspector()); + } + + public static ObjectInspector create(Types.NestedField...
fields) { + return create(new Schema(fields)); + } + + @Override + public ObjectInspector field(Types.NestedField field, ObjectInspector fieldObjectInspector) { + return fieldObjectInspector; + } + + @Override + public ObjectInspector list(Types.ListType listTypeInfo, ObjectInspector listObjectInspector) { + return ObjectInspectorFactory.getStandardListObjectInspector(listObjectInspector); + } + + @Override + public ObjectInspector map(Types.MapType mapType, + ObjectInspector keyObjectInspector, ObjectInspector valueObjectInspector) { + return ObjectInspectorFactory.getStandardMapObjectInspector(keyObjectInspector, valueObjectInspector); + } + + @Override + public ObjectInspector primitive(Type.PrimitiveType primitiveType) { + final PrimitiveTypeInfo primitiveTypeInfo; + + switch (primitiveType.typeId()) { + case BINARY: + return IcebergBinaryObjectInspector.get(); + case BOOLEAN: + primitiveTypeInfo = TypeInfoFactory.booleanTypeInfo; + break; + case DATE: + return IcebergDateObjectInspector.get(); + case DECIMAL: + Types.DecimalType type = (Types.DecimalType) primitiveType; + return IcebergDecimalObjectInspector.get(type.precision(), type.scale()); + case DOUBLE: + primitiveTypeInfo = TypeInfoFactory.doubleTypeInfo; + break; + case FLOAT: + primitiveTypeInfo = TypeInfoFactory.floatTypeInfo; + break; + case INTEGER: + primitiveTypeInfo = TypeInfoFactory.intTypeInfo; + break; + case LONG: + primitiveTypeInfo = TypeInfoFactory.longTypeInfo; + break; + case STRING: + primitiveTypeInfo = TypeInfoFactory.stringTypeInfo; + break; + case TIMESTAMP: + boolean adjustToUTC = ((Types.TimestampType) primitiveType).shouldAdjustToUTC(); + return IcebergTimestampObjectInspector.get(adjustToUTC); + + case FIXED: + case TIME: + case UUID: + default: + throw new IllegalArgumentException(primitiveType.typeId() + " type is not supported"); + } + + return PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(primitiveTypeInfo); + } + + @Override + public ObjectInspector schema(Schema schema, ObjectInspector structObjectInspector) { + return structObjectInspector; + } + + @Override + public ObjectInspector struct(Types.StructType structType, List<ObjectInspector> fieldObjectInspectors) { + return new IcebergRecordObjectInspector(structType, fieldObjectInspectors); + } + +}
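A short sketch of the visitor's output: build an inspector for a schema and read one field from a generic Record (the field names are hypothetical):

// Hypothetical inspector usage.
Schema schema = new Schema(
    Types.NestedField.required(1, "id", Types.LongType.get()),
    Types.NestedField.optional(2, "data", Types.StringType.get()));
StructObjectInspector inspector = (StructObjectInspector) IcebergObjectInspector.create(schema);
Record record = GenericRecord.create(schema);
record.set(0, 42L);
StructField idField = inspector.getStructFieldRef("id");
Object id = inspector.getStructFieldData(record, idField); // 42L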
diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergPrimitiveObjectInspector.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergPrimitiveObjectInspector.java new file mode 100644 index 000000000000..53c3560c2dd7 --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergPrimitiveObjectInspector.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + package org.apache.iceberg.mr.mapred.serde.objectinspector; + + import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; + import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; + + abstract class IcebergPrimitiveObjectInspector implements PrimitiveObjectInspector { + + private final PrimitiveTypeInfo typeInfo; + + protected IcebergPrimitiveObjectInspector(PrimitiveTypeInfo typeInfo) { + this.typeInfo = typeInfo; + } + + @Override + public Category getCategory() { + return typeInfo.getCategory(); + } + + @Override + public String getTypeName() { + return typeInfo.getTypeName(); + } + + @Override + public PrimitiveTypeInfo getTypeInfo() { + return typeInfo; + } + + @Override + public PrimitiveObjectInspector.PrimitiveCategory getPrimitiveCategory() { + return typeInfo.getPrimitiveCategory(); + } + + @Override + public Class<?> getJavaPrimitiveClass() { + return typeInfo.getPrimitiveJavaClass(); + } + + @Override + public Class<?> getPrimitiveWritableClass() { + return typeInfo.getPrimitiveWritableClass(); + } + + @Override + public boolean preferWritable() { + return false; + } + + @Override + public int precision() { + return 0; + } + + @Override + public int scale() { + return 0; + } + +} diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergRecordObjectInspector.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergRecordObjectInspector.java new file mode 100644 index 000000000000..7005e4239708 --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergRecordObjectInspector.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.iceberg.mr.mapred.serde.objectinspector; + +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.iceberg.data.Record; +import org.apache.iceberg.relocated.com.google.common.base.Preconditions; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.apache.iceberg.types.Types; + +public final class IcebergRecordObjectInspector extends StructObjectInspector { + + private static final IcebergRecordObjectInspector EMPTY = + new IcebergRecordObjectInspector(Types.StructType.of(), Collections.emptyList()); + + private final List structFields; + + public IcebergRecordObjectInspector(Types.StructType structType, List objectInspectors) { + Preconditions.checkArgument(structType.fields().size() == objectInspectors.size()); + + this.structFields = Lists.newArrayListWithExpectedSize(structType.fields().size()); + + int position = 0; + + for (Types.NestedField field : structType.fields()) { + ObjectInspector oi = objectInspectors.get(position); + IcebergRecordStructField structField = new IcebergRecordStructField(field, oi, position); + structFields.add(structField); + position++; + } + } + + public static IcebergRecordObjectInspector empty() { + return EMPTY; + } + + @Override + public List getAllStructFieldRefs() { + return structFields; + } + + @Override + public StructField getStructFieldRef(String name) { + return ObjectInspectorUtils.getStandardStructFieldRef(name, structFields); + } + + @Override + public Object getStructFieldData(Object o, StructField structField) { + return ((Record) o).get(((IcebergRecordStructField) structField).position()); + } + + @Override + public List getStructFieldsDataAsList(Object o) { + Record record = (Record) o; + return structFields + .stream() + .map(f -> record.get(f.position())) + .collect(Collectors.toList()); + } + + @Override + public String getTypeName() { + return ObjectInspectorUtils.getStandardStructTypeName(this); + } + + @Override + public Category getCategory() { + return Category.STRUCT; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + IcebergRecordObjectInspector that = (IcebergRecordObjectInspector) o; + return structFields.equals(that.structFields); + } + + @Override + public int hashCode() { + return structFields.hashCode(); + } + + private static class IcebergRecordStructField implements StructField { + + private final Types.NestedField field; + private final ObjectInspector oi; + private final int position; + + IcebergRecordStructField(Types.NestedField field, ObjectInspector oi, int position) { + this.field = field; + this.oi = oi; + this.position = position; // position in the record + } + + @Override + public String getFieldName() { + return field.name(); + } + + @Override + public ObjectInspector getFieldObjectInspector() { + return oi; + } + + @Override + public int getFieldID() { + return field.fieldId(); + } + + @Override + public String getFieldComment() { + return field.doc(); + } + + int position() { + return position; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == 
null || getClass() != o.getClass()) { + return false; + } + + IcebergRecordStructField that = (IcebergRecordStructField) o; + return field.equals(that.field) && oi.equals(that.oi); + } + + @Override + public int hashCode() { + return 31 * field.hashCode() + oi.hashCode(); + } + + } + +} diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergTimestampObjectInspector.java b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergTimestampObjectInspector.java new file mode 100644 index 000000000000..569267df8496 --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapred/serde/objectinspector/IcebergTimestampObjectInspector.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr.mapred.serde.objectinspector; + +import java.sql.Timestamp; +import java.time.LocalDateTime; +import java.time.OffsetDateTime; +import java.util.function.Function; +import org.apache.hadoop.hive.serde2.io.TimestampWritable; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; + +public final class IcebergTimestampObjectInspector extends IcebergPrimitiveObjectInspector + implements TimestampObjectInspector { + + private static final IcebergTimestampObjectInspector INSTANCE_WITH_ZONE = + new IcebergTimestampObjectInspector(o -> ((OffsetDateTime) o).toLocalDateTime()); + + private static final IcebergTimestampObjectInspector INSTANCE_WITHOUT_ZONE = + new IcebergTimestampObjectInspector(o -> (LocalDateTime) o); + + public static IcebergTimestampObjectInspector get(boolean adjustToUTC) { + return adjustToUTC ? INSTANCE_WITH_ZONE : INSTANCE_WITHOUT_ZONE; + } + + private final Function cast; + + private IcebergTimestampObjectInspector(Function cast) { + super(TypeInfoFactory.timestampTypeInfo); + this.cast = cast; + } + + @Override + public Timestamp getPrimitiveJavaObject(Object o) { + return o == null ? null : Timestamp.valueOf(cast.apply(o)); + } + + @Override + public TimestampWritable getPrimitiveWritableObject(Object o) { + Timestamp ts = getPrimitiveJavaObject(o); + return ts == null ? 
null : new TimestampWritable(ts); + } + + @Override + public Object copyObject(Object o) { + if (o == null) { + return null; + } + + Timestamp ts = (Timestamp) o; + Timestamp copy = new Timestamp(ts.getTime()); + copy.setNanos(ts.getNanos()); + return copy; + } + +} diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInputFormat.java b/mr/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInputFormat.java index b209a80e4fee..3b2228a7585b 100644 --- a/mr/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInputFormat.java +++ b/mr/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInputFormat.java @@ -22,13 +22,8 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.util.Collections; import java.util.Iterator; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.BiFunction; -import java.util.function.Function; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; @@ -38,65 +33,32 @@ import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.iceberg.CombinedScanTask; -import org.apache.iceberg.DataFile; import org.apache.iceberg.FileScanTask; -import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.SchemaParser; -import org.apache.iceberg.StructLike; import org.apache.iceberg.Table; import org.apache.iceberg.TableProperties; import org.apache.iceberg.TableScan; -import org.apache.iceberg.avro.Avro; -import org.apache.iceberg.catalog.Catalog; -import org.apache.iceberg.catalog.TableIdentifier; -import org.apache.iceberg.common.DynConstructors; -import org.apache.iceberg.data.IdentityPartitionConverters; -import org.apache.iceberg.data.avro.DataReader; -import org.apache.iceberg.data.orc.GenericOrcReader; -import org.apache.iceberg.data.parquet.GenericParquetReaders; import org.apache.iceberg.exceptions.RuntimeIOException; -import org.apache.iceberg.expressions.Evaluator; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; -import org.apache.iceberg.hadoop.HadoopInputFile; -import org.apache.iceberg.hadoop.HadoopTables; import org.apache.iceberg.hadoop.Util; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.io.CloseableIterator; -import org.apache.iceberg.io.InputFile; +import org.apache.iceberg.mr.InputFormatConfig; import org.apache.iceberg.mr.SerializationUtil; -import org.apache.iceberg.orc.ORC; -import org.apache.iceberg.parquet.Parquet; -import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.Lists; -import org.apache.iceberg.types.Type; -import org.apache.iceberg.types.TypeUtil; -import org.apache.iceberg.util.PartitionUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Generic Mrv2 InputFormat API for Iceberg. + * * @param T is the in memory data model which can either be Pig tuples, Hive rows. 
Default is Iceberg records */ public class IcebergInputFormat extends InputFormat { private static final Logger LOG = LoggerFactory.getLogger(IcebergInputFormat.class); - static final String AS_OF_TIMESTAMP = "iceberg.mr.as.of.time"; - static final String CASE_SENSITIVE = "iceberg.mr.case.sensitive"; - static final String FILTER_EXPRESSION = "iceberg.mr.filter.expression"; - static final String IN_MEMORY_DATA_MODEL = "iceberg.mr.in.memory.data.model"; - static final String READ_SCHEMA = "iceberg.mr.read.schema"; - static final String REUSE_CONTAINERS = "iceberg.mr.reuse.containers"; - static final String SNAPSHOT_ID = "iceberg.mr.snapshot.id"; - static final String SPLIT_SIZE = "iceberg.mr.split.size"; - static final String TABLE_PATH = "iceberg.mr.table.path"; - static final String TABLE_SCHEMA = "iceberg.mr.table.schema"; - static final String LOCALITY = "iceberg.mr.locality"; - static final String CATALOG = "iceberg.mr.catalog"; - static final String SKIP_RESIDUAL_FILTERING = "skip.residual.filtering"; - private transient List splits; private enum InMemoryDataModel { @@ -111,100 +73,9 @@ private enum InMemoryDataModel { * * @param job the {@code Job} to configure */ - public static ConfigBuilder configure(Job job) { + public static InputFormatConfig.ConfigBuilder configure(Job job) { job.setInputFormatClass(IcebergInputFormat.class); - return new ConfigBuilder(job.getConfiguration()); - } - - public static class ConfigBuilder { - private final Configuration conf; - - public ConfigBuilder(Configuration conf) { - this.conf = conf; - // defaults - conf.setEnum(IN_MEMORY_DATA_MODEL, InMemoryDataModel.GENERIC); - conf.setBoolean(SKIP_RESIDUAL_FILTERING, false); - conf.setBoolean(CASE_SENSITIVE, true); - conf.setBoolean(REUSE_CONTAINERS, false); - conf.setBoolean(LOCALITY, false); - } - - public ConfigBuilder readFrom(String path) { - conf.set(TABLE_PATH, path); - Table table = findTable(conf); - conf.set(TABLE_SCHEMA, SchemaParser.toJson(table.schema())); - return this; - } - - public ConfigBuilder filter(Expression expression) { - conf.set(FILTER_EXPRESSION, SerializationUtil.serializeToBase64(expression)); - return this; - } - - public ConfigBuilder project(Schema schema) { - conf.set(READ_SCHEMA, SchemaParser.toJson(schema)); - return this; - } - - public ConfigBuilder reuseContainers(boolean reuse) { - conf.setBoolean(REUSE_CONTAINERS, reuse); - return this; - } - - public ConfigBuilder caseSensitive(boolean caseSensitive) { - conf.setBoolean(CASE_SENSITIVE, caseSensitive); - return this; - } - - public ConfigBuilder snapshotId(long snapshotId) { - conf.setLong(SNAPSHOT_ID, snapshotId); - return this; - } - - public ConfigBuilder asOfTime(long asOfTime) { - conf.setLong(AS_OF_TIMESTAMP, asOfTime); - return this; - } - - public ConfigBuilder splitSize(long splitSize) { - conf.setLong(SPLIT_SIZE, splitSize); - return this; - } - - /** - * If this API is called. The input splits - * constructed will have host location information - */ - public ConfigBuilder preferLocality() { - conf.setBoolean(LOCALITY, true); - return this; - } - - public ConfigBuilder catalogFunc(Class> catalogFuncClass) { - conf.setClass(CATALOG, catalogFuncClass, Function.class); - return this; - } - - public ConfigBuilder useHiveRows() { - conf.set(IN_MEMORY_DATA_MODEL, InMemoryDataModel.HIVE.name()); - return this; - } - - public ConfigBuilder usePigTuples() { - conf.set(IN_MEMORY_DATA_MODEL, InMemoryDataModel.PIG.name()); - return this; - } - - /** - * Compute platforms pass down filters to data sources. 
If the data source cannot apply some filters, or only - * partially applies the filter, it will return the residual filter back. If the platform can correctly apply - * the residual filters, then it should call this api. Otherwise the current api will throw an exception if the - * passed in filter is not completely satisfied. - */ - public ConfigBuilder skipResidualFiltering() { - conf.setBoolean(SKIP_RESIDUAL_FILTERING, true); - return this; - } + return new InputFormatConfig.ConfigBuilder(job.getConfiguration()); } @Override @@ -215,48 +86,52 @@ public List getSplits(JobContext context) { } Configuration conf = context.getConfiguration(); - Table table = findTable(conf); - TableScan scan = table.newScan() - .caseSensitive(conf.getBoolean(CASE_SENSITIVE, true)); - long snapshotId = conf.getLong(SNAPSHOT_ID, -1); + Table table = TableResolver.findTable(conf); + TableScan scan = createTableScan(conf, table); + + splits = Lists.newArrayList(); + boolean applyResidual = !conf.getBoolean(InputFormatConfig.SKIP_RESIDUAL_FILTERING, false); + InMemoryDataModel model = conf.getEnum(InputFormatConfig.IN_MEMORY_DATA_MODEL, InMemoryDataModel.GENERIC); + try (CloseableIterable tasksIterable = scan.planTasks()) { + tasksIterable.forEach(task -> { + if (applyResidual && (model == InMemoryDataModel.HIVE || model == InMemoryDataModel.PIG)) { + //TODO: We do not support residual evaluation for HIVE and PIG in memory data model yet + checkResiduals(task); + } + splits.add(new IcebergSplit(conf, task)); + }); + } catch (IOException e) { + throw new RuntimeIOException(e, "Failed to close table scan: %s", scan); + } + + return splits; + } + + private TableScan createTableScan(Configuration conf, Table table) { + TableScan scan = table.newScan().caseSensitive(conf.getBoolean(InputFormatConfig.CASE_SENSITIVE, true)); + long snapshotId = conf.getLong(InputFormatConfig.SNAPSHOT_ID, -1); if (snapshotId != -1) { scan = scan.useSnapshot(snapshotId); } - long asOfTime = conf.getLong(AS_OF_TIMESTAMP, -1); + long asOfTime = conf.getLong(InputFormatConfig.AS_OF_TIMESTAMP, -1); if (asOfTime != -1) { scan = scan.asOfTime(asOfTime); } - long splitSize = conf.getLong(SPLIT_SIZE, 0); + long splitSize = conf.getLong(InputFormatConfig.SPLIT_SIZE, 0); if (splitSize > 0) { scan = scan.option(TableProperties.SPLIT_SIZE, String.valueOf(splitSize)); } - String schemaStr = conf.get(READ_SCHEMA); + String schemaStr = conf.get(InputFormatConfig.READ_SCHEMA); if (schemaStr != null) { scan.project(SchemaParser.fromJson(schemaStr)); } // TODO add a filter parser to get rid of Serialization - Expression filter = SerializationUtil.deserializeFromBase64(conf.get(FILTER_EXPRESSION)); + Expression filter = SerializationUtil.deserializeFromBase64(conf.get(InputFormatConfig.FILTER_EXPRESSION)); if (filter != null) { scan = scan.filter(filter); } - - splits = Lists.newArrayList(); - boolean applyResidual = !conf.getBoolean(SKIP_RESIDUAL_FILTERING, false); - InMemoryDataModel model = conf.getEnum(IN_MEMORY_DATA_MODEL, InMemoryDataModel.GENERIC); - try (CloseableIterable tasksIterable = scan.planTasks()) { - tasksIterable.forEach(task -> { - if (applyResidual && (model == InMemoryDataModel.HIVE || model == InMemoryDataModel.PIG)) { - //TODO: We do not support residual evaluation for HIVE and PIG in memory data model yet - checkResiduals(task); - } - splits.add(new IcebergSplit(conf, task)); - }); - } catch (IOException e) { - throw new RuntimeIOException(e, "Failed to close table scan: %s", scan); - } - - return splits; + return scan; } private 
static void checkResiduals(CombinedScanTask task) { @@ -264,9 +139,9 @@ private static void checkResiduals(CombinedScanTask task) { Expression residual = fileScanTask.residual(); if (residual != null && !residual.equals(Expressions.alwaysTrue())) { throw new UnsupportedOperationException( - String.format( - "Filter expression %s is not completely satisfied. Additional rows " + - "can be returned not satisfied by the filter expression", residual)); + String.format( + "Filter expression %s is not completely satisfied. Additional rows " + + "can be returned not satisfied by the filter expression", residual)); } }); } @@ -282,7 +157,7 @@ private static final class IcebergRecordReader extends RecordReader private Schema expectedSchema; private boolean reuseContainers; private boolean caseSensitive; - private InMemoryDataModel inMemoryDataModel; + private InputFormatConfig.InMemoryDataModel inMemoryDataModel; private Iterator tasks; private T currentRow; private CloseableIterator currentIterator; @@ -294,12 +169,13 @@ public void initialize(InputSplit split, TaskAttemptContext newContext) { CombinedScanTask task = ((IcebergSplit) split).task; this.context = newContext; this.tasks = task.files().iterator(); - this.tableSchema = SchemaParser.fromJson(conf.get(TABLE_SCHEMA)); - String readSchemaStr = conf.get(READ_SCHEMA); + this.tableSchema = SchemaParser.fromJson(conf.get(InputFormatConfig.TABLE_SCHEMA)); + String readSchemaStr = conf.get(InputFormatConfig.READ_SCHEMA); this.expectedSchema = readSchemaStr != null ? SchemaParser.fromJson(readSchemaStr) : tableSchema; - this.reuseContainers = conf.getBoolean(REUSE_CONTAINERS, false); - this.caseSensitive = conf.getBoolean(CASE_SENSITIVE, true); - this.inMemoryDataModel = conf.getEnum(IN_MEMORY_DATA_MODEL, InMemoryDataModel.GENERIC); + this.reuseContainers = conf.getBoolean(InputFormatConfig.REUSE_CONTAINERS, false); + this.caseSensitive = conf.getBoolean(InputFormatConfig.CASE_SENSITIVE, true); + this.inMemoryDataModel = conf.getEnum(InputFormatConfig.IN_MEMORY_DATA_MODEL, + InputFormatConfig.InMemoryDataModel.GENERIC); this.currentIterator = open(tasks.next(), expectedSchema).iterator(); } @@ -346,141 +222,10 @@ public void close() throws IOException { } private CloseableIterable open(FileScanTask currentTask, Schema readSchema) { - DataFile file = currentTask.file(); - // TODO we should make use of FileIO to create inputFile - InputFile inputFile = HadoopInputFile.fromLocation(file.path(), context.getConfiguration()); - CloseableIterable iterable; - switch (file.format()) { - case AVRO: - iterable = newAvroIterable(inputFile, currentTask, readSchema); - break; - case ORC: - iterable = newOrcIterable(inputFile, currentTask, readSchema); - break; - case PARQUET: - iterable = newParquetIterable(inputFile, currentTask, readSchema); - break; - default: - throw new UnsupportedOperationException( - String.format("Cannot read %s file: %s", file.format().name(), file.path())); - } - + org.apache.iceberg.mr.IcebergRecordReader wrappedReader = new org.apache.iceberg.mr.IcebergRecordReader(); + CloseableIterable iterable = wrappedReader.createReader(context.getConfiguration(), currentTask, readSchema); return iterable; } - - private CloseableIterable applyResidualFiltering(CloseableIterable iter, Expression residual, - Schema readSchema) { - boolean applyResidual = !context.getConfiguration().getBoolean(SKIP_RESIDUAL_FILTERING, false); - - if (applyResidual && residual != null && residual != Expressions.alwaysTrue()) { - Evaluator filter = new 
Evaluator(readSchema.asStruct(), residual, caseSensitive); - return CloseableIterable.filter(iter, record -> filter.eval((StructLike) record)); - } else { - return iter; - } - } - - private CloseableIterable newAvroIterable( - InputFile inputFile, FileScanTask task, Schema readSchema) { - Avro.ReadBuilder avroReadBuilder = Avro.read(inputFile) - .project(readSchema) - .split(task.start(), task.length()); - if (reuseContainers) { - avroReadBuilder.reuseContainers(); - } - - switch (inMemoryDataModel) { - case PIG: - case HIVE: - //TODO implement value readers for Pig and Hive - throw new UnsupportedOperationException("Avro support not yet supported for Pig and Hive"); - case GENERIC: - avroReadBuilder.createReaderFunc( - (expIcebergSchema, expAvroSchema) -> - DataReader.create(expIcebergSchema, expAvroSchema, - constantsMap(task, IdentityPartitionConverters::convertConstant))); - } - return applyResidualFiltering(avroReadBuilder.build(), task.residual(), readSchema); - } - - private CloseableIterable newParquetIterable(InputFile inputFile, FileScanTask task, Schema readSchema) { - Parquet.ReadBuilder parquetReadBuilder = Parquet.read(inputFile) - .project(readSchema) - .filter(task.residual()) - .caseSensitive(caseSensitive) - .split(task.start(), task.length()); - if (reuseContainers) { - parquetReadBuilder.reuseContainers(); - } - - switch (inMemoryDataModel) { - case PIG: - case HIVE: - //TODO implement value readers for Pig and Hive - throw new UnsupportedOperationException("Parquet support not yet supported for Pig and Hive"); - case GENERIC: - parquetReadBuilder.createReaderFunc( - fileSchema -> GenericParquetReaders.buildReader( - readSchema, fileSchema, constantsMap(task, IdentityPartitionConverters::convertConstant))); - } - return applyResidualFiltering(parquetReadBuilder.build(), task.residual(), readSchema); - } - - private CloseableIterable newOrcIterable(InputFile inputFile, FileScanTask task, Schema readSchema) { - ORC.ReadBuilder orcReadBuilder = ORC.read(inputFile) - .project(readSchema) - .filter(task.residual()) - .caseSensitive(caseSensitive) - .split(task.start(), task.length()); - // ORC does not support reuse containers yet - switch (inMemoryDataModel) { - case PIG: - case HIVE: - //TODO: implement value readers for Pig and Hive - throw new UnsupportedOperationException("ORC support not yet supported for Pig and Hive"); - case GENERIC: - orcReadBuilder.createReaderFunc( - fileSchema -> GenericOrcReader.buildReader( - readSchema, fileSchema, constantsMap(task, IdentityPartitionConverters::convertConstant))); - } - - return applyResidualFiltering(orcReadBuilder.build(), task.residual(), readSchema); - } - - private Map constantsMap(FileScanTask task, BiFunction converter) { - PartitionSpec spec = task.spec(); - Set idColumns = spec.identitySourceIds(); - Schema partitionSchema = TypeUtil.select(expectedSchema, idColumns); - boolean projectsIdentityPartitionColumns = !partitionSchema.columns().isEmpty(); - if (projectsIdentityPartitionColumns) { - return PartitionUtil.constantsMap(task, converter); - } else { - return Collections.emptyMap(); - } - } - } - - private static Table findTable(Configuration conf) { - String path = conf.get(TABLE_PATH); - Preconditions.checkArgument(path != null, "Table path should not be null"); - if (path.contains("/")) { - HadoopTables tables = new HadoopTables(conf); - return tables.load(path); - } - - String catalogFuncClass = conf.get(CATALOG); - if (catalogFuncClass != null) { - Function catalogFunc = (Function) - 
DynConstructors.builder(Function.class) - .impl(catalogFuncClass) - .build() - .newInstance(); - Catalog catalog = catalogFunc.apply(conf); - TableIdentifier tableIdentifier = TableIdentifier.parse(path); - return catalog.loadTable(tableIdentifier); - } else { - throw new IllegalArgumentException("No custom catalog specified to load table " + path); - } } static class IcebergSplit extends InputSplit implements Writable { @@ -501,7 +246,7 @@ public long getLength() { @Override public String[] getLocations() { - boolean localityPreferred = conf.getBoolean(LOCALITY, false); + boolean localityPreferred = conf.getBoolean(InputFormatConfig.LOCALITY, false); if (!localityPreferred) { return ANYWHERE; } diff --git a/mr/src/main/java/org/apache/iceberg/mr/mapreduce/TableResolver.java b/mr/src/main/java/org/apache/iceberg/mr/mapreduce/TableResolver.java new file mode 100644 index 000000000000..7775e7ea9409 --- /dev/null +++ b/mr/src/main/java/org/apache/iceberg/mr/mapreduce/TableResolver.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
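With the builder moved out of the input format, job wiring now goes through InputFormatConfig.ConfigBuilder. A sketch of the intended call pattern, assuming the moved builder keeps the method names of the class removed above (readFrom, filter, splitSize, preferLocality); the table path and filter are illustrative values only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.mr.mapreduce.IcebergInputFormat;

public class ConfigureJobSketch {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration());

    // configure() sets IcebergInputFormat as the job's input format and
    // hands back the builder for the remaining settings.
    IcebergInputFormat.configure(job)
        .readFrom("hdfs://namenode:8020/warehouse/db/tbl")
        .filter(Expressions.greaterThan("id", 0L))
        .splitSize(134217728L) // 128 MB
        .preferLocality();
  }
}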
+ */ + +package org.apache.iceberg.mr.mapreduce; + +import java.util.function.Function; +import org.apache.hadoop.conf.Configuration; +import org.apache.iceberg.Table; +import org.apache.iceberg.catalog.Catalog; +import org.apache.iceberg.catalog.TableIdentifier; +import org.apache.iceberg.common.DynConstructors; +import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.mr.InputFormatConfig; +import org.apache.iceberg.relocated.com.google.common.base.Preconditions; + +final class TableResolver { + + private TableResolver() { + } + + public static Table findTable(Configuration conf) { + String path = conf.get(InputFormatConfig.TABLE_PATH); + Preconditions.checkArgument(path != null, "Table path should not be null"); + if (path.contains("/")) { + HadoopTables tables = new HadoopTables(conf); + return tables.load(path); + } + + String catalogFuncClass = conf.get(InputFormatConfig.CATALOG); + if (catalogFuncClass != null) { + Function catalogFunc = (Function) + DynConstructors.builder(Function.class) + .impl(catalogFuncClass) + .build() + .newInstance(); + Catalog catalog = catalogFunc.apply(conf); + TableIdentifier tableIdentifier = TableIdentifier.parse(path); + return catalog.loadTable(tableIdentifier); + } else { + throw new IllegalArgumentException("No custom catalog specified to load table " + path); + } + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/TestHelpers.java b/mr/src/test/java/org/apache/iceberg/mr/TestHelpers.java new file mode 100644 index 000000000000..8db577f00ed4 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/TestHelpers.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import org.apache.iceberg.DataFile; +import org.apache.iceberg.DataFiles; +import org.apache.iceberg.FileFormat; +import org.apache.iceberg.Files; +import org.apache.iceberg.Schema; +import org.apache.iceberg.StructLike; +import org.apache.iceberg.Table; +import org.apache.iceberg.avro.Avro; +import org.apache.iceberg.data.GenericRecord; +import org.apache.iceberg.data.Record; +import org.apache.iceberg.data.avro.DataWriter; +import org.apache.iceberg.data.orc.GenericOrcWriter; +import org.apache.iceberg.data.parquet.GenericParquetWriter; +import org.apache.iceberg.io.FileAppender; +import org.apache.iceberg.orc.ORC; +import org.apache.iceberg.parquet.Parquet; +import org.apache.iceberg.types.Types; + +import static org.apache.iceberg.types.Types.NestedField.optional; +import static org.apache.iceberg.types.Types.NestedField.required; + +public class TestHelpers { + + private TestHelpers() {} + + /** + * Implements {@link StructLike#get} for passing data in tests. 
+ */ + public static class Row implements StructLike { + public static Row of(Object... values) { + return new Row(values); + } + + private final Object[] values; + + private Row(Object... values) { + this.values = values; + } + + @Override + public int size() { + return values.length; + } + + @Override + @SuppressWarnings("unchecked") + public <T> T get(int pos, Class<T> javaClass) { + return javaClass.cast(values[pos]); + } + + @Override + public <T> void set(int pos, T value) { + throw new UnsupportedOperationException("Setting values is not supported"); + } + } + + public static DataFile writeFile(File targetFile, + Table table, StructLike partitionData, FileFormat fileFormat, List<Record> records) throws IOException { + if (targetFile.exists()) { + if (!targetFile.delete()) { + throw new IOException("Unable to delete " + targetFile.getAbsolutePath()); + } + } + FileAppender<Record> appender; + switch (fileFormat) { + case AVRO: + appender = Avro.write(Files.localOutput(targetFile)) + .schema(table.schema()) + .createWriterFunc(DataWriter::create) + .named(fileFormat.name()) + .build(); + break; + case PARQUET: + appender = Parquet.write(Files.localOutput(targetFile)) + .schema(table.schema()) + .createWriterFunc(GenericParquetWriter::buildWriter) + .named(fileFormat.name()) + .build(); + break; + case ORC: + appender = ORC.write(Files.localOutput(targetFile)) + .schema(table.schema()) + .createWriterFunc(GenericOrcWriter::buildWriter) + .build(); + break; + default: + throw new UnsupportedOperationException("Cannot write format: " + fileFormat); + } + + try { + appender.addAll(records); + } finally { + appender.close(); + } + + DataFiles.Builder builder = DataFiles.builder(table.spec()) + .withPath(targetFile.toString()) + .withFormat(fileFormat) + .withFileSizeInBytes(targetFile.length()) + .withMetrics(appender.metrics()); + if (partitionData != null) { + builder.withPartition(partitionData); + } + return builder.build(); + } + + /** + * Based on: https://github.com/apache/incubator-iceberg/blob/master/ + * spark/src/test/java/org/apache/iceberg/spark/source/SimpleRecord.java + */ + public static Record createSimpleRecord(long id, String data) { + Schema schema = new Schema(required(1, "id", Types.LongType.get()), + optional(2, "data", Types.StringType.get())); + GenericRecord record = GenericRecord.create(schema); + record.setField("id", id); + record.setField("data", data); + return record; + } + + public static Record createCustomRecord(Schema schema, List<Object> dataValues) { + GenericRecord record = GenericRecord.create(schema); + List<Types.NestedField> fields = schema.columns(); + for (int i = 0; i < fields.size(); i++) { + record.setField(fields.get(i).name(), dataValues.get(i)); + } + return record; + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestHiveIcebergInputFormat.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestHiveIcebergInputFormat.java new file mode 100644 index 000000000000..80ec34ec0487 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestHiveIcebergInputFormat.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
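Row exists so tests can hand a partition tuple to DataFiles.Builder#withPartition without building a full Record. A quick illustration (the partition values are made up):

import org.apache.iceberg.StructLike;
import org.apache.iceberg.mr.TestHelpers;

public class RowSketch {
  public static void main(String[] args) {
    // A two-value partition tuple, e.g. (date, hour).
    StructLike partition = TestHelpers.Row.of("2020-06-15", 10);
    System.out.println(partition.size());                 // 2
    System.out.println(partition.get(0, String.class));   // 2020-06-15
    System.out.println(partition.get(1, Integer.class));  // 10
  }
}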
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr.mapred; + +import com.klarna.hiverunner.HiveShell; +import com.klarna.hiverunner.StandaloneHiveRunner; +import com.klarna.hiverunner.annotations.HiveSQL; +import java.io.File; +import java.io.IOException; +import java.util.List; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.Table; +import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.types.Types; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +import static org.apache.iceberg.types.Types.NestedField.optional; +import static org.apache.iceberg.types.Types.NestedField.required; +import static org.junit.Assert.assertEquals; + +@RunWith(StandaloneHiveRunner.class) +public class TestHiveIcebergInputFormat { + + @HiveSQL(files = {}, autoStart = true) + private HiveShell shell; + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + private File tableLocation; + + @Before + public void before() throws IOException { + tableLocation = temp.newFolder(); + Schema schema = new Schema(required(1, "id", Types.LongType.get()), + optional(2, "data", Types.StringType.get())); + PartitionSpec spec = PartitionSpec.unpartitioned(); + + HadoopTables tables = new HadoopTables(); + Table table = tables.create(schema, spec, tableLocation.getAbsolutePath()); + } + + @Test + public void emptyTable() { + shell.execute("CREATE DATABASE source_db"); + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a ") + .append("(id INT, data STRING) ") + .append("STORED AS ") + .append("INPUTFORMAT 'org.apache.iceberg.mr.mapred.IcebergInputFormat' ") + .append("OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath()) + .append("' TBLPROPERTIES ('iceberg.catalog'='hadoop.tables'") + .append(")") + .toString()); + + List result = shell.executeStatement("SELECT id, data FROM source_db.table_a"); + + assertEquals(0, result.size()); + } + + //TODO: when HiveSerde and StorageHandlers merged in, move over additional tests from Hiveberg +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestIcebergFilterFactory.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestIcebergFilterFactory.java new file mode 100644 index 000000000000..4c5338e13233 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestIcebergFilterFactory.java @@ -0,0 +1,327 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
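Once the StringBuilder in the test above is assembled and a sample location substituted, the statement Hive executes reads as follows; note that the table is declared without a SerDe and that TBLPROPERTIES selects the HadoopTables catalog:

// Assembled form of the CREATE TABLE above; the LOCATION path is a sample.
String ddl =
    "CREATE TABLE source_db.table_a " +
    "(id INT, data STRING) " +
    "STORED AS " +
    "INPUTFORMAT 'org.apache.iceberg.mr.mapred.IcebergInputFormat' " +
    "OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' " +
    "LOCATION '/tmp/junit/table_a' " +
    "TBLPROPERTIES ('iceberg.catalog'='hadoop.tables')";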
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr.mapred; + +import java.math.BigDecimal; +import java.sql.Date; +import java.sql.Timestamp; +import java.time.LocalDate; +import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf; +import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; +import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.iceberg.expressions.And; +import org.apache.iceberg.expressions.Expressions; +import org.apache.iceberg.expressions.Not; +import org.apache.iceberg.expressions.Or; +import org.apache.iceberg.expressions.UnboundPredicate; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TestIcebergFilterFactory { + + @Test + public void testEqualsOperand() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder.startAnd().equals("salary", PredicateLeaf.Type.LONG, 3000L).end().build(); + + UnboundPredicate expected = Expressions.equal("salary", 3000L); + UnboundPredicate actual = (UnboundPredicate) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.literal(), expected.literal()); + assertEquals(actual.ref().name(), expected.ref().name()); + } + + @Test + public void testNotEqualsOperand() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder.startNot().equals("salary", PredicateLeaf.Type.LONG, 3000L).end().build(); + + Not expected = (Not) Expressions.not(Expressions.equal("salary", 3000L)); + Not actual = (Not) IcebergFilterFactory.generateFilterExpression(arg); + + UnboundPredicate childExpressionActual = (UnboundPredicate) actual.child(); + UnboundPredicate childExpressionExpected = Expressions.equal("salary", 3000L); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.child().op(), expected.child().op()); + assertEquals(childExpressionActual.ref().name(), childExpressionExpected.ref().name()); + assertEquals(childExpressionActual.literal(), childExpressionExpected.literal()); + } + + @Test + public void testLessThanOperand() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder.startAnd().lessThan("salary", PredicateLeaf.Type.LONG, 3000L).end().build(); + + UnboundPredicate expected = Expressions.lessThan("salary", 3000L); + UnboundPredicate actual = (UnboundPredicate) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.literal(), expected.literal()); + assertEquals(actual.ref().name(), expected.ref().name()); + } + + @Test + public void testLessThanEqualsOperand() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder.startAnd().lessThanEquals("salary", PredicateLeaf.Type.LONG, 3000L).end().build(); + + UnboundPredicate expected = Expressions.lessThanOrEqual("salary", 3000L); + UnboundPredicate actual = (UnboundPredicate) IcebergFilterFactory.generateFilterExpression(arg); + + 
assertEquals(actual.op(), expected.op()); + assertEquals(actual.literal(), expected.literal()); + assertEquals(actual.ref().name(), expected.ref().name()); + } + + @Test + public void testInOperandWithLong() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder.startAnd().in("salary", PredicateLeaf.Type.LONG, 3000L, 4000L).end().build(); + + UnboundPredicate expected = Expressions.in("salary", 3000L, 4000L); + UnboundPredicate actual = (UnboundPredicate) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.literals(), expected.literals()); + assertEquals(actual.ref().name(), expected.ref().name()); + } + + @Test + public void testInOperandWithDecimal() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder.startAnd().in("decimal", PredicateLeaf.Type.DECIMAL, + new HiveDecimalWritable("12.14"), new HiveDecimalWritable("13.15")).end().build(); + + UnboundPredicate expected = Expressions.in("decimal", BigDecimal.valueOf(12.14), BigDecimal.valueOf(13.15)); + UnboundPredicate actual = (UnboundPredicate) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.literals(), expected.literals()); + assertEquals(actual.ref().name(), expected.ref().name()); + } + + @Test + public void testInOperandWithDate() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder + .startAnd() + .in("date", PredicateLeaf.Type.DATE, + Date.valueOf("2020-06-15"), Date.valueOf("2021-06-15")) + .end() + .build(); + + UnboundPredicate expected = Expressions.in("date", LocalDate.of(2020, 6, 15).toEpochDay(), + LocalDate.of(2021, 6, 15).toEpochDay()); + UnboundPredicate actual = (UnboundPredicate) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.literals(), expected.literals()); + assertEquals(actual.ref().name(), expected.ref().name()); + assertEquals(expected.toString(), actual.toString()); + } + + @Test + public void testInOperandWithTimestamp() { + Timestamp timestampHiveFilterOne = Timestamp.valueOf("2016-11-16 06:43:19.77"); + Timestamp timestampHiveFilterTwo = Timestamp.valueOf("2017-11-16 06:43:19.77"); + + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder + .startAnd() + .in("timestamp", PredicateLeaf.Type.TIMESTAMP, timestampHiveFilterOne, timestampHiveFilterTwo) + .end() + .build(); + + UnboundPredicate expected = Expressions.in("timestamp", + 1479278599770000L, 1510814599770000L); + UnboundPredicate actual = (UnboundPredicate) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(expected.op(), actual.op()); + assertEquals(expected.literals(), actual.literals()); + assertEquals(expected.ref().name(), actual.ref().name()); + assertEquals(expected.toString(), actual.toString()); + } + + + @Test + public void testBetweenOperand() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder + .startAnd() + .between("salary", PredicateLeaf.Type.LONG, 3000L, 4000L).end().build(); + + And expected = (And) Expressions.and(Expressions.greaterThanOrEqual("salary", 3000L), + Expressions.lessThanOrEqual("salary", 4000L)); + And actual = (And) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.left().op(),
expected.left().op()); + assertEquals(actual.right().op(), expected.right().op()); + } + + @Test + public void testIsNullOperand() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder.startAnd().isNull("salary", PredicateLeaf.Type.LONG).end().build(); + + UnboundPredicate expected = Expressions.isNull("salary"); + UnboundPredicate actual = (UnboundPredicate) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.ref().name(), expected.ref().name()); + } + + @Test + public void testAndOperand() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder + .startAnd() + .equals("salary", PredicateLeaf.Type.LONG, 3000L) + .equals("salary", PredicateLeaf.Type.LONG, 4000L) + .end().build(); + + And expected = (And) Expressions + .and(Expressions.equal("salary", 3000L), Expressions.equal("salary", 4000L)); + And actual = (And) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.left().op(), expected.left().op()); + assertEquals(actual.right().op(), expected.right().op()); + } + + @Test + public void testOrOperand() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder + .startOr() + .equals("salary", PredicateLeaf.Type.LONG, 3000L) + .equals("salary", PredicateLeaf.Type.LONG, 4000L) + .end().build(); + + Or expected = (Or) Expressions + .or(Expressions.equal("salary", 3000L), Expressions.equal("salary", 4000L)); + Or actual = (Or) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.left().op(), expected.left().op()); + assertEquals(actual.right().op(), expected.right().op()); + } + + @Test + public void testManyAndOperand() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder + .startAnd() + .equals("salary", PredicateLeaf.Type.LONG, 3000L) + .equals("job", PredicateLeaf.Type.LONG, 4000L) + .equals("name", PredicateLeaf.Type.LONG, 9000L) + .end() + .build(); + + And expected = (And) Expressions.and( + Expressions.equal("salary", 3000L), + Expressions.equal("job", 4000L), + Expressions.equal("name", 9000L)); + + And actual = (And) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.right().op(), expected.right().op()); + assertEquals(actual.left().op(), expected.left().op()); + } + + @Test + public void testManyOrOperand() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder + .startOr() + .equals("salary", PredicateLeaf.Type.LONG, 3000L) + .equals("job", PredicateLeaf.Type.LONG, 4000L) + .equals("name", PredicateLeaf.Type.LONG, 9000L) + .end() + .build(); + + Or expected = (Or) Expressions.or(Expressions.or(Expressions.equal("salary", 3000L), + Expressions.equal("job", 4000L)), Expressions.equal("name", 9000L)); + + Or actual = (Or) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(actual.op(), expected.op()); + assertEquals(actual.right().op(), expected.right().op()); + assertEquals(actual.left().op(), expected.left().op()); + } + + @Test + public void testNestedFilter() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder + .startOr() + .equals("job", PredicateLeaf.Type.STRING, "dev") + .startAnd() + .equals("id", 
PredicateLeaf.Type.LONG, 3L) + .equals("dept", PredicateLeaf.Type.STRING, "300") + .end() + .end() + .build(); + + And expected = (And) Expressions.and(Expressions.or(Expressions.equal("job", "dev"), Expressions.equal( + "id", 3L)), Expressions.or(Expressions.equal("job", "dev"), Expressions.equal("dept", "300"))); + And actual = (And) IcebergFilterFactory.generateFilterExpression(arg); + assertEquals(actual.op(), expected.op()); + assertEquals(actual.right().op(), expected.right().op()); + assertEquals(actual.left().op(), expected.left().op()); + } + + @Test + public void testTypeConversion() { + SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); + SearchArgument arg = builder + .startAnd() + .equals("date", PredicateLeaf.Type.DATE, Date.valueOf("2020-06-15")) + .equals("timestamp", PredicateLeaf.Type.TIMESTAMP, Timestamp.valueOf("2016-11-16 06:43:19.77")) + .equals("decimal", PredicateLeaf.Type.DECIMAL, new HiveDecimalWritable("12.12")) + .equals("string", PredicateLeaf.Type.STRING, "hello world") + .equals("long", PredicateLeaf.Type.LONG, 3020L) + .equals("float", PredicateLeaf.Type.FLOAT, 4400D) + .equals("boolean", PredicateLeaf.Type.BOOLEAN, true) + .end() + .build(); + + Timestamp timestamp = Timestamp.valueOf("2016-11-16 06:43:19.77"); + And expected = (And) Expressions.and( + Expressions.equal("date", LocalDate.of(2020, 6, 15).toEpochDay()), + Expressions.equal("timestamp", 1479278599770000L), + Expressions.equal("decimal", BigDecimal.valueOf(12.12)), + Expressions.equal("string", "hello world"), + Expressions.equal("long", 3020L), + Expressions.equal("float", 4400D), + Expressions.equal("boolean", true)); + + And actual = (And) IcebergFilterFactory.generateFilterExpression(arg); + + assertEquals(expected.toString(), actual.toString()); + assertEquals(expected.op(), actual.op()); + + } +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestIcebergInputFormat.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestIcebergInputFormat.java new file mode 100644 index 000000000000..5d8a71e34d10 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestIcebergInputFormat.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
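Every test above follows the same pattern: build a Hive SearchArgument, convert it, and compare against a hand-built Iceberg expression. The conversion itself, pulled out as a standalone sketch (placed in the same package as IcebergFilterFactory, as the tests are):

import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
import org.apache.iceberg.expressions.Expression;

public class FilterFactorySketch {
  public static void main(String[] args) {
    // Hive pushes predicates down as a SearchArgument ...
    SearchArgument arg = SearchArgumentFactory.newBuilder()
        .startAnd()
        .equals("salary", PredicateLeaf.Type.LONG, 3000L)
        .end()
        .build();

    // ... which the factory turns into an Iceberg Expression equivalent
    // to Expressions.equal("salary", 3000L).
    Expression filter = IcebergFilterFactory.generateFilterExpression(arg);
    System.out.println(filter);
  }
}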
+ */ + +package org.apache.iceberg.mr.mapred; + +import com.klarna.hiverunner.HiveShell; +import com.klarna.hiverunner.StandaloneHiveRunner; +import com.klarna.hiverunner.annotations.HiveSQL; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.iceberg.DataFile; +import org.apache.iceberg.FileFormat; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.Table; +import org.apache.iceberg.data.Record; +import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.mr.InputFormatConfig; +import org.apache.iceberg.mr.TestHelpers; +import org.apache.iceberg.types.Types; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +import static org.apache.iceberg.types.Types.NestedField.optional; +import static org.apache.iceberg.types.Types.NestedField.required; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +@RunWith(StandaloneHiveRunner.class) +public class TestIcebergInputFormat { + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + @HiveSQL(files = {}, autoStart = true) + private HiveShell shell; + + private IcebergInputFormat inputFormat = new IcebergInputFormat(); + private File tableLocation; + private JobConf conf = new JobConf(); + + @Before + public void before() throws IOException { + tableLocation = temp.newFolder(); + Schema schema = new Schema(required(1, "id", Types.LongType.get()), + optional(2, "data", Types.StringType.get())); + PartitionSpec spec = PartitionSpec.unpartitioned(); + + HadoopTables tables = new HadoopTables(); + Table table = tables.create(schema, spec, tableLocation.getAbsolutePath()); + + List data = new ArrayList<>(); + data.add(TestHelpers.createSimpleRecord(1L, "Michael")); + data.add(TestHelpers.createSimpleRecord(2L, "Andy")); + data.add(TestHelpers.createSimpleRecord(3L, "Berta")); + + DataFile fileA = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, data); + table.newAppend().appendFile(fileA).commit(); + } + + @Test + public void testGetSplits() throws IOException { + IcebergInputFormat format = new IcebergInputFormat(); + conf.set(InputFormatConfig.TABLE_LOCATION, tableLocation.getAbsolutePath()); + conf.set(InputFormatConfig.TABLE_NAME, "source_db.table_a"); + InputSplit[] splits = format.getSplits(conf, 1); + assertEquals(splits.length, 1); + } + + @Test(expected = NullPointerException.class) + public void testGetSplitsNoLocation() throws IOException { + conf.set(InputFormatConfig.CATALOG_NAME, InputFormatConfig.HADOOP_TABLES); + conf.set(InputFormatConfig.TABLE_NAME, "source_db.table_a"); + inputFormat.getSplits(conf, 1); + } + + @Ignore("Requires SerDe") + @Test + public void testInputFormat() { + shell.execute("CREATE DATABASE source_db"); + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a ") + .append("ROW FORMAT SERDE 'org.apache.iceberg.mr.mapred.IcebergSerDe' ") + .append("STORED AS ") + .append("INPUTFORMAT 'org.apache.iceberg.mr.mapred.IcebergInputFormat' ") + .append("OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath()) + .append("' TBLPROPERTIES 
('iceberg.catalog'='hadoop.tables'") + .append(")") + .toString()); + + List result = shell.executeStatement("SELECT id, data FROM source_db.table_a"); + + assertEquals(3, result.size()); + assertArrayEquals(new Object[]{1L, "Michael"}, result.get(0)); + assertArrayEquals(new Object[]{2L, "Andy"}, result.get(1)); + assertArrayEquals(new Object[]{3L, "Berta"}, result.get(2)); + } + + private List readRecords(JobConf jobConf) throws IOException { + InputSplit[] splits = inputFormat.getSplits(jobConf, 1); + RecordReader reader = inputFormat.getRecordReader(splits[0], jobConf, null); + List records = new ArrayList<>(); + IcebergWritable value = (IcebergWritable) reader.createValue(); + while (reader.next(null, value)) { + records.add(value.record().copy()); + } + return records; + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestIcebergSerDe.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestIcebergSerDe.java new file mode 100644 index 000000000000..937ddba64da2 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestIcebergSerDe.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
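The readRecords helper above copies each record (value.record().copy()) before collecting it. A sketch of why, assuming the reader reuses the backing container between next() calls, mirroring the helper's raw-typed usage:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.iceberg.data.Record;

public class CopySketch {
  static List<Record> drain(RecordReader reader) throws IOException {
    List<Record> records = new ArrayList<>();
    IcebergWritable value = (IcebergWritable) reader.createValue();
    while (reader.next(null, value)) {
      // Without copy(), every list entry could end up pointing at the same
      // mutable Record instance that the reader keeps rewriting.
      records.add(value.record().copy());
    }
    return records;
  }
}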
+ */ + +package org.apache.iceberg.mr.mapred; + +import java.io.File; +import java.io.IOException; +import java.util.Collections; +import java.util.Properties; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.data.RandomGenericData; +import org.apache.iceberg.data.Record; +import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.mr.InputFormatConfig; +import org.apache.iceberg.mr.mapred.serde.objectinspector.IcebergObjectInspector; +import org.apache.iceberg.types.Types; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.apache.iceberg.types.Types.NestedField.required; + +public class TestIcebergSerDe { + + private static final Schema schema = new Schema(required(1, "string_field", Types.StringType.get())); + + @Rule + public TemporaryFolder tmp = new TemporaryFolder(); + + @Test + public void testInitialize() throws IOException, SerDeException { + File location = tmp.newFolder(); + Assert.assertTrue(location.delete()); + + Configuration conf = new Configuration(); + + Properties properties = new Properties(); + properties.setProperty(InputFormatConfig.CATALOG_NAME, InputFormatConfig.HADOOP_TABLES); + properties.setProperty(InputFormatConfig.TABLE_LOCATION, location.toString()); + + HadoopTables tables = new HadoopTables(conf); + tables.create(schema, PartitionSpec.unpartitioned(), Collections.emptyMap(), location.toString()); + + IcebergSerDe serDe = new IcebergSerDe(); + serDe.initialize(conf, properties); + + Assert.assertEquals(IcebergObjectInspector.create(schema), serDe.getObjectInspector()); + } + + @Test + public void testDeserialize() { + IcebergSerDe serDe = new IcebergSerDe(); + + Record record = RandomGenericData.generate(schema, 1, 0).get(0); + IcebergWritable writable = new IcebergWritable(record, schema); + + Assert.assertEquals(record, serDe.deserialize(writable)); + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestInputFormatWithEmptyTable.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestInputFormatWithEmptyTable.java new file mode 100644 index 000000000000..188adfc739c8 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestInputFormatWithEmptyTable.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
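The two tests above suggest the read path in one place: the SerDe turns an IcebergWritable back into an Iceberg Record, and field access then goes through the ObjectInspector rather than the Record itself. A sketch under those assumptions (same package as the SerDe; property keys as in testInitialize):

import java.util.Properties;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;

public class SerDeFlowSketch {
  static void demo(IcebergSerDe serDe, Configuration conf, Properties props,
                   IcebergWritable writable) throws Exception {
    serDe.initialize(conf, props);

    Object row = serDe.deserialize(writable); // an Iceberg Record
    StructObjectInspector oi = (StructObjectInspector) serDe.getObjectInspector();

    // Look the field up by name, then read it out of the row.
    StructField field = oi.getStructFieldRef("string_field");
    System.out.println(oi.getStructFieldData(row, field));
  }
}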
+ */ + +package org.apache.iceberg.mr.mapred; + +import com.klarna.hiverunner.HiveShell; +import com.klarna.hiverunner.StandaloneHiveRunner; +import com.klarna.hiverunner.annotations.HiveSQL; +import java.io.File; +import java.io.IOException; +import java.util.List; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.Table; +import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.types.Types; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +import static org.apache.iceberg.types.Types.NestedField.optional; +import static org.apache.iceberg.types.Types.NestedField.required; +import static org.junit.Assert.assertEquals; + +@RunWith(StandaloneHiveRunner.class) +public class TestInputFormatWithEmptyTable { + + @HiveSQL(files = {}, autoStart = true) + private HiveShell shell; + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + private File tableLocation; + + @Before + public void before() throws IOException { + tableLocation = temp.newFolder(); + Schema schema = new Schema(required(1, "id", Types.LongType.get()), + optional(2, "data", Types.StringType.get())); + PartitionSpec spec = PartitionSpec.unpartitioned(); + + HadoopTables tables = new HadoopTables(); + Table table = tables.create(schema, spec, tableLocation.getAbsolutePath()); + } + + @Test + public void testInputFormat() { + shell.execute("CREATE DATABASE source_db"); + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a ") + .append("ROW FORMAT SERDE 'org.apache.iceberg.mr.mapred.IcebergSerDe' ") + .append("STORED AS ") + .append("INPUTFORMAT 'org.apache.iceberg.mr.mapred.IcebergInputFormat' ") + .append("OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath()) + .append("' TBLPROPERTIES ('iceberg.catalog'='hadoop.tables'") + .append(")") + .toString()); + + List<Object[]> result = shell.executeStatement("SELECT id, data FROM source_db.table_a"); + + assertEquals(0, result.size()); + } +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestInputFormatWithHadoopTables.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestInputFormatWithHadoopTables.java new file mode 100644 index 000000000000..806c79c5ba20 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestInputFormatWithHadoopTables.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.iceberg.mr.mapred; + +import com.klarna.hiverunner.HiveShell; +import com.klarna.hiverunner.StandaloneHiveRunner; +import com.klarna.hiverunner.annotations.HiveSQL; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.iceberg.DataFile; +import org.apache.iceberg.FileFormat; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.Table; +import org.apache.iceberg.data.Record; +import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.mr.TestHelpers; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.apache.iceberg.types.Types; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +import static org.apache.iceberg.types.Types.NestedField.optional; +import static org.apache.iceberg.types.Types.NestedField.required; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +@RunWith(StandaloneHiveRunner.class) +public class TestInputFormatWithHadoopTables { + + @HiveSQL(files = {}, autoStart = true) + private HiveShell shell; + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + private File tableLocation; + private IcebergInputFormat format = new IcebergInputFormat(); + private JobConf conf = new JobConf(); + + @Before + public void before() throws IOException { + tableLocation = temp.newFolder(); + Schema schema = new Schema(required(1, "id", Types.LongType.get()), + optional(2, "data", Types.StringType.get())); + PartitionSpec spec = PartitionSpec.unpartitioned(); + + HadoopTables tables = new HadoopTables(); + Table table = tables.create(schema, spec, tableLocation.getAbsolutePath()); + + List<Record> data = new ArrayList<>(); + data.add(TestHelpers.createSimpleRecord(1L, "Michael")); + data.add(TestHelpers.createSimpleRecord(2L, "Andy")); + data.add(TestHelpers.createSimpleRecord(3L, "Berta")); + + DataFile fileA = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, data); + table.newAppend().appendFile(fileA).commit(); + } + + @Test + public void testInputFormat() { + shell.execute("CREATE DATABASE source_db"); + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a ") + .append("ROW FORMAT SERDE 'org.apache.iceberg.mr.mapred.IcebergSerDe' ") + .append("STORED AS ") + .append("INPUTFORMAT 'org.apache.iceberg.mr.mapred.IcebergInputFormat' ") + .append("OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath()) + .append("'") + .toString()); + + List<Object[]> result = shell.executeStatement("SELECT id, data FROM source_db.table_a"); + + assertEquals(3, result.size()); + assertArrayEquals(new Object[]{1L, "Michael"}, result.get(0)); + assertArrayEquals(new Object[]{2L, "Andy"}, result.get(1)); + assertArrayEquals(new Object[]{3L, "Berta"}, result.get(2)); + } + + @Test + public void testGetSplits() throws IOException { + conf.set("location", tableLocation.getAbsolutePath()); + conf.set("iceberg.catalog", "hadoop.tables"); + conf.set("name", "source_db.table_a"); + InputSplit[] splits = format.getSplits(conf, 1); + assertEquals(1, splits.length); + } + + @Test + public void testGetRecordReader() throws IOException
{ + conf.set("location", tableLocation.getAbsolutePath()); + conf.set("iceberg.catalog", "hadoop.tables"); + conf.set("name", "source_db.table_a"); + InputSplit[] splits = format.getSplits(conf, 1); + RecordReader reader = format.getRecordReader(splits[0], conf, null); + IcebergWritable value = (IcebergWritable) reader.createValue(); + + List<Record> records = Lists.newArrayList(); + while (reader.next(null, value)) { + records.add(value.record().copy()); + } + assertEquals(3, records.size()); + } + + @Test(expected = NullPointerException.class) + public void testGetSplitsNoLocation() throws IOException { + conf.set("iceberg.catalog", "hadoop.tables"); + conf.set("name", "source_db.table_a"); + format.getSplits(conf, 1); + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestInputFormatWithMultipleTasks.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestInputFormatWithMultipleTasks.java new file mode 100644 index 000000000000..02331892deb4 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestInputFormatWithMultipleTasks.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.iceberg.mr.mapred; + +import com.klarna.hiverunner.HiveShell; +import com.klarna.hiverunner.StandaloneHiveRunner; +import com.klarna.hiverunner.annotations.HiveSQL; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.mapred.JobConf; +import org.apache.iceberg.DataFile; +import org.apache.iceberg.FileFormat; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.Table; +import org.apache.iceberg.data.Record; +import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.mr.TestHelpers; +import org.apache.iceberg.types.Types; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +import static org.apache.iceberg.types.Types.NestedField.optional; +import static org.apache.iceberg.types.Types.NestedField.required; +import static org.junit.Assert.assertEquals; + +@RunWith(StandaloneHiveRunner.class) +public class TestInputFormatWithMultipleTasks { + + @HiveSQL(files = {}, autoStart = true) + private HiveShell shell; + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + private File tableLocation; + private IcebergInputFormat format = new IcebergInputFormat(); + private JobConf conf = new JobConf(); + private long snapshotId; + + @Before + public void before() throws IOException { + tableLocation = temp.newFolder(); + Schema schema = new Schema(required(1, "id", Types.LongType.get()), + optional(2, "data", Types.StringType.get())); + PartitionSpec spec = PartitionSpec.unpartitioned(); + + HadoopTables tables = new HadoopTables(); + Table table = tables.create(schema, spec, tableLocation.getAbsolutePath()); + + List<Record> data = new ArrayList<>(); + data.add(TestHelpers.createSimpleRecord(1L, "Michael")); + data.add(TestHelpers.createSimpleRecord(2L, "Andy")); + + DataFile fileA = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, data); + table.newAppend().appendFile(fileA).commit(); + + DataFile fileB = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, data); + table.newAppend().appendFile(fileB).commit(); + + snapshotId = table.currentSnapshot().snapshotId(); + } + + @Ignore("TODO: re-enable this test when snapshot functionality added") + @Test + public void testAllRowsIncludeSnapshotId() { + shell.execute("CREATE DATABASE source_db"); + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a ") + .append("ROW FORMAT SERDE 'org.apache.iceberg.mr.mapred.IcebergSerDe' ") + .append("STORED AS ") + .append("INPUTFORMAT 'org.apache.iceberg.mr.mapred.IcebergInputFormat' ") + .append("OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath()) + .append("'") + .toString()); + + List<Object[]> result = shell.executeStatement("SELECT * FROM source_db.table_a"); + + assertEquals(4, result.size()); + assertEquals(snapshotId, result.get(0)[2]); + assertEquals(snapshotId, result.get(1)[2]); + assertEquals(snapshotId, result.get(2)[2]); + assertEquals(snapshotId, result.get(3)[2]); + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestJoinTablesWithHadoopTables.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestJoinTablesWithHadoopTables.java new file mode 100644 index 000000000000..a717b903807a --- /dev/null +++
b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestJoinTablesWithHadoopTables.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr.mapred; + +import com.klarna.hiverunner.HiveShell; +import com.klarna.hiverunner.StandaloneHiveRunner; +import com.klarna.hiverunner.annotations.HiveSQL; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.apache.iceberg.DataFile; +import org.apache.iceberg.FileFormat; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.Table; +import org.apache.iceberg.data.Record; +import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.mr.TestHelpers; +import org.apache.iceberg.types.Types; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +import static org.apache.iceberg.types.Types.NestedField.optional; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + +@RunWith(StandaloneHiveRunner.class) +public class TestJoinTablesWithHadoopTables { + + @HiveSQL(files = {}, autoStart = false) + private HiveShell shell; + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + private File tableLocationA; + private File tableLocationB; + + @Before + public void before() throws IOException { + tableLocationA = temp.newFolder("table_a"); + tableLocationB = temp.newFolder("table_b"); + Schema schemaA = new Schema(optional(1, "first_name", Types.StringType.get()), + optional(2, "salary", Types.LongType.get()), + optional(3, "id", Types.LongType.get())); + Schema schemaB = new Schema(optional(1, "name", Types.StringType.get()), + optional(2, "salary", Types.LongType.get())); + + PartitionSpec spec = PartitionSpec.unpartitioned(); + + HadoopTables tables = new HadoopTables(); + Table tableA = tables.create(schemaA, spec, tableLocationA.getAbsolutePath()); + Table tableB = tables.create(schemaB, spec, tableLocationB.getAbsolutePath()); + + List<Record> tableAData = new ArrayList<>(); + tableAData.add(TestHelpers.createCustomRecord(schemaA, Arrays.asList("Ella", 3000L, 1L))); + tableAData.add(TestHelpers.createCustomRecord(schemaA, Arrays.asList("Jean", 5000L, 2L))); + tableAData.add(TestHelpers.createCustomRecord(schemaA, Arrays.asList("Joe", 2000L, 3L))); + + DataFile fileA = TestHelpers.writeFile(temp.newFile(), tableA, null, FileFormat.PARQUET, tableAData); + + List<Record> tableBData = new ArrayList<>(); + tableBData.add(TestHelpers.createCustomRecord(schemaB, Arrays.asList("Michael", 3000L))); + tableBData.add(TestHelpers.createCustomRecord(schemaB, Arrays.asList("Andy", 3000L)));
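+ // Berta's salary (4000L) has no counterpart in table_a, so the salary joins below are + // expected to yield exactly two rows: Ella (3000L) matched with Michael and with Andy.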
+ tableBData.add(TestHelpers.createCustomRecord(schemaB, Arrays.asList("Berta", 4000L))); + + DataFile fileB = TestHelpers.writeFile(temp.newFile(), tableB, null, FileFormat.PARQUET, tableBData); + + tableA.newAppend().appendFile(fileA).commit(); + tableB.newAppend().appendFile(fileB).commit(); + shell.start(); + } + + @Test + public void testJoinTablesWithStoredAs() { + shell.execute("CREATE DATABASE source_db"); + shell.execute(new StringBuilder() + .append("CREATE EXTERNAL TABLE source_db.table_a ") + .append("ROW FORMAT SERDE 'org.apache.iceberg.mr.mapred.IcebergSerDe' ") + .append("STORED AS ") + .append("INPUTFORMAT 'org.apache.iceberg.mr.mapred.IcebergInputFormat' ") + .append("OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ") + .append("LOCATION '") + .append(tableLocationA.getAbsolutePath()) + .append("'") + .toString()); + + shell.execute(new StringBuilder() + .append("CREATE EXTERNAL TABLE source_db.table_b ") + .append("ROW FORMAT SERDE 'org.apache.iceberg.mr.mapred.IcebergSerDe' ") + .append("STORED AS ") + .append("INPUTFORMAT 'org.apache.iceberg.mr.mapred.IcebergInputFormat' ") + .append("OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' ") + .append("LOCATION '") + .append(tableLocationB.getAbsolutePath()) + .append("'") + .toString()); + + List<Object[]> result = shell.executeStatement("SELECT table_a.first_name, table_b.name, table_b.salary " + + "FROM source_db.table_a, source_db.table_b WHERE table_a.salary = table_b.salary"); + assertEquals(2, result.size()); + assertArrayEquals(new Object[]{"Ella", "Andy", 3000L}, result.get(0)); + assertArrayEquals(new Object[]{"Ella", "Michael", 3000L}, result.get(1)); + } + + @Test + public void testJoinTablesWithStoredBy() { + shell.execute("CREATE DATABASE source_db"); + shell.execute(new StringBuilder() + .append("CREATE EXTERNAL TABLE source_db.table_a ") + .append("STORED BY 'org.apache.iceberg.mr.mapred.IcebergStorageHandler' ") + .append("LOCATION '") + .append(tableLocationA.getAbsolutePath()) + .append("'") + .toString()); + + shell.execute(new StringBuilder() + .append("CREATE EXTERNAL TABLE source_db.table_b ") + .append("STORED BY 'org.apache.iceberg.mr.mapred.IcebergStorageHandler' ") + .append("LOCATION '") + .append(tableLocationB.getAbsolutePath()) + .append("'") + .toString()); + + List<Object[]> result = shell.executeStatement("SELECT table_a.first_name, table_b.name, table_b.salary " + + "FROM source_db.table_a, source_db.table_b WHERE table_a.salary = table_b.salary"); + assertEquals(2, result.size()); + assertArrayEquals(new Object[]{"Ella", "Andy", 3000L}, result.get(0)); + assertArrayEquals(new Object[]{"Ella", "Michael", 3000L}, result.get(1)); + } +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestPredicatePushdown.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestPredicatePushdown.java new file mode 100644 index 000000000000..c96921a30eb6 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestPredicatePushdown.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr.mapred; + +import com.klarna.hiverunner.HiveShell; +import com.klarna.hiverunner.StandaloneHiveRunner; +import com.klarna.hiverunner.annotations.HiveSQL; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.iceberg.DataFile; +import org.apache.iceberg.FileFormat; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.Table; +import org.apache.iceberg.catalog.TableIdentifier; +import org.apache.iceberg.data.Record; +import org.apache.iceberg.hadoop.HadoopCatalog; +import org.apache.iceberg.mr.TestHelpers; +import org.apache.iceberg.types.Types; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +import static org.apache.iceberg.types.Types.NestedField.optional; +import static org.apache.iceberg.types.Types.NestedField.required; +import static org.junit.Assert.assertEquals; + +@RunWith(StandaloneHiveRunner.class) +public class TestPredicatePushdown { + + @HiveSQL(files = {}, autoStart = true) + private HiveShell shell; + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + private File tableLocation; + private Table table; + + @Before + public void before() throws IOException { + tableLocation = temp.newFolder(); + Schema schema = new Schema(required(1, "id", Types.LongType.get()), + optional(2, "data", Types.StringType.get())); + PartitionSpec spec = PartitionSpec.unpartitioned(); + + Configuration conf = new Configuration(); + HadoopCatalog catalog = new HadoopCatalog(conf, tableLocation.getAbsolutePath()); + TableIdentifier id = TableIdentifier.parse("source_db.table_a"); + table = catalog.createTable(id, schema, spec); + } + + /** + * This test is supposed to check that filter properties set in IcebergStorageHandler#decomposePredicate + * are unset for the next query so that a wrong filter isn't applied to the next read. 
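+ * + * The test body runs an unfiltered scan, then a filtered scan (WHERE id = 1), then an + * unfiltered scan again; if the filter from the second query leaked into the third, the + * final scan would return a single row instead of all three.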
+ */ + @Test + public void testFilterPropertyIsUnsetAfterQuery() throws IOException { + List<Record> dataA = new ArrayList<>(); + dataA.add(TestHelpers.createSimpleRecord(1L, "Michael")); + + List<Record> dataB = new ArrayList<>(); + dataB.add(TestHelpers.createSimpleRecord(2L, "Andy")); + + List<Record> dataC = new ArrayList<>(); + dataC.add(TestHelpers.createSimpleRecord(3L, "Berta")); + + DataFile fileA = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, dataA); + DataFile fileB = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, dataB); + DataFile fileC = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, dataC); + + table.newAppend().appendFile(fileA).commit(); + table.newAppend().appendFile(fileB).commit(); + table.newAppend().appendFile(fileC).commit(); + + shell.execute("CREATE DATABASE source_db"); + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a ") + .append("STORED BY 'org.apache.iceberg.mr.mapred.IcebergStorageHandler' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath() + "/source_db/table_a'") + .toString()); + + List<Object[]> resultFullTable = shell.executeStatement("SELECT * FROM source_db.table_a"); + assertEquals(3, resultFullTable.size()); + + List<Object[]> resultFilterId = shell.executeStatement("SELECT * FROM source_db.table_a WHERE id = 1"); + assertEquals(1, resultFilterId.size()); + + List<Object[]> resultFullTableAfterQuery = shell.executeStatement("SELECT * FROM source_db.table_a"); + assertEquals(3, resultFullTableAfterQuery.size()); + } +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestReadMetadataTables.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestReadMetadataTables.java new file mode 100644 index 000000000000..4b3df31ddc04 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestReadMetadataTables.java @@ -0,0 +1,209 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.apache.iceberg.mr.mapred; + +import com.klarna.hiverunner.HiveShell; +import com.klarna.hiverunner.StandaloneHiveRunner; +import com.klarna.hiverunner.annotations.HiveSQL; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.iceberg.DataFile; +import org.apache.iceberg.FileFormat; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.Snapshot; +import org.apache.iceberg.Table; +import org.apache.iceberg.catalog.TableIdentifier; +import org.apache.iceberg.data.Record; +import org.apache.iceberg.hadoop.HadoopCatalog; +import org.apache.iceberg.mr.TestHelpers; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.apache.iceberg.types.Types; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; + +import static org.apache.iceberg.types.Types.NestedField.optional; +import static org.apache.iceberg.types.Types.NestedField.required; +import static org.junit.Assert.assertEquals; + +@RunWith(StandaloneHiveRunner.class) +public class TestReadMetadataTables { + + @HiveSQL(files = {}, autoStart = true) + private HiveShell shell; + + @Rule + public TemporaryFolder temp = new TemporaryFolder(); + + private File tableLocation; + private Configuration conf = new Configuration(); + private HadoopCatalog catalog; + private Schema schema = new Schema(required(1, "id", Types.LongType.get()), + optional(2, "data", Types.StringType.get())); + private long snapshotId; + + @Before + public void before() throws IOException { + tableLocation = temp.newFolder(); + catalog = new HadoopCatalog(conf, tableLocation.getAbsolutePath()); + PartitionSpec spec = PartitionSpec.unpartitioned(); + + TableIdentifier id = TableIdentifier.parse("source_db.table_a"); + Table table = catalog.createTable(id, schema, spec); + + List<Record> data = new ArrayList<>(); + data.add(TestHelpers.createSimpleRecord(1L, "Michael")); + data.add(TestHelpers.createSimpleRecord(2L, "Andy")); + data.add(TestHelpers.createSimpleRecord(3L, "Berta")); + + DataFile fileA = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, data); + DataFile fileB = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, data); + DataFile fileC = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, data); + table.newAppend().appendFile(fileA).commit(); + table.newAppend().appendFile(fileB).commit(); + table.newAppend().appendFile(fileC).commit(); + + List<Snapshot> snapshots = Lists.newArrayList(table.snapshots().iterator()); + snapshotId = snapshots.get(0).snapshotId(); + } + + @Test + public void testReadSnapshotTable() { + shell.execute("CREATE DATABASE source_db"); + + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a ") + .append("STORED BY 'org.apache.iceberg.mr.mapred.IcebergStorageHandler' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath() + "/source_db/table_a#snapshots'") + .toString()); + + List<Object[]> result = shell.executeStatement("SELECT * FROM source_db.table_a"); + + assertEquals(3, result.size()); + } + + @Test + public void testReadHistoryTable() { + shell.execute("CREATE DATABASE source_db"); + + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a ") + .append("STORED BY 'org.apache.iceberg.mr.mapred.IcebergStorageHandler' ") +
.append("LOCATION '") + .append(tableLocation.getAbsolutePath() + "/source_db/table_a#history'") + .toString()); + + List result = shell.executeStatement("SELECT * FROM source_db.table_a"); + + assertEquals(3, result.size()); + } + + @Ignore("TODO: re-enable this test when snapshot functionality added") + @Test + public void testCreateRegularTableEndingWithSnapshots() throws IOException { + TableIdentifier id = TableIdentifier.parse("source_db.table_a__snapshots"); + Table table = catalog.createTable(id, schema, PartitionSpec.unpartitioned()); + + List data = new ArrayList<>(); + data.add(TestHelpers.createSimpleRecord(1L, "Michael")); + DataFile fileA = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, data); + table.newAppend().appendFile(fileA).commit(); + + shell.execute("CREATE DATABASE source_db"); + + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a__snapshots ") + .append("STORED BY 'org.apache.iceberg.mr.mapred.IcebergStorageHandler' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath() + "/source_db/table_a__snapshots") + .append("' TBLPROPERTIES ('iceberg.snapshots.table'='false')") + .toString()); + + List result = shell.executeStatement("SELECT * FROM source_db.table_a__snapshots"); + + assertEquals(1, result.size()); + } + + @Ignore("TODO: re-enable this test when snapshot functionality added") + @Test + public void testTimeTravelRead() { + shell.execute("CREATE DATABASE source_db"); + + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a ") + .append("STORED BY 'org.apache.iceberg.mr.mapred.IcebergStorageHandler' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath() + "/source_db/table_a'") + .toString()); + + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_a__snapshots ") + .append("STORED BY 'org.apache.iceberg.mr.mapred.IcebergStorageHandler' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath() + "/source_db/table_a#snapshots'") + .toString()); + + List resultLatestTable = shell.executeStatement("SELECT * FROM source_db.table_a"); + assertEquals(9, resultLatestTable.size()); + + List resultFirstSnapshot = shell.executeStatement( + "SELECT * FROM source_db.table_a WHERE SNAPSHOT__ID = " + snapshotId); + assertEquals(3, resultFirstSnapshot.size()); + + List resultLatestSnapshotAgain = shell.executeStatement("SELECT * FROM source_db.table_a"); + assertEquals(9, resultLatestSnapshotAgain.size()); + } + + @Test + public void testCreateTableWithSnapshotIDColumnInSchema() throws IOException { + PartitionSpec spec = PartitionSpec.unpartitioned(); + schema = new Schema(required(1, "snapshot__id", Types.LongType.get()), + optional(2, "data", Types.StringType.get())); + TableIdentifier id = TableIdentifier.parse("source_db.table_b"); + Table table = catalog.createTable(id, schema, spec); + + List data = new ArrayList<>(); + data.add(TestHelpers.createSimpleRecord(1L, "Michael")); + DataFile fileA = TestHelpers.writeFile(temp.newFile(), table, null, FileFormat.PARQUET, data); + table.newAppend().appendFile(fileA).commit(); + + shell.execute("CREATE DATABASE source_db"); + + shell.execute(new StringBuilder() + .append("CREATE TABLE source_db.table_b ") + .append("STORED BY 'org.apache.iceberg.mr.mapred.IcebergStorageHandler' ") + .append("LOCATION '") + .append(tableLocation.getAbsolutePath() + "/source_db/table_b") + .append("' TBLPROPERTIES (") + .append("'iceberg.hive.snapshot.virtual.column.name' = 'metadata_snapshot_id')") + .toString()); 
+ + List<Object[]> resultLatestTable = shell.executeStatement("SELECT * FROM source_db.table_b"); + assertEquals(1, resultLatestTable.size()); + } +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/TestTableResolver.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestTableResolver.java new file mode 100644 index 000000000000..a0835dab15e1 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/TestTableResolver.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr.mapred; + +import java.io.File; +import java.io.IOException; +import java.util.Collections; +import java.util.Properties; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.mapred.JobConf; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.Table; +import org.apache.iceberg.exceptions.NoSuchNamespaceException; +import org.apache.iceberg.hadoop.HadoopTables; +import org.apache.iceberg.mr.InputFormatConfig; +import org.apache.iceberg.types.Types; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.apache.iceberg.types.Types.NestedField.required; + +public class TestTableResolver { + + @Rule + public TemporaryFolder tmp = new TemporaryFolder(); + + private Schema schema = new Schema(required(1, "string_field", Types.StringType.get())); + private File tableLocation; + + @Before + public void before() throws IOException, SerDeException { + tableLocation = tmp.newFolder(); + Configuration conf = new Configuration(); + HadoopTables tables = new HadoopTables(conf); + tables.create(schema, PartitionSpec.unpartitioned(), Collections.emptyMap(), tableLocation.toString()); + } + + @Test + public void resolveTableFromConfigurationDefault() throws IOException { + Configuration conf = new Configuration(); + conf.set(InputFormatConfig.TABLE_LOCATION, tableLocation.getAbsolutePath()); + + Table table = TableResolver.resolveTableFromConfiguration(conf); + Assert.assertEquals(tableLocation.getAbsolutePath(), table.location()); + } + + @Test + public void resolveTableFromConfigurationHadoopTables() throws IOException { + Configuration conf = new Configuration(); + conf.set(InputFormatConfig.CATALOG_NAME, InputFormatConfig.HADOOP_TABLES); + conf.set(InputFormatConfig.TABLE_LOCATION, tableLocation.getAbsolutePath()); + + Table table = TableResolver.resolveTableFromConfiguration(conf); + Assert.assertEquals(tableLocation.getAbsolutePath(), table.location()); + } + + @Test(expected = NullPointerException.class) + public void resolveTableFromConfigurationHadoopTablesNoLocation() throws IOException {
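+ // Only the catalog type is configured here; with no InputFormatConfig.TABLE_LOCATION set, + // resolution is expected to fail with the NullPointerException declared on the annotation.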
+ Configuration conf = new Configuration(); + conf.set(InputFormatConfig.CATALOG_NAME, InputFormatConfig.HADOOP_TABLES); + + TableResolver.resolveTableFromConfiguration(conf); + } + + @Test(expected = NoSuchNamespaceException.class) + public void resolveTableFromConfigurationInvalidName() throws IOException { + Configuration conf = new Configuration(); + conf.set(InputFormatConfig.CATALOG_NAME, "invalid-name"); + + TableResolver.resolveTableFromConfiguration(conf); + } + + @Test + public void resolveTableFromJobConfDefault() throws IOException { + JobConf conf = new JobConf(); + conf.set(InputFormatConfig.TABLE_LOCATION, tableLocation.getAbsolutePath()); + + Table table = TableResolver.resolveTableFromConfiguration(conf); + Assert.assertEquals(tableLocation.getAbsolutePath(), table.location()); + } + + @Test + public void resolveTableFromPropertiesDefault() throws IOException { + Configuration conf = new Configuration(); + Properties properties = new Properties(); + properties.setProperty(InputFormatConfig.TABLE_LOCATION, tableLocation.getAbsolutePath()); + + Table table = TableResolver.resolveTableFromConfiguration(conf, properties); + Assert.assertEquals(tableLocation.getAbsolutePath(), table.location()); + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergBinaryObjectInspector.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergBinaryObjectInspector.java new file mode 100644 index 000000000000..5d88da53cd6c --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergBinaryObjectInspector.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iceberg.mr.mapred.serde.objectinspector; + +import java.nio.ByteBuffer; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.io.BytesWritable; +import org.junit.Assert; +import org.junit.Test; + +public class TestIcebergBinaryObjectInspector { + + @Test + public void testIcebergBinaryObjectInspector() { + BinaryObjectInspector oi = IcebergBinaryObjectInspector.get(); + + Assert.assertEquals(ObjectInspector.Category.PRIMITIVE, oi.getCategory()); + Assert.assertEquals(PrimitiveObjectInspector.PrimitiveCategory.BINARY, oi.getPrimitiveCategory()); + + Assert.assertEquals(TypeInfoFactory.binaryTypeInfo, oi.getTypeInfo()); + Assert.assertEquals(TypeInfoFactory.binaryTypeInfo.getTypeName(), oi.getTypeName()); + + Assert.assertEquals(byte[].class, oi.getJavaPrimitiveClass()); + Assert.assertEquals(BytesWritable.class, oi.getPrimitiveWritableClass()); + + Assert.assertNull(oi.copyObject(null)); + Assert.assertNull(oi.getPrimitiveJavaObject(null)); + Assert.assertNull(oi.getPrimitiveWritableObject(null)); + + byte[] bytes = new byte[] {0, 1}; + ByteBuffer buffer = ByteBuffer.wrap(bytes); + + Assert.assertArrayEquals(bytes, oi.getPrimitiveJavaObject(buffer)); + Assert.assertEquals(new BytesWritable(bytes), oi.getPrimitiveWritableObject(buffer)); + + byte[] copy = (byte[]) oi.copyObject(bytes); + + Assert.assertArrayEquals(bytes, copy); + Assert.assertNotSame(bytes, copy); + + Assert.assertFalse(oi.preferWritable()); + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergDateObjectInspector.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergDateObjectInspector.java new file mode 100644 index 000000000000..28962aa352f4 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergDateObjectInspector.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iceberg.mr.mapred.serde.objectinspector; + +import java.sql.Date; +import java.time.LocalDate; +import org.apache.hadoop.hive.serde2.io.DateWritable; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.junit.Assert; +import org.junit.Test; + +public class TestIcebergDateObjectInspector { + + @Test + public void testIcebergDateObjectInspector() { + DateObjectInspector oi = IcebergDateObjectInspector.get(); + + Assert.assertEquals(ObjectInspector.Category.PRIMITIVE, oi.getCategory()); + Assert.assertEquals(PrimitiveObjectInspector.PrimitiveCategory.DATE, oi.getPrimitiveCategory()); + + Assert.assertEquals(TypeInfoFactory.dateTypeInfo, oi.getTypeInfo()); + Assert.assertEquals(TypeInfoFactory.dateTypeInfo.getTypeName(), oi.getTypeName()); + + Assert.assertEquals(Date.class, oi.getJavaPrimitiveClass()); + Assert.assertEquals(DateWritable.class, oi.getPrimitiveWritableClass()); + + Assert.assertNull(oi.copyObject(null)); + Assert.assertNull(oi.getPrimitiveJavaObject(null)); + Assert.assertNull(oi.getPrimitiveWritableObject(null)); + + LocalDate local = LocalDate.of(2020, 1, 1); + Date date = Date.valueOf("2020-01-01"); + + Assert.assertEquals(date, oi.getPrimitiveJavaObject(local)); + Assert.assertEquals(new DateWritable(date), oi.getPrimitiveWritableObject(local)); + + Date copy = (Date) oi.copyObject(date); + + Assert.assertEquals(date, copy); + Assert.assertNotSame(date, copy); + + Assert.assertFalse(oi.preferWritable()); + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergDecimalObjectInspector.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergDecimalObjectInspector.java new file mode 100644 index 000000000000..77489a4478e0 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergDecimalObjectInspector.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iceberg.mr.mapred.serde.objectinspector; + +import java.math.BigDecimal; +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.junit.Assert; +import org.junit.Test; + +public class TestIcebergDecimalObjectInspector { + + @Test + public void testCache() { + HiveDecimalObjectInspector oi = IcebergDecimalObjectInspector.get(38, 18); + + Assert.assertSame(oi, IcebergDecimalObjectInspector.get(38, 18)); + Assert.assertNotSame(oi, IcebergDecimalObjectInspector.get(28, 18)); + Assert.assertNotSame(oi, IcebergDecimalObjectInspector.get(38, 28)); + } + + @Test + public void testIcebergDecimalObjectInspector() { + HiveDecimalObjectInspector oi = IcebergDecimalObjectInspector.get(38, 18); + + Assert.assertEquals(ObjectInspector.Category.PRIMITIVE, oi.getCategory()); + Assert.assertEquals(PrimitiveObjectInspector.PrimitiveCategory.DECIMAL, oi.getPrimitiveCategory()); + + Assert.assertEquals(new DecimalTypeInfo(38, 18), oi.getTypeInfo()); + Assert.assertEquals(TypeInfoFactory.decimalTypeInfo.getTypeName(), oi.getTypeName()); + + Assert.assertEquals(38, oi.precision()); + Assert.assertEquals(18, oi.scale()); + + Assert.assertEquals(HiveDecimal.class, oi.getJavaPrimitiveClass()); + Assert.assertEquals(HiveDecimalWritable.class, oi.getPrimitiveWritableClass()); + + Assert.assertNull(oi.copyObject(null)); + Assert.assertNull(oi.getPrimitiveJavaObject(null)); + Assert.assertNull(oi.getPrimitiveWritableObject(null)); + + HiveDecimal one = HiveDecimal.create(BigDecimal.ONE); + + Assert.assertEquals(one, oi.getPrimitiveJavaObject(BigDecimal.ONE)); + Assert.assertEquals(new HiveDecimalWritable(one), oi.getPrimitiveWritableObject(BigDecimal.ONE)); + + HiveDecimal copy = (HiveDecimal) oi.copyObject(one); + + Assert.assertEquals(one, copy); + Assert.assertNotSame(one, copy); + + Assert.assertFalse(oi.preferWritable()); + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergObjectInspector.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergObjectInspector.java new file mode 100644 index 000000000000..b280b05fb86d --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergObjectInspector.java @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iceberg.mr.mapred.serde.objectinspector; + +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.iceberg.AssertHelpers; +import org.apache.iceberg.Schema; +import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; +import org.apache.iceberg.types.Types; +import org.junit.Assert; +import org.junit.Test; + +import static org.apache.iceberg.types.Types.NestedField.required; + + +public class TestIcebergObjectInspector { + + private final Schema schema = new Schema( + required(0, "binary_field", Types.BinaryType.get(), "binary comment"), + required(1, "boolean_field", Types.BooleanType.get(), "boolean comment"), + required(2, "date_field", Types.DateType.get(), "date comment"), + required(3, "decimal_field", Types.DecimalType.of(38, 18), "decimal comment"), + required(4, "double_field", Types.DoubleType.get(), "double comment"), + required(5, "float_field", Types.FloatType.get(), "float comment"), + required(6, "integer_field", Types.IntegerType.get(), "integer comment"), + required(7, "long_field", Types.LongType.get(), "long comment"), + required(8, "string_field", Types.StringType.get(), "string comment"), + required(9, "timestamp_field", Types.TimestampType.withoutZone(), "timestamp comment"), + required(10, "timestamptz_field", Types.TimestampType.withZone(), "timestamptz comment"), + required(11, "list_field", + Types.ListType.ofRequired(12, Types.StringType.get()), "list comment"), + required(13, "map_field", + Types.MapType.ofRequired(14, 15, Types.StringType.get(), Types.IntegerType.get()), + "map comment"), + required(16, "struct_field", Types.StructType.of( + Types.NestedField.required(17, "nested_field", Types.StringType.get(), "nested field comment")), + "struct comment" + ) + ); + + @Test + public void testIcebergObjectInspector() { + ObjectInspector oi = IcebergObjectInspector.create(schema); + Assert.assertNotNull(oi); + Assert.assertEquals(ObjectInspector.Category.STRUCT, oi.getCategory()); + + StructObjectInspector soi = (StructObjectInspector) oi; + + // binary + StructField binaryField = soi.getStructFieldRef("binary_field"); + Assert.assertEquals(0, binaryField.getFieldID()); + Assert.assertEquals("binary_field", binaryField.getFieldName()); + Assert.assertEquals("binary comment", binaryField.getFieldComment()); + Assert.assertEquals(IcebergBinaryObjectInspector.get(), binaryField.getFieldObjectInspector()); + + // boolean + StructField booleanField = soi.getStructFieldRef("boolean_field"); + Assert.assertEquals(1, booleanField.getFieldID()); + Assert.assertEquals("boolean_field", booleanField.getFieldName()); + Assert.assertEquals("boolean comment", booleanField.getFieldComment()); + Assert.assertEquals(getPrimitiveObjectInspector(boolean.class), booleanField.getFieldObjectInspector()); + + // date + StructField dateField = soi.getStructFieldRef("date_field"); + Assert.assertEquals(2, dateField.getFieldID()); + Assert.assertEquals("date_field", dateField.getFieldName()); + Assert.assertEquals("date comment", dateField.getFieldComment()); + 
Assert.assertEquals(IcebergDateObjectInspector.get(), dateField.getFieldObjectInspector()); + + // decimal + StructField decimalField = soi.getStructFieldRef("decimal_field"); + Assert.assertEquals(3, decimalField.getFieldID()); + Assert.assertEquals("decimal_field", decimalField.getFieldName()); + Assert.assertEquals("decimal comment", decimalField.getFieldComment()); + Assert.assertEquals(IcebergDecimalObjectInspector.get(38, 18), decimalField.getFieldObjectInspector()); + + // double + StructField doubleField = soi.getStructFieldRef("double_field"); + Assert.assertEquals(4, doubleField.getFieldID()); + Assert.assertEquals("double_field", doubleField.getFieldName()); + Assert.assertEquals("double comment", doubleField.getFieldComment()); + Assert.assertEquals(getPrimitiveObjectInspector(double.class), doubleField.getFieldObjectInspector()); + + // float + StructField floatField = soi.getStructFieldRef("float_field"); + Assert.assertEquals(5, floatField.getFieldID()); + Assert.assertEquals("float_field", floatField.getFieldName()); + Assert.assertEquals("float comment", floatField.getFieldComment()); + Assert.assertEquals(getPrimitiveObjectInspector(float.class), floatField.getFieldObjectInspector()); + + // integer + StructField integerField = soi.getStructFieldRef("integer_field"); + Assert.assertEquals(6, integerField.getFieldID()); + Assert.assertEquals("integer_field", integerField.getFieldName()); + Assert.assertEquals("integer comment", integerField.getFieldComment()); + Assert.assertEquals(getPrimitiveObjectInspector(int.class), integerField.getFieldObjectInspector()); + + // long + StructField longField = soi.getStructFieldRef("long_field"); + Assert.assertEquals(7, longField.getFieldID()); + Assert.assertEquals("long_field", longField.getFieldName()); + Assert.assertEquals("long comment", longField.getFieldComment()); + Assert.assertEquals(getPrimitiveObjectInspector(long.class), longField.getFieldObjectInspector()); + + // string + StructField stringField = soi.getStructFieldRef("string_field"); + Assert.assertEquals(8, stringField.getFieldID()); + Assert.assertEquals("string_field", stringField.getFieldName()); + Assert.assertEquals("string comment", stringField.getFieldComment()); + Assert.assertEquals(getPrimitiveObjectInspector(String.class), stringField.getFieldObjectInspector()); + + // timestamp without tz + StructField timestampField = soi.getStructFieldRef("timestamp_field"); + Assert.assertEquals(9, timestampField.getFieldID()); + Assert.assertEquals("timestamp_field", timestampField.getFieldName()); + Assert.assertEquals("timestamp comment", timestampField.getFieldComment()); + Assert.assertEquals(IcebergTimestampObjectInspector.get(false), timestampField.getFieldObjectInspector()); + + // timestamp with tz + StructField timestampTzField = soi.getStructFieldRef("timestamptz_field"); + Assert.assertEquals(10, timestampTzField.getFieldID()); + Assert.assertEquals("timestamptz_field", timestampTzField.getFieldName()); + Assert.assertEquals("timestamptz comment", timestampTzField.getFieldComment()); + Assert.assertEquals(IcebergTimestampObjectInspector.get(true), timestampTzField.getFieldObjectInspector()); + + // list + StructField listField = soi.getStructFieldRef("list_field"); + Assert.assertEquals(11, listField.getFieldID()); + Assert.assertEquals("list_field", listField.getFieldName()); + Assert.assertEquals("list comment", listField.getFieldComment()); + Assert.assertEquals(getListObjectInspector(String.class), listField.getFieldObjectInspector()); + + // map + 
StructField mapField = soi.getStructFieldRef("map_field"); + Assert.assertEquals(13, mapField.getFieldID()); + Assert.assertEquals("map_field", mapField.getFieldName()); + Assert.assertEquals("map comment", mapField.getFieldComment()); + Assert.assertEquals(getMapObjectInspector(String.class, int.class), mapField.getFieldObjectInspector()); + + // struct + StructField structField = soi.getStructFieldRef("struct_field"); + Assert.assertEquals(16, structField.getFieldID()); + Assert.assertEquals("struct_field", structField.getFieldName()); + Assert.assertEquals("struct comment", structField.getFieldComment()); + + ObjectInspector expectedObjectInspector = new IcebergRecordObjectInspector( + (Types.StructType) schema.findType(16), ImmutableList.of(getPrimitiveObjectInspector(String.class))); + Assert.assertEquals(expectedObjectInspector, structField.getFieldObjectInspector()); + } + + @Test + public void testIcebergObjectInspectorUnsupportedTypes() { + AssertHelpers.assertThrows( + "Hive does not support fixed type", IllegalArgumentException.class, "FIXED type is not supported", + () -> IcebergObjectInspector.create(required(1, "fixed_field", Types.FixedType.ofLength(1)))); + + AssertHelpers.assertThrows( + "Hive does not support time type", IllegalArgumentException.class, "TIME type is not supported", + () -> IcebergObjectInspector.create(required(1, "time_field", Types.TimeType.get()))); + + AssertHelpers.assertThrows( + "Hive does not support UUID type", IllegalArgumentException.class, "UUID type is not supported", + () -> IcebergObjectInspector.create(required(1, "uuid_field", Types.UUIDType.get()))); + } + + private static ObjectInspector getPrimitiveObjectInspector(Class<?> clazz) { + PrimitiveTypeInfo typeInfo = (PrimitiveTypeInfo) TypeInfoFactory.getPrimitiveTypeInfoFromJavaPrimitive(clazz); + return PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(typeInfo); + } + + private static ObjectInspector getListObjectInspector(Class<?> clazz) { + return ObjectInspectorFactory.getStandardListObjectInspector(getPrimitiveObjectInspector(clazz)); + } + + private static ObjectInspector getMapObjectInspector(Class<?> keyClazz, Class<?> valueClazz) { + return ObjectInspectorFactory.getStandardMapObjectInspector( + getPrimitiveObjectInspector(keyClazz), getPrimitiveObjectInspector(valueClazz)); + } + +} diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergRecordObjectInspector.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergRecordObjectInspector.java new file mode 100644 index 000000000000..edfaa1722185 --- /dev/null +++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergRecordObjectInspector.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.mr.mapred.serde.objectinspector;
+
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.data.RandomGenericData;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.types.Types;
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.apache.iceberg.types.Types.NestedField.required;
+
+public class TestIcebergRecordObjectInspector {
+
+  @Test
+  public void testIcebergRecordObjectInspector() {
+    Schema schema = new Schema(
+        required(1, "integer_field", Types.IntegerType.get()),
+        required(2, "struct_field", Types.StructType.of(
+            Types.NestedField.required(3, "string_field", Types.StringType.get())))
+    );
+
+    Record record = RandomGenericData.generate(schema, 1, 0L).get(0);
+    Record innerRecord = record.get(1, Record.class);
+
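+    // position 0 holds the integer field and position 1 the nested struct record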
+    StructObjectInspector soi = (StructObjectInspector) IcebergObjectInspector.create(schema);
+    Assert.assertEquals(ImmutableList.of(record.get(0), record.get(1)), soi.getStructFieldsDataAsList(record));
+
+    StructField integerField = soi.getStructFieldRef("integer_field");
+    Assert.assertEquals(record.get(0), soi.getStructFieldData(record, integerField));
+
+    StructField structField = soi.getStructFieldRef("struct_field");
+    Object innerData = soi.getStructFieldData(record, structField);
+    Assert.assertEquals(innerRecord, innerData);
+
+    StructObjectInspector innerSoi = (StructObjectInspector) structField.getFieldObjectInspector();
+    StructField stringField = innerSoi.getStructFieldRef("string_field");
+
+    Assert.assertEquals(ImmutableList.of(innerRecord.get(0)), innerSoi.getStructFieldsDataAsList(innerRecord));
+    Assert.assertEquals(innerRecord.get(0), innerSoi.getStructFieldData(innerData, stringField));
+  }
+
+}
diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergTimestampObjectInspector.java b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergTimestampObjectInspector.java
new file mode 100644
index 000000000000..a1f6c18b8dd6
--- /dev/null
+++ b/mr/src/test/java/org/apache/iceberg/mr/mapred/serde/objectinspector/TestIcebergTimestampObjectInspector.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.mr.mapred.serde.objectinspector;
+
+import java.sql.Timestamp;
+import java.time.LocalDateTime;
+import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestIcebergTimestampObjectInspector {
+
+  @Test
+  public void testIcebergTimestampObjectInspector() {
+    TimestampObjectInspector oi = IcebergTimestampObjectInspector.get(false);
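+    // get(false) selects the timestamp-without-zone inspector; get(true) is the timestamptz variant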
+
+    Assert.assertEquals(ObjectInspector.Category.PRIMITIVE, oi.getCategory());
+    Assert.assertEquals(PrimitiveObjectInspector.PrimitiveCategory.TIMESTAMP, oi.getPrimitiveCategory());
+
+    Assert.assertEquals(TypeInfoFactory.timestampTypeInfo, oi.getTypeInfo());
+    Assert.assertEquals(TypeInfoFactory.timestampTypeInfo.getTypeName(), oi.getTypeName());
+
+    Assert.assertEquals(Timestamp.class, oi.getJavaPrimitiveClass());
+    Assert.assertEquals(TimestampWritable.class, oi.getPrimitiveWritableClass());
+
+    Assert.assertNull(oi.copyObject(null));
+    Assert.assertNull(oi.getPrimitiveJavaObject(null));
+    Assert.assertNull(oi.getPrimitiveWritableObject(null));
+
+    LocalDateTime local = LocalDateTime.of(2020, 1, 1, 0, 0);
+    Timestamp ts = Timestamp.valueOf("2020-01-01 00:00:00");
+
+    Assert.assertEquals(ts, oi.getPrimitiveJavaObject(local));
+    Assert.assertEquals(new TimestampWritable(ts), oi.getPrimitiveWritableObject(local));
+
+    Timestamp copy = (Timestamp) oi.copyObject(ts);
+
+    Assert.assertEquals(ts, copy);
+    Assert.assertNotSame(ts, copy);
+
+    Assert.assertFalse(oi.preferWritable());
+  }
+
+}
diff --git a/mr/src/test/java/org/apache/iceberg/mr/mapreduce/TestIcebergInputFormat.java b/mr/src/test/java/org/apache/iceberg/mr/mapreduce/TestIcebergInputFormat.java
index a5dbbef0ac4e..8c0c9e81aa04 100644
--- a/mr/src/test/java/org/apache/iceberg/mr/mapreduce/TestIcebergInputFormat.java
+++ b/mr/src/test/java/org/apache/iceberg/mr/mapreduce/TestIcebergInputFormat.java
@@ -20,7 +20,6 @@
 package org.apache.iceberg.mr.mapreduce;
 
 import java.io.File;
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Locale;
@@ -35,31 +34,21 @@
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 import org.apache.iceberg.AppendFiles;
-import org.apache.iceberg.AssertHelpers;
 import org.apache.iceberg.DataFile;
-import org.apache.iceberg.DataFiles;
 import org.apache.iceberg.FileFormat;
-import org.apache.iceberg.Files;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.Schema;
-import org.apache.iceberg.StructLike;
 import org.apache.iceberg.Table;
 import org.apache.iceberg.TableProperties;
 import org.apache.iceberg.TestHelpers.Row;
-import org.apache.iceberg.avro.Avro;
 import org.apache.iceberg.catalog.Catalog;
 import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.data.RandomGenericData;
 import org.apache.iceberg.data.Record;
-import org.apache.iceberg.data.avro.DataWriter;
-import org.apache.iceberg.data.orc.GenericOrcWriter;
-import org.apache.iceberg.data.parquet.GenericParquetWriter;
 import org.apache.iceberg.expressions.Expressions;
 import org.apache.iceberg.hadoop.HadoopCatalog;
 import org.apache.iceberg.hadoop.HadoopTables;
-import org.apache.iceberg.io.FileAppender;
-import org.apache.iceberg.orc.ORC;
-import org.apache.iceberg.parquet.Parquet;
+import org.apache.iceberg.mr.InputFormatConfig;
 import org.apache.iceberg.relocated.com.google.common.collect.FluentIterable;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
 import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
@@ -68,31 +57,32 @@
 import org.apache.iceberg.types.TypeUtil;
 import org.apache.iceberg.types.Types;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+import static org.apache.iceberg.mr.TestHelpers.writeFile;
 import static org.apache.iceberg.types.Types.NestedField.required;
 
 @RunWith(Parameterized.class)
 public class TestIcebergInputFormat {
-  static final Schema SCHEMA = new Schema(
-          required(1, "data", Types.StringType.get()),
-          required(2, "id", Types.LongType.get()),
-          required(3, "date", Types.StringType.get()));
+  private static final Schema SCHEMA = new Schema(
+      required(1, "data", Types.StringType.get()),
+      required(2, "id", Types.LongType.get()),
+      required(3, "date", Types.StringType.get()));
 
-  static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA)
-          .identity("date")
-          .bucket("id", 1)
-          .build();
+  private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA)
+      .identity("date")
+      .bucket("id", 1)
+      .build();
 
   @Rule
   public TemporaryFolder temp = new TemporaryFolder();
-  private HadoopTables tables;
-  private Configuration conf;
+
+  private Configuration conf = new Configuration();
+  private HadoopTables tables = new HadoopTables(conf);
 
   @Parameterized.Parameters
   public static Object[][] parameters() {
@@ -109,27 +99,21 @@ public TestIcebergInputFormat(String format) {
     this.format = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH));
   }
 
-  @Before
-  public void before() {
-    conf = new Configuration();
-    tables = new HadoopTables(conf);
-  }
-
   @Test
   public void testUnpartitionedTable() throws Exception {
    File location = temp.newFolder(format.name());
     Assert.assertTrue(location.delete());
     Table table = tables.create(SCHEMA, PartitionSpec.unpartitioned(),
-                                ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
-                                location.toString());
+        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
+        location.toString());
     List<Record> expectedRecords = RandomGenericData.generate(table.schema(), 1, 0L);
-    DataFile dataFile = writeFile(table, null, format, expectedRecords);
+    DataFile dataFile = writeFile(temp.newFile(), table, null, format, expectedRecords);
     table.newAppend()
-            .appendFile(dataFile)
-            .commit();
+        .appendFile(dataFile)
+        .commit();
 
     Job job = Job.getInstance(conf);
-    IcebergInputFormat.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
-    configBuilder.readFrom(location.toString());
+    InputFormatConfig.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
+    configBuilder.readFrom(location.toString()).schema(table.schema());
 
     validate(job, expectedRecords);
   }
 
@@ -138,18 +122,18 @@ public void testPartitionedTable() throws Exception {
     File location = temp.newFolder(format.name());
     Assert.assertTrue(location.delete());
     Table table = tables.create(SCHEMA, SPEC,
-                                ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
-                                location.toString());
+        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
+        location.toString());
     List<Record> expectedRecords = RandomGenericData.generate(table.schema(), 1, 0L);
     expectedRecords.get(0).set(2, "2020-03-20");
-    DataFile dataFile = writeFile(table, Row.of("2020-03-20", 0), format, expectedRecords);
+    DataFile dataFile = writeFile(temp.newFile(), table, Row.of("2020-03-20", 0), format, expectedRecords);
     table.newAppend()
-            .appendFile(dataFile)
-            .commit();
+        .appendFile(dataFile)
+        .commit();
 
     Job job = Job.getInstance(conf);
-    IcebergInputFormat.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
-    configBuilder.readFrom(location.toString());
+    InputFormatConfig.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
+    configBuilder.readFrom(location.toString()).schema(table.schema());
 
     validate(job, expectedRecords);
   }
 
@@ -158,22 +142,23 @@ public void testFilterExp() throws Exception {
     File location = temp.newFolder(format.name());
     Assert.assertTrue(location.delete());
     Table table = tables.create(SCHEMA, SPEC,
-                                ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
-                                location.toString());
+        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
+        location.toString());
     List<Record> expectedRecords = RandomGenericData.generate(table.schema(), 2, 0L);
     expectedRecords.get(0).set(2, "2020-03-20");
     expectedRecords.get(1).set(2, "2020-03-20");
-    DataFile dataFile1 = writeFile(table, Row.of("2020-03-20", 0), format, expectedRecords);
-    DataFile dataFile2 = writeFile(table, Row.of("2020-03-21", 0), format,
-        RandomGenericData.generate(table.schema(), 2, 0L));
+    DataFile dataFile1 = writeFile(temp.newFile(), table, Row.of("2020-03-20", 0), format, expectedRecords);
+    DataFile dataFile2 = writeFile(temp.newFile(), table, Row.of("2020-03-21", 0), format,
+        RandomGenericData.generate(table.schema(), 2, 0L));
     table.newAppend()
-            .appendFile(dataFile1)
-            .appendFile(dataFile2)
-            .commit();
+        .appendFile(dataFile1)
+        .appendFile(dataFile2)
+        .commit();
 
     Job job = Job.getInstance(conf);
-    IcebergInputFormat.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
+    InputFormatConfig.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
     configBuilder.readFrom(location.toString())
-            .filter(Expressions.equal("date", "2020-03-20"));
+        .schema(table.schema())
+        .filter(Expressions.equal("date", "2020-03-20"));
 
     validate(job, expectedRecords);
   }
 
@@ -182,8 +167,8 @@ public void testResiduals() throws Exception {
     File location = temp.newFolder(format.name());
     Assert.assertTrue(location.delete());
     Table table = tables.create(SCHEMA, SPEC,
-                                ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
-                                location.toString());
+        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
+        location.toString());
     List<Record> writeRecords = RandomGenericData.generate(table.schema(), 2, 0L);
     writeRecords.get(0).set(1, 123L);
     writeRecords.get(0).set(2, "2020-03-20");
@@ -193,111 +178,75 @@
     List<Record> expectedRecords = new ArrayList<>();
     expectedRecords.add(writeRecords.get(0));
 
-    DataFile dataFile1 = writeFile(table, Row.of("2020-03-20", 0), format, writeRecords);
-    DataFile dataFile2 = writeFile(table, Row.of("2020-03-21", 0), format,
-        RandomGenericData.generate(table.schema(), 2, 0L));
+    DataFile dataFile1 = writeFile(temp.newFile(), table, Row.of("2020-03-20", 0), format, writeRecords);
+    DataFile dataFile2 = writeFile(temp.newFile(), table, Row.of("2020-03-21", 0), format,
+        RandomGenericData.generate(table.schema(), 2, 0L));
     table.newAppend()
-            .appendFile(dataFile1)
-            .appendFile(dataFile2)
-            .commit();
+        .appendFile(dataFile1)
+        .appendFile(dataFile2)
+        .commit();
 
     Job job = Job.getInstance(conf);
-    IcebergInputFormat.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
+    InputFormatConfig.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
     configBuilder.readFrom(location.toString())
-            .filter(Expressions.and(
-                Expressions.equal("date", "2020-03-20"),
-                Expressions.equal("id", 123)));
+        .schema(table.schema())
+        .filter(Expressions.and(
+            Expressions.equal("date", "2020-03-20"),
+            Expressions.equal("id", 123)));
     validate(job, expectedRecords);
 
     // skip residual filtering
     job = Job.getInstance(conf);
     configBuilder = IcebergInputFormat.configure(job);
     configBuilder.skipResidualFiltering().readFrom(location.toString())
-            .filter(Expressions.and(
-                Expressions.equal("date", "2020-03-20"),
-                Expressions.equal("id", 123)));
+        .schema(table.schema())
+        .filter(Expressions.and(
+            Expressions.equal("date", "2020-03-20"),
+            Expressions.equal("id", 123)));
     validate(job, writeRecords);
   }
 
-  @Test
-  public void testFailedResidualFiltering() throws Exception {
-    File location = temp.newFolder(format.name());
-    Assert.assertTrue(location.delete());
-    Table table = tables.create(SCHEMA, SPEC,
-                                ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
-                                location.toString());
-    List<Record> expectedRecords = RandomGenericData.generate(table.schema(), 2, 0L);
-    expectedRecords.get(0).set(2, "2020-03-20");
-    expectedRecords.get(1).set(2, "2020-03-20");
-
-    DataFile dataFile1 = writeFile(table, Row.of("2020-03-20", 0), format, expectedRecords);
-    table.newAppend()
-            .appendFile(dataFile1)
-            .commit();
-
-    Job jobShouldFail1 = Job.getInstance(conf);
-    IcebergInputFormat.ConfigBuilder configBuilder = IcebergInputFormat.configure(jobShouldFail1);
-    configBuilder.useHiveRows().readFrom(location.toString())
-            .filter(Expressions.and(
-                Expressions.equal("date", "2020-03-20"),
-                Expressions.equal("id", 0)));
-    AssertHelpers.assertThrows(
-        "Residuals are not evaluated today for Iceberg Generics In memory model of HIVE",
-        UnsupportedOperationException.class, "Filter expression ref(name=\"id\") == 0 is not completely satisfied.",
-        () -> validate(jobShouldFail1, expectedRecords));
-
-    Job jobShouldFail2 = Job.getInstance(conf);
-    configBuilder = IcebergInputFormat.configure(jobShouldFail2);
-    configBuilder.usePigTuples().readFrom(location.toString())
-            .filter(Expressions.and(
-                Expressions.equal("date", "2020-03-20"),
-                Expressions.equal("id", 0)));
-    AssertHelpers.assertThrows(
-        "Residuals are not evaluated today for Iceberg Generics In memory model of PIG",
-        UnsupportedOperationException.class, "Filter expression ref(name=\"id\") == 0 is not completely satisfied.",
-        () -> validate(jobShouldFail2, expectedRecords));
-  }
-
   @Test
   public void testProjection() throws Exception {
     File location = temp.newFolder(format.name());
     Assert.assertTrue(location.delete());
     Schema projectedSchema = TypeUtil.select(SCHEMA, ImmutableSet.of(1));
     Table table = tables.create(SCHEMA, SPEC,
-                                ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
-                                location.toString());
+        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
+        location.toString());
     List<Record> inputRecords = RandomGenericData.generate(table.schema(), 1, 0L);
-    DataFile dataFile = writeFile(table, Row.of("2020-03-20", 0), format, inputRecords);
+    DataFile dataFile = writeFile(temp.newFile(), table, Row.of("2020-03-20", 0), format, inputRecords);
     table.newAppend()
-            .appendFile(dataFile)
-            .commit();
+        .appendFile(dataFile)
+        .commit();
 
     Job job = Job.getInstance(conf);
-    IcebergInputFormat.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
+    InputFormatConfig.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
     configBuilder
-            .readFrom(location.toString())
-            .project(projectedSchema);
+        .readFrom(location.toString())
+        .project(projectedSchema)
+        .schema(table.schema());
 
     List<Record> outputRecords = readRecords(job.getConfiguration());
     Assert.assertEquals(inputRecords.size(), outputRecords.size());
     Assert.assertEquals(projectedSchema.asStruct(), outputRecords.get(0).struct());
   }
 
   private static final Schema LOG_SCHEMA = new Schema(
-          Types.NestedField.optional(1, "id", Types.IntegerType.get()),
-          Types.NestedField.optional(2, "date", Types.StringType.get()),
-          Types.NestedField.optional(3, "level", Types.StringType.get()),
-          Types.NestedField.optional(4, "message", Types.StringType.get())
+      Types.NestedField.optional(1, "id", Types.IntegerType.get()),
+      Types.NestedField.optional(2, "date", Types.StringType.get()),
+      Types.NestedField.optional(3, "level", Types.StringType.get()),
+      Types.NestedField.optional(4, "message", Types.StringType.get())
   );
 
   private static final PartitionSpec IDENTITY_PARTITION_SPEC =
-          PartitionSpec.builderFor(LOG_SCHEMA).identity("date").identity("level").build();
+      PartitionSpec.builderFor(LOG_SCHEMA).identity("date").identity("level").build();
 
   @Test
   public void testIdentityPartitionProjections() throws Exception {
     File location = temp.newFolder(format.name());
     Assert.assertTrue(location.delete());
     Table table = tables.create(LOG_SCHEMA, IDENTITY_PARTITION_SPEC,
-                                ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
-                                location.toString());
+        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
+        location.toString());
     List<Record> inputRecords = RandomGenericData.generate(LOG_SCHEMA, 10, 0);
 
     Integer idx = 0;
@@ -305,33 +254,50 @@ public void testIdentityPartitionProjections() throws Exception {
     for (Record record : inputRecords) {
       record.set(1, "2020-03-2" + idx);
       record.set(2, idx.toString());
-      append.appendFile(writeFile(table, Row.of("2020-03-2" + idx, idx.toString()), format, ImmutableList.of(record)));
+      append.appendFile(writeFile(temp.newFile(), table, Row.of("2020-03-2" + idx, idx.toString()), format,
+          ImmutableList.of(record)));
       idx += 1;
     }
     append.commit();
 
     // individual fields
-    validateIdentityPartitionProjections(location.toString(), withColumns("date"), inputRecords);
-    validateIdentityPartitionProjections(location.toString(), withColumns("level"), inputRecords);
-    validateIdentityPartitionProjections(location.toString(), withColumns("message"), inputRecords);
-    validateIdentityPartitionProjections(location.toString(), withColumns("id"), inputRecords);
+    validateIdentityPartitionProjections(location.toString(), table.schema(),
+        withColumns("date"), inputRecords);
+    validateIdentityPartitionProjections(location.toString(), table.schema(),
+        withColumns("level"), inputRecords);
+    validateIdentityPartitionProjections(location.toString(), table.schema(),
+        withColumns("message"), inputRecords);
+    validateIdentityPartitionProjections(location.toString(), table.schema(),
+        withColumns("id"), inputRecords);
 
     // field pairs
-    validateIdentityPartitionProjections(location.toString(), withColumns("date", "message"), inputRecords);
-    validateIdentityPartitionProjections(location.toString(), withColumns("level", "message"), inputRecords);
-    validateIdentityPartitionProjections(location.toString(), withColumns("date", "level"), inputRecords);
+    validateIdentityPartitionProjections(location.toString(), table.schema(),
+        withColumns("date", "message"), inputRecords);
+    validateIdentityPartitionProjections(location.toString(), table.schema(),
+        withColumns("level", "message"), inputRecords);
+    validateIdentityPartitionProjections(location.toString(), table.schema(),
+        withColumns("date", "level"), inputRecords);
"level"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("date", "message"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("level", "message"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("date", "level"), inputRecords); // out-of-order pairs - validateIdentityPartitionProjections(location.toString(), withColumns("message", "date"), inputRecords); - validateIdentityPartitionProjections(location.toString(), withColumns("message", "level"), inputRecords); - validateIdentityPartitionProjections(location.toString(), withColumns("level", "date"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("message", "date"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("message", "level"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("level", "date"), inputRecords); // full projection - validateIdentityPartitionProjections(location.toString(), LOG_SCHEMA, inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), LOG_SCHEMA, inputRecords); // out-of-order triplets - validateIdentityPartitionProjections(location.toString(), withColumns("date", "level", "message"), inputRecords); - validateIdentityPartitionProjections(location.toString(), withColumns("level", "date", "message"), inputRecords); - validateIdentityPartitionProjections(location.toString(), withColumns("date", "message", "level"), inputRecords); - validateIdentityPartitionProjections(location.toString(), withColumns("level", "message", "date"), inputRecords); - validateIdentityPartitionProjections(location.toString(), withColumns("message", "date", "level"), inputRecords); - validateIdentityPartitionProjections(location.toString(), withColumns("message", "level", "date"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("date", "level", "message"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("level", "date", "message"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("date", "message", "level"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("level", "message", "date"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("message", "date", "level"), inputRecords); + validateIdentityPartitionProjections(location.toString(), table.schema(), + withColumns("message", "level", "date"), inputRecords); } private static Schema withColumns(String... names) { @@ -344,12 +310,13 @@ private static Schema withColumns(String... 
   }
 
   private void validateIdentityPartitionProjections(
-      String tablePath, Schema projectedSchema, List<Record> inputRecords) throws Exception {
+      String tablePath, Schema tableSchema, Schema projectedSchema, List<Record> inputRecords) throws Exception {
     Job job = Job.getInstance(conf);
-    IcebergInputFormat.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
+    InputFormatConfig.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
     configBuilder
-            .readFrom(tablePath)
-            .project(projectedSchema);
+        .readFrom(tablePath)
+        .schema(tableSchema)
+        .project(projectedSchema);
 
     List<Record> actualRecords = readRecords(job.getConfiguration());
     Set<String> fieldNames = TypeUtil.indexByName(projectedSchema.asStruct()).keySet();
@@ -359,7 +326,8 @@ private void validateIdentityPartitionProjections(
       Assert.assertEquals("Projected schema should match", projectedSchema.asStruct(), actualRecord.struct());
       for (String name : fieldNames) {
         Assert.assertEquals(
-            "Projected field " + name + " should match", inputRecord.getField(name), actualRecord.getField(name));
+            "Projected field " + name + " should match", inputRecord.getField(name),
+            actualRecord.getField(name));
       }
     }
   }
 
@@ -369,22 +337,24 @@ public void testSnapshotReads() throws Exception {
     File location = temp.newFolder(format.name());
     Assert.assertTrue(location.delete());
     Table table = tables.create(SCHEMA, PartitionSpec.unpartitioned(),
-                                ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
-                                location.toString());
+        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
+        location.toString());
     List<Record> expectedRecords = RandomGenericData.generate(table.schema(), 1, 0L);
     table.newAppend()
-            .appendFile(writeFile(table, null, format, expectedRecords))
-            .commit();
+        .appendFile(writeFile(temp.newFile(), table, null, format, expectedRecords))
+        .commit();
     long snapshotId = table.currentSnapshot().snapshotId();
 
     table.newAppend()
-            .appendFile(writeFile(table, null, format, RandomGenericData.generate(table.schema(), 1, 0L)))
-            .commit();
+        .appendFile(writeFile(temp.newFile(), table, null, format,
+            RandomGenericData.generate(table.schema(), 1, 0L)))
+        .commit();
 
     Job job = Job.getInstance(conf);
-    IcebergInputFormat.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
+    InputFormatConfig.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
     configBuilder
-            .readFrom(location.toString())
-            .snapshotId(snapshotId);
+        .schema(table.schema())
+        .readFrom(location.toString())
+        .snapshotId(snapshotId);
 
     validate(job, expectedRecords);
   }
 
@@ -394,15 +364,15 @@ public void testLocality() throws Exception {
     File location = temp.newFolder(format.name());
     Assert.assertTrue(location.delete());
     Table table = tables.create(SCHEMA, PartitionSpec.unpartitioned(),
-                                ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
-                                location.toString());
+        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
+        location.toString());
     List<Record> expectedRecords = RandomGenericData.generate(table.schema(), 1, 0L);
     table.newAppend()
-            .appendFile(writeFile(table, null, format, expectedRecords))
-            .commit();
+        .appendFile(writeFile(temp.newFile(), table, null, format, expectedRecords))
+        .commit();
 
     Job job = Job.getInstance(conf);
-    IcebergInputFormat.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
-    configBuilder.readFrom(location.toString());
+    InputFormatConfig.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
+    configBuilder.readFrom(location.toString()).schema(table.schema());
 
     for (InputSplit split : splits(job.getConfiguration())) {
       Assert.assertArrayEquals(IcebergInputFormat.IcebergSplit.ANYWHERE, split.getLocations());
@@ -429,19 +399,20 @@ public void testCustomCatalog() throws Exception {
     Catalog catalog = new HadoopCatalogFunc().apply(conf);
     TableIdentifier tableIdentifier = TableIdentifier.of("db", "t");
     Table table = catalog.createTable(tableIdentifier, SCHEMA, SPEC,
-                                      ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()));
+        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()));
     List<Record> expectedRecords = RandomGenericData.generate(table.schema(), 1, 0L);
     expectedRecords.get(0).set(2, "2020-03-20");
-    DataFile dataFile = writeFile(table, Row.of("2020-03-20", 0), format, expectedRecords);
+    DataFile dataFile = writeFile(temp.newFile(), table, Row.of("2020-03-20", 0), format, expectedRecords);
     table.newAppend()
-            .appendFile(dataFile)
-            .commit();
+        .appendFile(dataFile)
+        .commit();
 
     Job job = Job.getInstance(conf);
-    IcebergInputFormat.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
+    InputFormatConfig.ConfigBuilder configBuilder = IcebergInputFormat.configure(job);
     configBuilder
-            .catalogFunc(HadoopCatalogFunc.class)
-            .readFrom(tableIdentifier.toString());
+        .catalogFunc(HadoopCatalogFunc.class)
+        .schema(table.schema())
+        .readFrom(tableIdentifier.toString());
 
     validate(job, expectedRecords);
   }
 
@@ -461,14 +432,14 @@ private static List<Record> readRecords(Configuration conf) {
     IcebergInputFormat<Record> icebergInputFormat = new IcebergInputFormat<>();
     List<InputSplit> splits = icebergInputFormat.getSplits(context);
     return
-            FluentIterable
-                .from(splits)
-                .transformAndConcat(split -> readRecords(icebergInputFormat, split, context))
-                .toList();
+        FluentIterable
+            .from(splits)
+            .transformAndConcat(split -> readRecords(icebergInputFormat, split, context))
+            .toList();
   }
 
   private static Iterable<Record> readRecords(
-          IcebergInputFormat<Record> inputFormat, InputSplit split, TaskAttemptContext context) {
+      IcebergInputFormat<Record> inputFormat, InputSplit split, TaskAttemptContext context) {
     RecordReader<Void, Record> recordReader = inputFormat.createRecordReader(split, context);
     List<Record> records = new ArrayList<>();
     try {
@@ -482,50 +453,4 @@ private static Iterable<Record> readRecords(
     return records;
   }
 
-  private DataFile writeFile(
-      Table table, StructLike partitionData, FileFormat fileFormat, List<Record> records) throws IOException {
-    File file = temp.newFile();
-    Assert.assertTrue(file.delete());
-    FileAppender<Record> appender;
-    switch (fileFormat) {
-      case AVRO:
-        appender = Avro.write(Files.localOutput(file))
-            .schema(table.schema())
-            .createWriterFunc(DataWriter::create)
-            .named(fileFormat.name())
-            .build();
-        break;
-      case PARQUET:
-        appender = Parquet.write(Files.localOutput(file))
-            .schema(table.schema())
-            .createWriterFunc(GenericParquetWriter::buildWriter)
-            .named(fileFormat.name())
-            .build();
-        break;
-      case ORC:
-        appender = ORC.write(Files.localOutput(file))
-            .schema(table.schema())
-            .createWriterFunc(GenericOrcWriter::buildWriter)
-            .build();
-        break;
-      default:
-        throw new UnsupportedOperationException("Cannot write format: " + fileFormat);
-    }
-
-    try {
-      appender.addAll(records);
-    } finally {
-      appender.close();
-    }
-
-    DataFiles.Builder builder = DataFiles.builder(table.spec())
-        .withPath(file.toString())
-        .withFormat(format)
-        .withFileSizeInBytes(file.length())
-        .withMetrics(appender.metrics());
-    if (partitionData != null) {
-      builder.withPartition(partitionData);
-    }
-    return builder.build();
-  }
 }
diff --git a/versions.props b/versions.props
index 7c743a41106b..84371690456f 100644
--- a/versions.props
+++ b/versions.props
@@ -3,6 +3,7 @@ org.apache.avro:avro = 1.9.2
 org.apache.flink:* = 1.10.1
 org.apache.hadoop:* = 2.7.3
 org.apache.hive:hive-metastore = 2.3.7
+org.apache.hive:hive-serde = 2.3.7
 org.apache.orc:* = 1.6.3
 org.apache.parquet:* = 1.11.0
 org.apache.spark:spark-hive_2.11 = 2.4.5