diff --git a/README.md b/README.md
index 0a27bf85b0d..60c279c14e8 100644
--- a/README.md
+++ b/README.md
@@ -162,8 +162,7 @@ enable 3rd party vendor repository (cloudera)
##### `-Pmapr[version]` (optional)
For the MapR Hadoop Distribution, these profiles will handle the Hadoop version. As MapR allows different versions of Spark to be installed, you should specify which version of Spark is installed on the cluster by adding a Spark profile (`-Pspark-1.2`, `-Pspark-1.3`, etc.) as needed.
-For Hive, check the hive/pom.xml and adjust the version installed as well. The correct Maven
-artifacts can be found for every version of MapR at http://doc.mapr.com
+The correct Maven artifacts can be found for every version of MapR at http://doc.mapr.com
Available profiles are
diff --git a/conf/zeppelin-site.xml.template b/conf/zeppelin-site.xml.template
index 2d1fea1ba36..b01618016a4 100755
--- a/conf/zeppelin-site.xml.template
+++ b/conf/zeppelin-site.xml.template
@@ -178,7 +178,7 @@
zeppelin.interpreters
- org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.rinterpreter.RRepl,org.apache.zeppelin.rinterpreter.KnitR,org.apache.zeppelin.spark.SparkRInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.hive.HiveInterpreter,org.apache.zeppelin.tajo.TajoInterpreter,org.apache.zeppelin.file.HDFSFileInterpreter,org.apache.zeppelin.flink.FlinkInterpreter,,org.apache.zeppelin.python.PythonInterpreter,org.apache.zeppelin.lens.LensInterpreter,org.apache.zeppelin.ignite.IgniteInterpreter,org.apache.zeppelin.ignite.IgniteSqlInterpreter,org.apache.zeppelin.cassandra.CassandraInterpreter,org.apache.zeppelin.geode.GeodeOqlInterpreter,org.apache.zeppelin.postgresql.PostgreSqlInterpreter,org.apache.zeppelin.jdbc.JDBCInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.kylin.KylinInterpreter,org.apache.zeppelin.elasticsearch.ElasticsearchInterpreter,org.apache.zeppelin.scalding.ScaldingInterpreter,org.apache.zeppelin.alluxio.AlluxioInterpreter,org.apache.zeppelin.hbase.HbaseInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter
+ org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.rinterpreter.RRepl,org.apache.zeppelin.rinterpreter.KnitR,org.apache.zeppelin.spark.SparkRInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.tajo.TajoInterpreter,org.apache.zeppelin.file.HDFSFileInterpreter,org.apache.zeppelin.flink.FlinkInterpreter,org.apache.zeppelin.python.PythonInterpreter,org.apache.zeppelin.lens.LensInterpreter,org.apache.zeppelin.ignite.IgniteInterpreter,org.apache.zeppelin.ignite.IgniteSqlInterpreter,org.apache.zeppelin.cassandra.CassandraInterpreter,org.apache.zeppelin.geode.GeodeOqlInterpreter,org.apache.zeppelin.postgresql.PostgreSqlInterpreter,org.apache.zeppelin.jdbc.JDBCInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.kylin.KylinInterpreter,org.apache.zeppelin.elasticsearch.ElasticsearchInterpreter,org.apache.zeppelin.scalding.ScaldingInterpreter,org.apache.zeppelin.alluxio.AlluxioInterpreter,org.apache.zeppelin.hbase.HbaseInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter
Comma separated interpreter configurations. First interpreter become a default
diff --git a/docs/development/writingzeppelininterpreter.md b/docs/development/writingzeppelininterpreter.md
index a024fcaeddf..5f797e8b49a 100644
--- a/docs/development/writingzeppelininterpreter.md
+++ b/docs/development/writingzeppelininterpreter.md
@@ -199,7 +199,7 @@ Checkout some interpreters released with Zeppelin by default.
- [spark](https://github.com/apache/incubator-zeppelin/tree/master/spark)
- [markdown](https://github.com/apache/incubator-zeppelin/tree/master/markdown)
- [shell](https://github.com/apache/incubator-zeppelin/tree/master/shell)
- - [hive](https://github.com/apache/incubator-zeppelin/tree/master/hive)
+ - [jdbc](https://github.com/apache/incubator-zeppelin/tree/master/jdbc)
### Contributing a new Interpreter to Zeppelin releases
diff --git a/docs/index.md b/docs/index.md
index 19d9390cbfd..e6a71f02f85 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -41,7 +41,7 @@ limitations under the License.
### Multiple language backend
Zeppelin interpreter concept allows any language/data-processing-backend to be plugged into Zeppelin.
-Currently Zeppelin supports many interpreters such as Scala(with Apache Spark), Python(with Apache Spark), SparkSQL, Hive, Markdown and Shell.
+Currently Zeppelin supports many interpreters such as Scala (with Apache Spark), Python (with Apache Spark), SparkSQL, JDBC, Markdown and Shell.
diff --git a/docs/install/install.md b/docs/install/install.md
index 696f837b295..4dcad74a56a 100644
--- a/docs/install/install.md
+++ b/docs/install/install.md
@@ -232,7 +232,7 @@ You can configure Zeppelin with both **environment variables** in `conf/zeppelin
ZEPPELIN_INTERPRETERS |
zeppelin.interpreters |
- org.apache.zeppelin.spark.SparkInterpreter, org.apache.zeppelin.spark.PySparkInterpreter, org.apache.zeppelin.spark.SparkSqlInterpreter, org.apache.zeppelin.spark.DepInterpreter, org.apache.zeppelin.markdown.Markdown, org.apache.zeppelin.shell.ShellInterpreter, org.apache.zeppelin.hive.HiveInterpreter
+ | org.apache.zeppelin.spark.SparkInterpreter, org.apache.zeppelin.spark.PySparkInterpreter, org.apache.zeppelin.spark.SparkSqlInterpreter, org.apache.zeppelin.spark.DepInterpreter, org.apache.zeppelin.markdown.Markdown, org.apache.zeppelin.shell.ShellInterpreter,
...
|
Comma separated interpreter configurations [Class] The first interpreter will be a default value. It means only the first interpreter in this list can be available without %interpreter_name annotation in Zeppelin notebook paragraph. |
diff --git a/docs/install/yarn_install.md b/docs/install/yarn_install.md
index 764014b402c..1f01c3593fc 100644
--- a/docs/install/yarn_install.md
+++ b/docs/install/yarn_install.md
@@ -20,7 +20,7 @@ limitations under the License.
{% include JB/setup %}
## Introduction
-This page describes how to pre-configure a bare metal node, configure Zeppelin and connect it to existing YARN cluster running Hortonworks flavour of Hadoop. It also describes steps to configure Spark & Hive interpreter of Zeppelin.
+This page describes how to pre-configure a bare metal node, configure Zeppelin and connect it to an existing YARN cluster running the Hortonworks flavour of Hadoop. It also describes steps to configure the Spark interpreter of Zeppelin.
## Prepare Node
@@ -118,16 +118,12 @@ bin/zeppelin-daemon.sh stop
```
## Interpreter
-Zeppelin provides various distributed processing frameworks to process data that ranges from Spark, Hive, Tajo, Ignite and Lens to name a few. This document describes to configure Hive & Spark interpreters.
+Zeppelin supports various distributed processing frameworks and data sources such as Spark, JDBC, Tajo, Ignite and Lens, to name a few. This document describes how to configure the JDBC & Spark interpreters.
### Hive
-Zeppelin supports Hive interpreter and hence copy hive-site.xml that should be present at /etc/hive/conf to the configuration folder of Zeppelin. Once Zeppelin is built it will have conf folder under /home/zeppelin/incubator-zeppelin.
+Zeppelin supports Hive through the JDBC interpreter. You may need connection information, such as the HiveServer2 host and port, to configure it; this can be found in your hive-site.xml.
-```bash
-cp /etc/hive/conf/hive-site.xml /home/zeppelin/incubator-zeppelin/conf
-```
-
-Once Zeppelin server has started successfully, visit http://[zeppelin-server-host-name]:8080 with your web browser. Click on Interpreter tab next to Notebook dropdown. Look for Hive configurations and set them appropriately. By default hive.hiveserver2.url will be pointing to localhost and hive.hiveserver2.password/hive.hiveserver2.user are set to hive/hive. Set them as per Hive installation on YARN cluster.
+Once the Zeppelin server has started successfully, visit http://[zeppelin-server-host-name]:8080 with your web browser. Click on the Interpreter tab next to the Notebook dropdown, look for the Hive configurations, and set them according to the Hive installation on your YARN cluster.
Click on Save button. Once these configurations are updated, Zeppelin will prompt you to restart the interpreter. Accept the prompt and the interpreter will reload the configurations.
### Spark
diff --git a/docs/interpreter/hive.md b/docs/interpreter/hive.md
index 99d483b6e32..2fc365c3502 100644
--- a/docs/interpreter/hive.md
+++ b/docs/interpreter/hive.md
@@ -9,6 +9,51 @@ group: manual
## Hive Interpreter for Apache Zeppelin
The [Apache Hive](https://hive.apache.org/) ™ data warehouse software facilitates querying and managing large datasets residing in distributed storage. Hive provides a mechanism to project structure onto this data and query the data using a SQL-like language called HiveQL. At the same time this language also allows traditional map/reduce programmers to plug in their custom mappers and reducers when it is inconvenient or inefficient to express this logic in HiveQL.
+## Important Notice
+The Hive Interpreter will be deprecated and merged into the JDBC Interpreter. The JDBC Interpreter provides the same functionality; see the example settings and dependencies below.
+
+### Properties
+
+| Property      | Value                           |
+| ------------- | ------------------------------- |
+| hive.driver   | org.apache.hive.jdbc.HiveDriver |
+| hive.url      | jdbc:hive2://localhost:10000    |
+| hive.user     | hiveUser                        |
+| hive.password | hivePassword                    |
+
+### Dependencies
+
+| Artifact                               | Exclude |
+| -------------------------------------- | ------- |
+| org.apache.hive:hive-jdbc:0.14.0       |         |
+| org.apache.hadoop:hadoop-common:2.6.0  |         |
+
+----
+
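+To make these settings concrete, the following is a minimal, self-contained sketch (illustrative only, not part of Zeppelin) of what they amount to: loading the driver class named in `hive.driver` and opening a connection to the `hive.url` endpoint with the `hive.user` / `hive.password` credentials through plain JDBC. The host, port and credentials are the placeholder values from the table above.
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+public class HiveJdbcExample {
+  public static void main(String[] args) throws Exception {
+    // Driver class from the hive.driver property above.
+    Class.forName("org.apache.hive.jdbc.HiveDriver");
+    // URL, user and password from hive.url, hive.user and hive.password.
+    try (Connection conn = DriverManager.getConnection(
+             "jdbc:hive2://localhost:10000", "hiveUser", "hivePassword");
+         Statement stmt = conn.createStatement();
+         ResultSet rs = stmt.executeQuery("show databases")) {
+      while (rs.next()) {
+        System.out.println(rs.getString(1));
+      }
+    }
+  }
+}
+```
+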
### Configuration
diff --git a/docs/interpreter/jdbc.md b/docs/interpreter/jdbc.md
index f12c673813b..72cce3d5328 100644
--- a/docs/interpreter/jdbc.md
+++ b/docs/interpreter/jdbc.md
@@ -195,6 +195,47 @@ To develop this functionality use this [method](http://docs.oracle.com/javase/7/
+### Examples
+#### Hive
+##### Properties
+
+| Name          | Value                           |
+| ------------- | ------------------------------- |
+| hive.driver   | org.apache.hive.jdbc.HiveDriver |
+| hive.url      | jdbc:hive2://localhost:10000    |
+| hive.user     | hive_user                       |
+| hive.password | hive_password                   |
+
+##### Dependencies
+
+| Artifact                               | Excludes |
+| -------------------------------------- | -------- |
+| org.apache.hive:hive-jdbc:0.14.0       |          |
+| org.apache.hadoop:hadoop-common:2.6.0  |          |
+
### How to use
#### Reference in paragraph
diff --git a/docs/manual/interpreters.md b/docs/manual/interpreters.md
index 87a412230d7..ca73befc30e 100644
--- a/docs/manual/interpreters.md
+++ b/docs/manual/interpreters.md
@@ -22,7 +22,7 @@ limitations under the License.
## Interpreters in Zeppelin
In this section, we will explain about the role of interpreters, interpreters group and interpreter settings in Zeppelin.
The concept of Zeppelin interpreter allows any language/data-processing-backend to be plugged into Zeppelin.
-Currently, Zeppelin supports many interpreters such as Scala ( with Apache Spark ), Python ( with Apache Spark ), SparkSQL, Hive, Markdown, Shell and so on.
+Currently, Zeppelin supports many interpreters such as Scala (with Apache Spark), Python (with Apache Spark), SparkSQL, JDBC, Markdown, Shell and so on.
## What is Zeppelin interpreter?
Zeppelin Interpreter is a plug-in which enables Zeppelin users to use a specific language/data-processing-backend. For example, to use Scala code in Zeppelin, you need `%spark` interpreter.
diff --git a/docs/security/interpreter_authorization.md b/docs/security/interpreter_authorization.md
index 862ef9b828b..d6c48b208ef 100644
--- a/docs/security/interpreter_authorization.md
+++ b/docs/security/interpreter_authorization.md
@@ -27,7 +27,7 @@ Interpreter authorization involves permissions like creating an interpreter and
Data source authorization involves authenticating to the data source like a Mysql database and letting it determine user permissions.
-For the Hive interpreter, we need to maintain per-user connection pools.
+For the JDBC interpreter, we need to maintain per-user connection pools.
The interpret method takes the user string as parameter and executes the jdbc call using a connection in the user's connection pool.
In case of Presto, we don't need password if the Presto DB server runs backend code using HDFS authorization for the user.
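+
+As an illustration of this idea (a sketch under assumptions, not Zeppelin's actual implementation; the class and method names are invented for the example), a per-user pool can be kept as a map from the user name to that user's own queue of JDBC connections, so a query issued on behalf of a user only reuses connections opened with that user's credentials:
+
+```java
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+
+/** Illustrative per-user connection pool; assumes the JDBC driver is already registered. */
+class PerUserConnectionPool {
+  private static final int POOL_SIZE = 10;
+  private final Map<String, BlockingQueue<Connection>> pools = new ConcurrentHashMap<>();
+  private final String url;  // e.g. a jdbc:hive2://... or Presto JDBC URL
+
+  PerUserConnectionPool(String url) {
+    this.url = url;
+  }
+
+  /** Borrow a connection owned by this user, creating a new one if none is pooled. */
+  Connection borrow(String user, String password) throws SQLException {
+    BlockingQueue<Connection> pool =
+        pools.computeIfAbsent(user, u -> new ArrayBlockingQueue<>(POOL_SIZE));
+    Connection conn = pool.poll();
+    if (conn == null || conn.isClosed()) {
+      conn = DriverManager.getConnection(url, user, password);
+    }
+    return conn;
+  }
+
+  /** Return a connection to its owner's pool, or close it if the pool is full. */
+  void release(String user, Connection conn) throws SQLException {
+    BlockingQueue<Connection> pool = pools.get(user);
+    if (pool == null || !pool.offer(conn)) {
+      conn.close();
+    }
+  }
+}
+```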
diff --git a/hive/pom.xml b/hive/pom.xml
deleted file mode 100644
index 057a335d434..00000000000
--- a/hive/pom.xml
+++ /dev/null
@@ -1,165 +0,0 @@
-
-
-
-
- 4.0.0
-
-
- zeppelin
- org.apache.zeppelin
- 0.6.0-SNAPSHOT
- ..
-
-
- org.apache.zeppelin
- zeppelin-hive
- jar
- 0.6.0-SNAPSHOT
- Zeppelin: Hive interpreter
- http://www.apache.org
-
-
- 0.14.0
- 2.6.0
-
-
-
-
- org.apache.zeppelin
- zeppelin-interpreter
- ${project.version}
- provided
-
-
-
- org.apache.commons
- commons-exec
- 1.3
-
-
-
- org.slf4j
- slf4j-api
-
-
-
- org.slf4j
- slf4j-log4j12
-
-
-
- org.apache.hive
- hive-jdbc
- ${hive.hive.version}
-
-
- org.apache.hadoop
- hadoop-common
- ${hive.hadoop.version}
-
-
- junit
- junit
- test
-
-
- com.h2database
- h2
- 1.4.190
- test
-
-
-
-
-
-
- org.apache.maven.plugins
- maven-deploy-plugin
- 2.7
-
- true
-
-
-
-
- maven-enforcer-plugin
- 1.3.1
-
-
- enforce
- none
-
-
-
-
-
- maven-dependency-plugin
- 2.8
-
-
- copy-dependencies
- package
-
- copy-dependencies
-
-
- ${project.build.directory}/../../interpreter/hive
- false
- false
- true
- runtime
-
-
-
- copy-artifact
- package
-
- copy
-
-
- ${project.build.directory}/../../interpreter/hive
- false
- false
- true
- runtime
-
-
- ${project.groupId}
- ${project.artifactId}
- ${project.version}
- ${project.packaging}
-
-
-
-
-
-
-
-
-
-
diff --git a/hive/src/main/java/org/apache/zeppelin/hive/HiveInterpreter.java b/hive/src/main/java/org/apache/zeppelin/hive/HiveInterpreter.java
deleted file mode 100644
index 42916b47452..00000000000
--- a/hive/src/main/java/org/apache/zeppelin/hive/HiveInterpreter.java
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zeppelin.hive;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-import org.apache.zeppelin.interpreter.Interpreter;
-import org.apache.zeppelin.interpreter.InterpreterContext;
-import org.apache.zeppelin.interpreter.InterpreterPropertyBuilder;
-import org.apache.zeppelin.interpreter.InterpreterResult;
-import org.apache.zeppelin.interpreter.InterpreterResult.Code;
-import org.apache.zeppelin.scheduler.Scheduler;
-import org.apache.zeppelin.scheduler.SchedulerFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.apache.commons.lang.StringUtils.containsIgnoreCase;
-
-/**
- * Hive interpreter for Zeppelin.
- */
-public class HiveInterpreter extends Interpreter {
- Logger logger = LoggerFactory.getLogger(HiveInterpreter.class);
-
- static final String COMMON_KEY = "common";
- static final String MAX_LINE_KEY = "max_count";
- static final String MAX_LINE_DEFAULT = "1000";
-
- static final String DEFAULT_KEY = "default";
- static final String DRIVER_KEY = "driver";
- static final String URL_KEY = "url";
- static final String USER_KEY = "user";
- static final String PASSWORD_KEY = "password";
- static final String DOT = ".";
-
- static final char TAB = '\t';
- static final char NEWLINE = '\n';
- static final String EXPLAIN_PREDICATE = "EXPLAIN ";
- static final String TABLE_MAGIC_TAG = "%table ";
- static final String UPDATE_COUNT_HEADER = "Update Count";
-
- static final String COMMON_MAX_LINE = COMMON_KEY + DOT + MAX_LINE_KEY;
-
- static final String DEFAULT_DRIVER = DEFAULT_KEY + DOT + DRIVER_KEY;
- static final String DEFAULT_URL = DEFAULT_KEY + DOT + URL_KEY;
- static final String DEFAULT_USER = DEFAULT_KEY + DOT + USER_KEY;
- static final String DEFAULT_PASSWORD = DEFAULT_KEY + DOT + PASSWORD_KEY;
-
- private final HashMap<String, Properties> propertiesMap;
- private final Map<String, Statement> paragraphIdStatementMap;
-
- private final Map<String, ArrayList<Connection>> propertyKeyUnusedConnectionListMap;
- private final Map<String, Connection> paragraphIdConnectionMap;
-
- static {
- Interpreter.register(
- "hql",
- "hive",
- HiveInterpreter.class.getName(),
- new InterpreterPropertyBuilder()
- .add(COMMON_MAX_LINE, MAX_LINE_DEFAULT, "Maximum line of results")
- .add(DEFAULT_DRIVER, "org.apache.hive.jdbc.HiveDriver", "Hive JDBC driver")
- .add(DEFAULT_URL, "jdbc:hive2://localhost:10000", "The URL for HiveServer2.")
- .add(DEFAULT_USER, "hive", "The hive user")
- .add(DEFAULT_PASSWORD, "", "The password for the hive user").build());
- }
-
- public HiveInterpreter(Properties property) {
- super(property);
- propertiesMap = new HashMap<>();
- propertyKeyUnusedConnectionListMap = new HashMap<>();
- paragraphIdStatementMap = new HashMap<>();
- paragraphIdConnectionMap = new HashMap<>();
- }
-
- public HashMap<String, Properties> getPropertiesMap() {
- return propertiesMap;
- }
-
- @Override
- public void open() {
- logger.debug("property: {}", property);
-
- for (String propertyKey : property.stringPropertyNames()) {
- logger.debug("propertyKey: {}", propertyKey);
- String[] keyValue = propertyKey.split("\\.", 2);
- if (2 == keyValue.length) {
- logger.debug("key: {}, value: {}", keyValue[0], keyValue[1]);
- Properties prefixProperties;
- if (propertiesMap.containsKey(keyValue[0])) {
- prefixProperties = propertiesMap.get(keyValue[0]);
- } else {
- prefixProperties = new Properties();
- propertiesMap.put(keyValue[0], prefixProperties);
- }
- prefixProperties.put(keyValue[1], property.getProperty(propertyKey));
- }
- }
-
- Set<String> removeKeySet = new HashSet<>();
- for (String key : propertiesMap.keySet()) {
- if (!COMMON_KEY.equals(key)) {
- Properties properties = propertiesMap.get(key);
- if (!properties.containsKey(DRIVER_KEY) || !properties.containsKey(URL_KEY)) {
- logger.error("{} will be ignored. {}.{} and {}.{} is mandatory.",
- key, DRIVER_KEY, key, key, URL_KEY);
- removeKeySet.add(key);
- }
- }
- }
-
- for (String key : removeKeySet) {
- propertiesMap.remove(key);
- }
-
- logger.debug("propertiesMap: {}", propertiesMap);
- }
-
- @Override
- public void close() {
- try {
- for (List<Connection> connectionList : propertyKeyUnusedConnectionListMap.values()) {
- for (Connection c : connectionList) {
- c.close();
- }
- }
-
- for (Statement statement : paragraphIdStatementMap.values()) {
- statement.close();
- }
- paragraphIdStatementMap.clear();
-
- for (Connection connection : paragraphIdConnectionMap.values()) {
- connection.close();
- }
- paragraphIdConnectionMap.clear();
-
- } catch (SQLException e) {
- logger.error("Error while closing...", e);
- }
- }
-
- public Connection getConnection(String propertyKey) throws ClassNotFoundException, SQLException {
- Connection connection = null;
- if (propertyKey == null || propertiesMap.get(propertyKey) == null) {
- return null;
- }
- if (propertyKeyUnusedConnectionListMap.containsKey(propertyKey)) {
- ArrayList<Connection> connectionList = propertyKeyUnusedConnectionListMap.get(propertyKey);
- if (0 != connectionList.size()) {
- connection = propertyKeyUnusedConnectionListMap.get(propertyKey).remove(0);
- if (null != connection && connection.isClosed()) {
- connection.close();
- connection = null;
- }
- }
- }
- if (null == connection) {
- Properties properties = propertiesMap.get(propertyKey);
- Class.forName(properties.getProperty(DRIVER_KEY));
- String url = properties.getProperty(URL_KEY);
- String user = properties.getProperty(USER_KEY);
- String password = properties.getProperty(PASSWORD_KEY);
- if (null != user && null != password) {
- connection = DriverManager.getConnection(url, user, password);
- } else {
- connection = DriverManager.getConnection(url, properties);
- }
- }
- return connection;
- }
-
- public Statement getStatement(String propertyKey, String paragraphId)
- throws SQLException, ClassNotFoundException {
- Connection connection;
- if (paragraphIdConnectionMap.containsKey(paragraphId)) {
- // Never enter for now.
- connection = paragraphIdConnectionMap.get(paragraphId);
- } else {
- connection = getConnection(propertyKey);
- }
-
- if (connection == null) {
- return null;
- }
-
- Statement statement = connection.createStatement();
- if (isStatementClosed(statement)) {
- connection = getConnection(propertyKey);
- statement = connection.createStatement();
- }
- paragraphIdConnectionMap.put(paragraphId, connection);
- paragraphIdStatementMap.put(paragraphId, statement);
-
- return statement;
- }
-
- private boolean isStatementClosed(Statement statement) {
- try {
- return statement.isClosed();
- } catch (Throwable t) {
- logger.debug("{} doesn't support isClosed method", statement);
- return false;
- }
- }
-
- public InterpreterResult executeSql(String propertyKey, String sql,
- InterpreterContext interpreterContext) {
- String paragraphId = interpreterContext.getParagraphId();
-
- try {
-
- Statement statement = getStatement(propertyKey, paragraphId);
-
- if (statement == null) {
- return new InterpreterResult(Code.ERROR, "Prefix not found.");
- }
-
- statement.setMaxRows(getMaxResult());
-
- StringBuilder msg;
-
- if (containsIgnoreCase(sql, EXPLAIN_PREDICATE)) {
- msg = new StringBuilder();
- } else {
- msg = new StringBuilder(TABLE_MAGIC_TAG);
- }
-
- ResultSet resultSet = null;
-
- try {
- boolean isResultSetAvailable = statement.execute(sql);
-
- if (isResultSetAvailable) {
- resultSet = statement.getResultSet();
-
- ResultSetMetaData md = resultSet.getMetaData();
-
- for (int i = 1; i < md.getColumnCount() + 1; i++) {
- if (i > 1) {
- msg.append(TAB);
- }
- msg.append(md.getColumnName(i));
- }
- msg.append(NEWLINE);
-
- int displayRowCount = 0;
- while (resultSet.next() && displayRowCount < getMaxResult()) {
- for (int i = 1; i < md.getColumnCount() + 1; i++) {
- msg.append(resultSet.getString(i));
- if (i != md.getColumnCount()) {
- msg.append(TAB);
- }
- }
- msg.append(NEWLINE);
- displayRowCount++;
- }
- } else {
- // Response contains either an update count or there are no results.
- int updateCount = statement.getUpdateCount();
- msg.append(UPDATE_COUNT_HEADER).append(NEWLINE);
- msg.append(updateCount).append(NEWLINE);
- }
- } finally {
- try {
- if (resultSet != null) {
- resultSet.close();
- }
- statement.close();
- } finally {
- moveConnectionToUnused(propertyKey, paragraphId);
- }
- }
-
- return new InterpreterResult(Code.SUCCESS, msg.toString());
-
- } catch (SQLException | ClassNotFoundException ex) {
- logger.error("Cannot run " + sql, ex);
- return new InterpreterResult(Code.ERROR, ex.getMessage());
- }
- }
-
- private void moveConnectionToUnused(String propertyKey, String paragraphId) {
- if (paragraphIdConnectionMap.containsKey(paragraphId)) {
- Connection connection = paragraphIdConnectionMap.remove(paragraphId);
- if (null != connection) {
- if (propertyKeyUnusedConnectionListMap.containsKey(propertyKey)) {
- propertyKeyUnusedConnectionListMap.get(propertyKey).add(connection);
- } else {
- ArrayList<Connection> connectionList = new ArrayList<>();
- connectionList.add(connection);
- propertyKeyUnusedConnectionListMap.put(propertyKey, connectionList);
- }
- }
- }
- }
-
- @Override
- public InterpreterResult interpret(String cmd, InterpreterContext contextInterpreter) {
- String propertyKey = getPropertyKey(cmd);
-
- if (null != propertyKey && !propertyKey.equals(DEFAULT_KEY)) {
- cmd = cmd.substring(propertyKey.length() + 2);
- }
-
- cmd = cmd.trim();
-
- logger.info("PropertyKey: {}, SQL command: '{}'", propertyKey, cmd);
-
- return executeSql(propertyKey, cmd, contextInterpreter);
- }
-
- private int getMaxResult() {
- return Integer.valueOf(
- propertiesMap.get(COMMON_KEY).getProperty(MAX_LINE_KEY, MAX_LINE_DEFAULT));
- }
-
- public String getPropertyKey(String cmd) {
- boolean firstLineIndex = cmd.startsWith("(");
-
- if (firstLineIndex) {
- int configStartIndex = cmd.indexOf("(");
- int configLastIndex = cmd.indexOf(")");
- if (configStartIndex != -1 && configLastIndex != -1) {
- return cmd.substring(configStartIndex + 1, configLastIndex);
- } else {
- return null;
- }
- } else {
- return DEFAULT_KEY;
- }
- }
-
- @Override
- public void cancel(InterpreterContext context) {
- String paragraphId = context.getParagraphId();
- try {
- paragraphIdStatementMap.get(paragraphId).cancel();
- } catch (SQLException e) {
- logger.error("Error while cancelling...", e);
- }
- }
-
- @Override
- public FormType getFormType() {
- return FormType.SIMPLE;
- }
-
- @Override
- public int getProgress(InterpreterContext context) {
- return 0;
- }
-
- @Override
- public Scheduler getScheduler() {
- return SchedulerFactory.singleton().createOrGetParallelScheduler(
- HiveInterpreter.class.getName() + this.hashCode(), 10);
- }
-
- @Override
- public List completion(String buf, int cursor) {
- return null;
- }
-}
diff --git a/hive/src/test/java/org/apache/zeppelin/hive/HiveInterpreterTest.java b/hive/src/test/java/org/apache/zeppelin/hive/HiveInterpreterTest.java
deleted file mode 100644
index cbec104fba9..00000000000
--- a/hive/src/test/java/org/apache/zeppelin/hive/HiveInterpreterTest.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.zeppelin.hive;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Properties;
-
-import org.apache.zeppelin.interpreter.InterpreterContext;
-import org.apache.zeppelin.interpreter.InterpreterResult;
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-import static java.lang.String.format;
-
-/**
- * Hive interpreter unit tests
- */
-public class HiveInterpreterTest {
- static String jdbcConnection;
-
- private static String getJdbcConnection() throws IOException {
- if(null == jdbcConnection) {
- Path tmpDir = Files.createTempDirectory("h2-test-");
- tmpDir.toFile().deleteOnExit();
- jdbcConnection = format("jdbc:h2:%s", tmpDir);
- }
- return jdbcConnection;
- }
- @BeforeClass
- public static void setUp() throws Exception {
-
- Class.forName("org.h2.Driver");
- Connection connection = DriverManager.getConnection(getJdbcConnection());
- Statement statement = connection.createStatement();
- statement.execute(
- "DROP TABLE IF EXISTS test_table; " +
- "CREATE TABLE test_table(id varchar(255), name varchar(255));");
- statement.execute(
- "insert into test_table(id, name) values ('a', 'a_name'),('b', 'b_name');"
- );
- }
-
- @After
- public void tearDown() throws Exception {
- }
-
- @Test
- public void testForParsePropertyKey() throws IOException {
- HiveInterpreter t = new HiveInterpreter(new Properties());
-
- assertEquals(t.getPropertyKey("(fake) select max(cant) from test_table where id >= 2452640"),
- "fake");
-
- assertEquals(t.getPropertyKey("() select max(cant) from test_table where id >= 2452640"),
- "");
-
- assertEquals(t.getPropertyKey(")fake( select max(cant) from test_table where id >= 2452640"),
- "default");
-
- // when you use a %hive(prefix1), prefix1 is the propertyKey as form part of the cmd string
- assertEquals(t.getPropertyKey("(prefix1)\n select max(cant) from test_table where id >= 2452640"),
- "prefix1");
-
- assertEquals(t.getPropertyKey("(prefix2) select max(cant) from test_table where id >= 2452640"),
- "prefix2");
-
- // when you use a %hive, prefix is the default
- assertEquals(t.getPropertyKey("select max(cant) from test_table where id >= 2452640"),
- "default");
- }
-
- @Test
- public void testForMapPrefix() throws SQLException, IOException {
- Properties properties = new Properties();
- properties.setProperty("common.max_count", "1000");
- properties.setProperty("common.max_retry", "3");
- properties.setProperty("default.driver", "org.h2.Driver");
- properties.setProperty("default.url", getJdbcConnection());
- properties.setProperty("default.user", "");
- properties.setProperty("default.password", "");
- HiveInterpreter t = new HiveInterpreter(properties);
- t.open();
-
- String sqlQuery = "(fake) select * from test_table";
-
- InterpreterResult interpreterResult = t.interpret(sqlQuery, new InterpreterContext("", "1", "", "", null, null, null, null, null, null, null));
-
- // if prefix not found return ERROR and Prefix not found.
- assertEquals(InterpreterResult.Code.ERROR, interpreterResult.code());
- assertEquals("Prefix not found.", interpreterResult.message());
- }
-
- @Test
- public void readTest() throws IOException {
- Properties properties = new Properties();
- properties.setProperty("common.max_count", "1000");
- properties.setProperty("common.max_retry", "3");
- properties.setProperty("default.driver", "org.h2.Driver");
- properties.setProperty("default.url", getJdbcConnection());
- properties.setProperty("default.user", "");
- properties.setProperty("default.password", "");
- HiveInterpreter t = new HiveInterpreter(properties);
- t.open();
-
- assertTrue(t.interpret("show databases", new InterpreterContext("", "1", "", "", null, null, null, null, null, null, null)).message().contains("SCHEMA_NAME"));
- assertEquals("ID\tNAME\na\ta_name\nb\tb_name\n",
- t.interpret("select * from test_table", new InterpreterContext("", "1", "", "", null, null, null, null, null, null, null)).message());
- }
-
- @Test
- public void readTestWithConfiguration() throws IOException {
- Properties properties = new Properties();
- properties.setProperty("common.max_count", "1000");
- properties.setProperty("common.max_retry", "3");
- properties.setProperty("default.driver", "wrong.Driver");
- properties.setProperty("default.url", getJdbcConnection());
- properties.setProperty("default.user", "");
- properties.setProperty("default.password", "");
- properties.setProperty("h2.driver", "org.h2.Driver");
- properties.setProperty("h2.url", getJdbcConnection());
- properties.setProperty("h2.user", "");
- properties.setProperty("h2.password", "");
- HiveInterpreter t = new HiveInterpreter(properties);
- t.open();
-
- assertEquals("ID\tNAME\na\ta_name\nb\tb_name\n",
- t.interpret("(h2)\n select * from test_table", new InterpreterContext("", "1", "", "", null, null, null, null, null, null, null)).message());
- }
-
- @Test
- public void jdbcRestart() throws IOException, SQLException, ClassNotFoundException {
- Properties properties = new Properties();
- properties.setProperty("common.max_count", "1000");
- properties.setProperty("common.max_retry", "3");
- properties.setProperty("default.driver", "org.h2.Driver");
- properties.setProperty("default.url", getJdbcConnection());
- properties.setProperty("default.user", "");
- properties.setProperty("default.password", "");
- HiveInterpreter t = new HiveInterpreter(properties);
- t.open();
-
- InterpreterResult interpreterResult =
- t.interpret("select * from test_table", new InterpreterContext("", "1", "", "", null, null, null, null, null, null, null));
- assertEquals("ID\tNAME\na\ta_name\nb\tb_name\n", interpreterResult.message());
-
- t.getConnection("default").close();
-
- interpreterResult =
- t.interpret("select * from test_table", new InterpreterContext("", "1", "", "", null, null, null, null, null, null, null));
- assertEquals("ID\tNAME\na\ta_name\nb\tb_name\n", interpreterResult.message());
- }
-
- @Test
- public void test() throws IOException {
- Properties properties = new Properties();
- properties.setProperty("common.max_count", "1000");
- properties.setProperty("common.max_retry", "3");
- properties.setProperty("default.driver", "org.h2.Driver");
- properties.setProperty("default.url", getJdbcConnection());
- properties.setProperty("default.user", "");
- properties.setProperty("default.password", "");
- HiveInterpreter t = new HiveInterpreter(properties);
- t.open();
-
- InterpreterContext interpreterContext = new InterpreterContext(null, "a", null, null, null, null, null, null, null, null, null);
-
- //simple select test
- InterpreterResult result = t.interpret("select * from test_table", interpreterContext);
- assertEquals(result.type(), InterpreterResult.Type.TABLE);
-
- //explain test
- result = t.interpret("explain select * from test_table", interpreterContext);
- assertEquals(result.type(), InterpreterResult.Type.TEXT);
- t.close();
- }
-
- @Test
- public void parseMultiplePropertiesMap() {
- Properties properties = new Properties();
- properties.setProperty("common.max_count", "1000");
- properties.setProperty("common.max_retry", "3");
- properties.setProperty("default.driver", "defaultDriver");
- properties.setProperty("default.url", "defaultUri");
- properties.setProperty("default.user", "defaultUser");
- HiveInterpreter hi = new HiveInterpreter(properties);
- hi.open();
- assertNotNull("propertiesMap is not null", hi.getPropertiesMap());
- assertNotNull("propertiesMap.get(default) is not null", hi.getPropertiesMap().get("default"));
- assertTrue("default exists", "defaultDriver".equals(hi.getPropertiesMap().get("default").getProperty("driver")));
- hi.close();
- }
-
- @Test
- public void ignoreInvalidSettings() {
- Properties properties = new Properties();
- properties.setProperty("common.max_count", "1000");
- properties.setProperty("common.max_retry", "3");
- properties.setProperty("default.driver", "defaultDriver");
- properties.setProperty("default.url", "defaultUri");
- properties.setProperty("default.user", "defaultUser");
- properties.setProperty("presto.driver", "com.facebook.presto.jdbc.PrestoDriver");
- HiveInterpreter hi = new HiveInterpreter(properties);
- hi.open();
- assertTrue("default exists", hi.getPropertiesMap().containsKey("default"));
- assertFalse("presto doesn't exists", hi.getPropertiesMap().containsKey("presto"));
- hi.close();
- }
-
- @Test
- public void getPropertyKey() {
- HiveInterpreter hi = new HiveInterpreter(new Properties());
- hi.open();
- String testCommand = "(default)\nshow tables";
- assertEquals("get key of default", "default", hi.getPropertyKey(testCommand));
- testCommand = "(default) show tables";
- assertEquals("get key of default", "default", hi.getPropertyKey(testCommand));
- hi.close();
- }
-}
diff --git a/pom.xml b/pom.xml
index 970f6cbe457..2451dec7af3 100755
--- a/pom.xml
+++ b/pom.xml
@@ -72,7 +72,6 @@
angular
shell
livy
- hive
hbase
phoenix
postgresql