diff --git a/cassandra/pom.xml b/cassandra/pom.xml
index e12c75012d7..1019ad7acc2 100644
--- a/cassandra/pom.xml
+++ b/cassandra/pom.xml
@@ -35,8 +35,8 @@
<cassandra.driver.version>2.1.7.1</cassandra.driver.version>
- <scala.version>2.11.7</scala.version>
- <scala.binary.version>2.11</scala.binary.version>
+ <scala.version>2.10.4</scala.version>
+ <scala.binary.version>2.10</scala.binary.version>
<commons.lang3.version>3.3.2</commons.lang3.version>
<scalate.version>1.7.1</scalate.version>
diff --git a/spark/pom.xml b/spark/pom.xml
index 9b82acbedd5..5f470742c67 100644
--- a/spark/pom.xml
+++ b/spark/pom.xml
@@ -48,7 +48,7 @@
<akka.group>org.spark-project.akka</akka.group>
<akka.version>2.3.4-spark</akka.version>
-
+ <mockito.version>1.9.5</mockito.version>
<spark.download.url>http://www.apache.org/dist/spark/spark-${spark.version}/spark-${spark.version}.tgz</spark.download.url>
@@ -494,6 +494,13 @@
<artifactId>junit</artifactId>
<scope>test</scope>
+
+ <dependency>
+ <groupId>org.mockito</groupId>
+ <artifactId>mockito-core</artifactId>
+ <version>${mockito.version}</version>
+ <scope>test</scope>
+ </dependency>
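
For context, mockito-core (test scope) lets this module's unit tests stub collaborators. A minimal, hypothetical sketch of the style of test the dependency enables; the Resolver interface and the values are made-up names, not part of this patch:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.verify;
    import static org.mockito.Mockito.when;

    import org.junit.Assert;
    import org.junit.Test;

    public class MockitoStyleTest {
      // Hypothetical collaborator; stands in for whatever the real tests stub.
      interface Resolver {
        String resolve(String key);
      }

      @Test
      public void stubsAndVerifiesACollaborator() {
        Resolver resolver = mock(Resolver.class);
        when(resolver.resolve("spark.version")).thenReturn("1.4.1");

        Assert.assertEquals("1.4.1", resolver.resolve("spark.version"));
        verify(resolver).resolve("spark.version"); // the interaction happened
      }
    }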
@@ -1002,7 +1009,19 @@
-
+ <plugin>
+ <groupId>org.scalatest</groupId>
+ <artifactId>scalatest-maven-plugin</artifactId>
+ <version>1.0</version>
+ <executions>
+ <execution>
+ <id>test</id>
+ <goals>
+ <goal>test</goal>
+ </goals>
+ </execution>
+ </executions>
+ </plugin>
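
A note on the plugin above: binding scalatest-maven-plugin's test goal to the test phase means mvn test now runs plain ScalaTest suites in this module alongside the existing JUnit tests; Surefire alone would only pick up JUnit-style classes (or ScalaTest suites explicitly annotated with a JUnit runner).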
diff --git a/spark/src/main/java/org/apache/zeppelin/spark/PySparkInterpreter.java b/spark/src/main/java/org/apache/zeppelin/spark/PySparkInterpreter.java
index 852dd335183..f2261ebc435 100644
--- a/spark/src/main/java/org/apache/zeppelin/spark/PySparkInterpreter.java
+++ b/spark/src/main/java/org/apache/zeppelin/spark/PySparkInterpreter.java
@@ -42,6 +42,7 @@
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SQLContext;
+import org.apache.zeppelin.context.ZeppelinContext;
import org.apache.zeppelin.interpreter.Interpreter;
import org.apache.zeppelin.interpreter.InterpreterContext;
import org.apache.zeppelin.interpreter.InterpreterException;
diff --git a/spark/src/main/java/org/apache/zeppelin/spark/SparkInterpreter.java b/spark/src/main/java/org/apache/zeppelin/spark/SparkInterpreter.java
index aec6d16d55a..65bae227571 100644
--- a/spark/src/main/java/org/apache/zeppelin/spark/SparkInterpreter.java
+++ b/spark/src/main/java/org/apache/zeppelin/spark/SparkInterpreter.java
@@ -33,6 +33,7 @@
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.SparkEnv;
+import org.apache.zeppelin.spark.display.SparkDisplayFunctionsHelper$;
import org.apache.spark.repl.SparkCommandLine;
import org.apache.spark.repl.SparkILoop;
import org.apache.spark.repl.SparkIMain;
@@ -43,6 +44,7 @@
import org.apache.spark.scheduler.Stage;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.ui.jobs.JobProgressListener;
+import org.apache.zeppelin.context.ZeppelinContext;
import org.apache.zeppelin.interpreter.Interpreter;
import org.apache.zeppelin.interpreter.InterpreterContext;
import org.apache.zeppelin.interpreter.InterpreterException;
@@ -335,14 +337,19 @@ public void open() {
// https://groups.google.com/forum/#!topic/scala-user/MlVwo2xCCI0
/*
- * > val env = new nsc.Settings(errLogger) > env.usejavacp.value = true > val p = new
- * Interpreter(env) > p.setContextClassLoader > Alternatively you can set the class path through
- * nsc.Settings.classpath.
+ * > val env = new nsc.Settings(errLogger)
+ * > env.usejavacp.value = true
+ * > val p = new Interpreter(env)
+ * > p.setContextClassLoader
+ * > Alternatively you can set the class path through nsc.Settings.classpath.
*
- * >> val settings = new Settings() >> settings.usejavacp.value = true >>
- * settings.classpath.value += File.pathSeparator + >> System.getProperty("java.class.path") >>
- * val in = new Interpreter(settings) { >> override protected def parentClassLoader =
- * getClass.getClassLoader >> } >> in.setContextClassLoader()
+ * >> val settings = new Settings()
+ * >> settings.usejavacp.value = true
+ * >> settings.classpath.value += File.pathSeparator + System.getProperty("java.class.path")
+ * >> val in = new Interpreter(settings) {
+ * >> override protected def parentClassLoader = getClass.getClassLoader
+ * >> }
+ * >> in.setContextClassLoader()
*/
Settings settings = new Settings();
if (getProperty("args") != null) {
@@ -433,18 +440,24 @@ public void open() {
dep = getDependencyResolver();
- z = new ZeppelinContext(sc, sqlc, null, dep, printStream,
- Integer.parseInt(getProperty("zeppelin.spark.maxResult")));
+ final int defaultSparkMaxResult = Integer.parseInt(getProperty("zeppelin.spark.maxResult"));
+ z = new ZeppelinContext(defaultSparkMaxResult);
+
+ SparkDisplayFunctionsHelper$.MODULE$.registerDisplayFunctions(sc, z);
intp.interpret("@transient var _binder = new java.util.HashMap[String, Object]()");
binder = (Map<String, Object>) getValue("_binder");
binder.put("sc", sc);
binder.put("sqlc", sqlc);
binder.put("z", z);
+ binder.put("dep", dep);
binder.put("out", printStream);
intp.interpret("@transient val z = "
- + "_binder.get(\"z\").asInstanceOf[org.apache.zeppelin.spark.ZeppelinContext]");
+ + "_binder.get(\"z\").asInstanceOf[org.apache.zeppelin.context.ZeppelinContext]");
+ intp.interpret("@transient val dep = "
+ + "_binder.get(\"dep\").asInstanceOf" +
+ "[org.apache.zeppelin.spark.dep.DependencyResolver]");
intp.interpret("@transient val sc = "
+ "_binder.get(\"sc\").asInstanceOf[org.apache.spark.SparkContext]");
intp.interpret("@transient val sqlc = "
@@ -467,17 +480,6 @@ public void open() {
intp.interpret("import org.apache.spark.sql.functions._");
}
- /* Temporary disabling DisplayUtils. see https://issues.apache.org/jira/browse/ZEPPELIN-127
- *
- // Utility functions for display
- intp.interpret("import org.apache.zeppelin.spark.utils.DisplayUtils._");
-
- // Scala implicit value for spark.maxResult
- intp.interpret("import org.apache.zeppelin.spark.utils.SparkMaxResult");
- intp.interpret("implicit val sparkMaxResult = new SparkMaxResult(" +
- Integer.parseInt(getProperty("zeppelin.spark.maxResult")) + ")");
- */
-
try {
if (sc.version().startsWith("1.1") || sc.version().startsWith("1.2")) {
Method loadFiles = this.interpreter.getClass().getMethod("loadFiles", Settings.class);
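
The binder wiring above is the mechanism by which host-JVM objects (sc, sqlc, z, and now dep) become typed vals inside the Scala REPL: they are stored in a java.util.HashMap declared in the REPL, then cast back out by interpreted code. A stripped-down sketch of the pattern, using stand-in values rather than the real Zeppelin classes:

    import java.util.HashMap;
    import java.util.Map;

    public class BinderSketch {
      public static void main(String[] args) {
        // Host side: everything the REPL should see goes through one map.
        Map<String, Object> binder = new HashMap<>();
        binder.put("z", "zeppelin-context");      // stand-in for ZeppelinContext
        binder.put("dep", "dependency-resolver"); // stand-in for DependencyResolver

        // REPL side, normally a string handed to intp.interpret(), e.g.:
        //   @transient val dep = _binder.get("dep")
        //     .asInstanceOf[org.apache.zeppelin.spark.dep.DependencyResolver]
        System.out.println(binder.get("dep"));
      }
    }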
diff --git a/spark/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java b/spark/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java
index e60ff2bc6bf..5ac90e45c12 100644
--- a/spark/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java
+++ b/spark/src/main/java/org/apache/zeppelin/spark/SparkSqlInterpreter.java
@@ -17,8 +17,11 @@
package org.apache.zeppelin.spark;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
+import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.Set;
@@ -30,6 +33,8 @@
import org.apache.spark.scheduler.Stage;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.ui.jobs.JobProgressListener;
+import org.apache.zeppelin.context.ZeppelinContext;
+import org.apache.zeppelin.display.DisplayParams;
import org.apache.zeppelin.interpreter.Interpreter;
import org.apache.zeppelin.interpreter.InterpreterContext;
import org.apache.zeppelin.interpreter.InterpreterException;
@@ -120,7 +125,8 @@ public void close() {}
public InterpreterResult interpret(String st, InterpreterContext context) {
SQLContext sqlc = null;
- sqlc = getSparkInterpreter().getSQLContext();
+ final SparkInterpreter sparkInterpreter = getSparkInterpreter();
+ sqlc = sparkInterpreter.getSQLContext();
SparkContext sc = sqlc.sparkContext();
if (concurrentSQL()) {
@@ -131,8 +137,13 @@ public InterpreterResult interpret(String st, InterpreterContext context) {
Object rdd = sqlc.sql(st);
- String msg = ZeppelinContext.showRDD(sc, context, rdd, maxResult);
- return new InterpreterResult(Code.SUCCESS, msg);
+ final ZeppelinContext zeppelinContext = sparkInterpreter.getZeppelinContext();
+ zeppelinContext.setInterpreterContext(context);
+ final ByteArrayOutputStream out = new ByteArrayOutputStream();
+ PrintStream stream = new PrintStream(out);
+ zeppelinContext.display(rdd, new DisplayParams(maxResult, stream, context,
+ new java.util.ArrayList()));
+ return new InterpreterResult(Code.SUCCESS, out.toString());
}
@Override
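
The rewritten interpret() above replaces ZeppelinContext.showRDD() with the new display() call and captures its printed output in memory. The capture idiom in isolation (a minimal sketch; DisplayParams and the display functions themselves are this patch's own types and are omitted):

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;

    public class CaptureSketch {
      public static void main(String[] args) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        PrintStream stream = new PrintStream(out);

        // Whatever a display function writes to `stream` lands in `out`.
        stream.println("%table");
        stream.println("name\tvalue");
        stream.flush();

        // out.toString() then becomes the InterpreterResult payload.
        System.out.println(out.toString());
      }
    }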
diff --git a/spark/src/main/java/org/apache/zeppelin/spark/ZeppelinContext.java b/spark/src/main/java/org/apache/zeppelin/spark/ZeppelinContext.java
deleted file mode 100644
index 6cb94d9e927..00000000000
--- a/spark/src/main/java/org/apache/zeppelin/spark/ZeppelinContext.java
+++ /dev/null
@@ -1,751 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.zeppelin.spark;
-
-import static scala.collection.JavaConversions.asJavaCollection;
-import static scala.collection.JavaConversions.asJavaIterable;
-import static scala.collection.JavaConversions.collectionAsScalaIterable;
-
-import java.io.PrintStream;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.spark.SparkContext;
-import org.apache.spark.sql.SQLContext;
-import org.apache.spark.sql.SQLContext.QueryExecution;
-import org.apache.spark.sql.catalyst.expressions.Attribute;
-import org.apache.spark.sql.hive.HiveContext;
-import org.apache.zeppelin.display.AngularObject;
-import org.apache.zeppelin.display.AngularObjectRegistry;
-import org.apache.zeppelin.display.AngularObjectWatcher;
-import org.apache.zeppelin.display.GUI;
-import org.apache.zeppelin.display.Input.ParamOption;
-import org.apache.zeppelin.interpreter.InterpreterContext;
-import org.apache.zeppelin.interpreter.InterpreterContextRunner;
-import org.apache.zeppelin.interpreter.InterpreterException;
-import org.apache.zeppelin.spark.dep.DependencyResolver;
-
-import scala.Tuple2;
-import scala.Unit;
-import scala.collection.Iterable;
-
-/**
- * Spark context for zeppelin.
- *
- * @author Leemoonsoo
- *
- */
-public class ZeppelinContext extends HashMap<String, Object> {
- private DependencyResolver dep;
- private PrintStream out;
- private InterpreterContext interpreterContext;
- private int maxResult;
-
- public ZeppelinContext(SparkContext sc, SQLContext sql,
- InterpreterContext interpreterContext,
- DependencyResolver dep, PrintStream printStream,
- int maxResult) {
- this.sc = sc;
- this.sqlContext = sql;
- this.interpreterContext = interpreterContext;
- this.dep = dep;
- this.out = printStream;
- this.maxResult = maxResult;
- }
-
- public SparkContext sc;
- public SQLContext sqlContext;
- public HiveContext hiveContext;
- private GUI gui;
-
- /**
- * Load dependency for interpreter and runtime (driver).
- * And distribute them to spark cluster (sc.add())
- *
- * @param artifact "group:artifact:version" or file path like "/somepath/your.jar"
- * @return
- * @throws Exception
- */
- public Iterable<String> load(String artifact) throws Exception {
- return collectionAsScalaIterable(dep.load(artifact, true));
- }
-
- /**
- * Load dependency and it's transitive dependencies for interpreter and runtime (driver).
- * And distribute them to spark cluster (sc.add())
- *
- * @param artifact "groupId:artifactId:version" or file path like "/somepath/your.jar"
- * @param excludes exclusion list of transitive dependency. list of "groupId:artifactId" string.
- * @return
- * @throws Exception
- */
- public Iterable<String> load(String artifact, scala.collection.Iterable<String> excludes)
- throws Exception {
- return collectionAsScalaIterable(
- dep.load(artifact,
- asJavaCollection(excludes),
- true));
- }
-
- /**
- * Load dependency and it's transitive dependencies for interpreter and runtime (driver).
- * And distribute them to spark cluster (sc.add())
- *
- * @param artifact "groupId:artifactId:version" or file path like "/somepath/your.jar"
- * @param excludes exclusion list of transitive dependency. list of "groupId:artifactId" string.
- * @return
- * @throws Exception
- */
- public Iterable<String> load(String artifact, Collection<String> excludes) throws Exception {
- return collectionAsScalaIterable(dep.load(artifact, excludes, true));
- }
-
- /**
- * Load dependency for interpreter and runtime, and then add to sparkContext.
- * But not adding them to spark cluster
- *
- * @param artifact "groupId:artifactId:version" or file path like "/somepath/your.jar"
- * @return
- * @throws Exception
- */
- public Iterable<String> loadLocal(String artifact) throws Exception {
- return collectionAsScalaIterable(dep.load(artifact, false));
- }
-
-
- /**
- * Load dependency and it's transitive dependencies and then add to sparkContext.
- * But not adding them to spark cluster
- *
- * @param artifact "groupId:artifactId:version" or file path like "/somepath/your.jar"
- * @param excludes exclusion list of transitive dependency. list of "groupId:artifactId" string.
- * @return
- * @throws Exception
- */
- public Iterable<String> loadLocal(String artifact,
- scala.collection.Iterable<String> excludes) throws Exception {
- return collectionAsScalaIterable(dep.load(artifact,
- asJavaCollection(excludes), false));
- }
-
- /**
- * Load dependency and it's transitive dependencies and then add to sparkContext.
- * But not adding them to spark cluster
- *
- * @param artifact "groupId:artifactId:version" or file path like "/somepath/your.jar"
- * @param excludes exclusion list of transitive dependency. list of "groupId:artifactId" string.
- * @return
- * @throws Exception
- */
- public Iterable<String> loadLocal(String artifact, Collection<String> excludes)
- throws Exception {
- return collectionAsScalaIterable(dep.load(artifact, excludes, false));
- }
-
-
- /**
- * Add maven repository
- *
- * @param id id of repository ex) oss, local, snapshot
- * @param url url of repository. supported protocol : file, http, https
- */
- public void addRepo(String id, String url) {
- addRepo(id, url, false);
- }
-
- /**
- * Add maven repository
- *
- * @param id id of repository
- * @param url url of repository. supported protocol : file, http, https
- * @param snapshot true if it is snapshot repository
- */
- public void addRepo(String id, String url, boolean snapshot) {
- dep.addRepo(id, url, snapshot);
- }
-
- /**
- * Remove maven repository by id
- * @param id id of repository
- */
- public void removeRepo(String id){
- dep.delRepo(id);
- }
-
- /**
- * Load dependency only interpreter.
- *
- * @param name
- * @return
- */
-
- public Object input(String name) {
- return input(name, "");
- }
-
- public Object input(String name, Object defaultValue) {
- return gui.input(name, defaultValue);
- }
-
- public Object select(String name, scala.collection.Iterable<Tuple2<Object, String>> options) {
- return select(name, "", options);
- }
-
- public Object select(String name, Object defaultValue,
- scala.collection.Iterable<Tuple2<Object, String>> options) {
- int n = options.size();
- ParamOption[] paramOptions = new ParamOption[n];
- Iterator<Tuple2<Object, String>> it = asJavaIterable(options).iterator();
-
- int i = 0;
- while (it.hasNext()) {
- Tuple2