diff --git a/.travis.yml b/.travis.yml
index cdf1f934646..ecb279949ae 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -28,20 +28,6 @@ cache:
- zeppelin-web/node_modules
- zeppelin-web/bower_components
-addons:
-  apt:
-    sources:
-    - r-source
-    - sourceline: 'deb http://cran.rstudio.com/bin/linux/ubuntu trusty/'
-      key_url: 'keyserver.ubuntu.com/pks/lookup?op=get&search=0x51716619E084DAB9'
-    - r-packages-trusty
-    - mysql-5.7-trusty
-    packages:
-    - r-base
-    - r-base-dev
-    - mysql-server
-    - mysql-client
-
services:
- mysql
@@ -154,12 +140,12 @@ jobs:
# Test python/pyspark with python 2, livy 0.5
- dist: xenial
jdk: "openjdk8"
- env: PYTHON="2" SCALA_VER="2.10" SPARK_VER="1.6.3" HADOOP_VER="2.6" LIVY_VER="0.5.0-incubating" PROFILE="-Pspark-1.6 -Phadoop2 -Pscala-2.10" BUILD_FLAG="install -DskipTests -DskipRat" TEST_FLAG="verify -DskipRat" MODULES="-pl livy -am" TEST_PROJECTS=""
+ env: PYTHON="2" SCALA_VER="2.10" SPARK_VER="1.6.3" HADOOP_VER="2.6" LIVY_VER="0.5.0-incubating" R="true" PROFILE="-Pspark-1.6 -Phadoop2 -Pscala-2.10" BUILD_FLAG="install -DskipTests -DskipRat" TEST_FLAG="verify -DskipRat" MODULES="-pl livy -am" TEST_PROJECTS=""
# Test livy 0.5 with spark 2.2.0 under python3
- dist: xenial
jdk: "openjdk8"
- env: PYTHON="3" SPARK_VER="2.2.0" HADOOP_VER="2.6" LIVY_VER="0.5.0-incubating" PROFILE="" BUILD_FLAG="install -DskipTests -DskipRat" TEST_FLAG="verify -DskipRat" MODULES="-pl livy -am" TEST_PROJECTS=""
+ env: PYTHON="3" SPARK_VER="2.2.0" HADOOP_VER="2.6" LIVY_VER="0.5.0-incubating" R="true" PROFILE="" BUILD_FLAG="install -DskipTests -DskipRat" TEST_FLAG="verify -DskipRat" MODULES="-pl livy -am" TEST_PROJECTS=""
before_install:
# check files included in commit range, clear bower_components if a bower.json file has changed.
diff --git a/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java b/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java
index 45040891455..9421f021e09 100644
--- a/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java
+++ b/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java
@@ -115,7 +115,7 @@ public void testSparkInterpreter() throws InterpreterException {
try {
// detect spark version
InterpreterResult result = sparkInterpreter.interpret("sc.version", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
boolean isSpark2 = isSpark2(sparkInterpreter, context);
@@ -141,20 +141,20 @@ private void testRDD(final LivySparkInterpreter sparkInterpreter, boolean isSpar
;
InterpreterResult result = sparkInterpreter.interpret("sc.parallelize(1 to 10).sum()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData().contains("Double = 55.0"));
// single line comment
String singleLineComment = "println(1)// my comment";
result = sparkInterpreter.interpret(singleLineComment, context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
// multiple line comment
String multipleLineComment = "println(1)/* multiple \n" + "line \n" + "comment */";
result = sparkInterpreter.interpret(multipleLineComment, context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
// multi-line string
@@ -162,7 +162,7 @@ private void testRDD(final LivySparkInterpreter sparkInterpreter, boolean isSpar
"line\"\"\"\n" +
"println(str)";
result = sparkInterpreter.interpret(multiLineString, context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData().contains("multiple\nline"));
@@ -171,14 +171,14 @@ private void testRDD(final LivySparkInterpreter sparkInterpreter, boolean isSpar
"name:String)\n" +
"val p=Person(1, \"name_a\")";
result = sparkInterpreter.interpret(caseClassCode, context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData().contains("p: Person = Person(1,name_a)"));
// object class
String objectClassCode = "object Person {}";
result = sparkInterpreter.interpret(objectClassCode, context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
if (!isSpark2) {
assertTrue(result.message().get(0).getData().contains("defined module Person"));
@@ -189,7 +189,7 @@ private void testRDD(final LivySparkInterpreter sparkInterpreter, boolean isSpar
// html output
String htmlCode = "println(\"%html
hello
\")";
result = sparkInterpreter.interpret(htmlCode, context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertEquals(InterpreterResult.Type.HTML, result.message().get(0).getType());
@@ -249,7 +249,7 @@ private void testDataFrame(LivySparkInterpreter sparkInterpreter,
result = sparkInterpreter.interpret(
"val df=sqlContext.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
+ "df.collect()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData()
.contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
@@ -257,7 +257,7 @@ private void testDataFrame(LivySparkInterpreter sparkInterpreter,
result = sparkInterpreter.interpret(
"val df=spark.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
+ "df.collect()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData()
.contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
@@ -265,25 +265,25 @@ private void testDataFrame(LivySparkInterpreter sparkInterpreter,
sparkInterpreter.interpret("df.registerTempTable(\"df\")", context);
      // test LivySparkSQLInterpreter, which shares the same SparkContext with LivySparkInterpreter
result = sqlInterpreter.interpret("select * from df where col_1='hello'", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
assertEquals("col_1\tcol_2\nhello\t20", result.message().get(0).getData());
// double quotes
result = sqlInterpreter.interpret("select * from df where col_1=\"hello\"", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
assertEquals("col_1\tcol_2\nhello\t20", result.message().get(0).getData());
// only enable this test in spark2 as spark1 doesn't work for this case
if (isSpark2) {
result = sqlInterpreter.interpret("select * from df where col_1=\"he\\\"llo\" ", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
}
// single quotes inside attribute value
result = sqlInterpreter.interpret("select * from df where col_1=\"he'llo\"", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
// test sql with syntax error
@@ -328,7 +328,7 @@ public void run() {
"val df=sqlContext.createDataFrame(Seq((\"12characters12characters\",20)))"
+ ".toDF(\"col_1\", \"col_2\")\n"
+ "df.collect()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData()
.contains("Array[org.apache.spark.sql.Row] = Array([12characters12characters,20])"));
@@ -337,7 +337,7 @@ public void run() {
"val df=spark.createDataFrame(Seq((\"12characters12characters\",20)))"
+ ".toDF(\"col_1\", \"col_2\")\n"
+ "df.collect()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData()
.contains("Array[org.apache.spark.sql.Row] = Array([12characters12characters,20])"));
@@ -346,7 +346,7 @@ public void run() {
      // test LivySparkSQLInterpreter, which shares the same SparkContext with LivySparkInterpreter
result = sqlInterpreter.interpret("select * from df where col_1='12characters12characters'",
context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
assertEquals("col_1\tcol_2\n12characters12cha...\t20", result.message().get(0).getData());
@@ -405,14 +405,14 @@ public void testPySparkInterpreter() throws InterpreterException {
try {
InterpreterResult result = pysparkInterpreter.interpret("sc.version", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
boolean isSpark2 = isSpark2(pysparkInterpreter, context);
// test RDD api
result = pysparkInterpreter.interpret("sc.range(1, 10).sum()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData().contains("45"));
@@ -422,7 +422,7 @@ public void testPySparkInterpreter() throws InterpreterException {
+ "sqlContext = SQLContext(sc)", context);
result = pysparkInterpreter.interpret("df=sqlContext.createDataFrame([(\"hello\",20)])\n"
+ "df.collect()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
      // python 2 strings have the u prefix, python 3 strings don't
assertTrue(result.message().get(0).getData().contains("[Row(_1=u'hello', _2=20)]")
@@ -430,7 +430,7 @@ public void testPySparkInterpreter() throws InterpreterException {
} else {
result = pysparkInterpreter.interpret("df=spark.createDataFrame([(\"hello\",20)])\n"
+ "df.collect()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
      // python 2 strings have the u prefix, python 3 strings don't
assertTrue(result.message().get(0).getData().contains("[Row(_1=u'hello', _2=20)]")
@@ -441,7 +441,7 @@ public void testPySparkInterpreter() throws InterpreterException {
pysparkInterpreter.interpret("t = [{\"name\":\"userA\", \"role\":\"roleA\"},"
+ "{\"name\":\"userB\", \"role\":\"roleB\"}]", context);
result = pysparkInterpreter.interpret("%table t", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
assertTrue(result.message().get(0).getData().contains("userA"));
@@ -515,7 +515,7 @@ public void testSparkInterpreterWithDisplayAppInfo_StringWithoutTruncation()
try {
InterpreterResult result = sparkInterpreter.interpret("sc.version", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(2, result.message().size());
// check yarn appId and ensure it is not null
assertTrue(result.message().get(1).getData().contains("Spark Application Id: application_"));
@@ -523,13 +523,13 @@ public void testSparkInterpreterWithDisplayAppInfo_StringWithoutTruncation()
// html output
String htmlCode = "println(\"%html hello
\")";
result = sparkInterpreter.interpret(htmlCode, context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(2, result.message().size());
assertEquals(InterpreterResult.Type.HTML, result.message().get(0).getType());
// detect spark version
result = sparkInterpreter.interpret("sc.version", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(2, result.message().size());
boolean isSpark2 = isSpark2(sparkInterpreter, context);
@@ -539,7 +539,7 @@ public void testSparkInterpreterWithDisplayAppInfo_StringWithoutTruncation()
"val df=sqlContext.createDataFrame(Seq((\"12characters12characters\",20)))"
+ ".toDF(\"col_1\", \"col_2\")\n"
+ "df.collect()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(2, result.message().size());
assertTrue(result.message().get(0).getData()
.contains("Array[org.apache.spark.sql.Row] = Array([12characters12characters,20])"));
@@ -548,7 +548,7 @@ public void testSparkInterpreterWithDisplayAppInfo_StringWithoutTruncation()
"val df=spark.createDataFrame(Seq((\"12characters12characters\",20)))"
+ ".toDF(\"col_1\", \"col_2\")\n"
+ "df.collect()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(2, result.message().size());
assertTrue(result.message().get(0).getData()
.contains("Array[org.apache.spark.sql.Row] = Array([12characters12characters,20])"));
@@ -557,7 +557,7 @@ public void testSparkInterpreterWithDisplayAppInfo_StringWithoutTruncation()
      // test LivySparkSQLInterpreter, which shares the same SparkContext with LivySparkInterpreter
result = sqlInterpreter.interpret("select * from df where col_1='12characters12characters'",
context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
assertEquals("col_1\tcol_2\n12characters12characters\t20", result.message().get(0).getData());
} finally {
@@ -599,7 +599,7 @@ public void testSparkRInterpreter() throws InterpreterException {
// test DataFrame api
if (isSpark2) {
result = sparkRInterpreter.interpret("df <- as.DataFrame(faithful)\nhead(df)", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData().contains("eruptions waiting"));
@@ -628,7 +628,7 @@ public void run() {
} else {
result = sparkRInterpreter.interpret("df <- createDataFrame(sqlContext, faithful)" +
"\nhead(df)", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData().contains("eruptions waiting"));
}
@@ -673,11 +673,11 @@ public void testLivyTutorialNote() throws IOException, InterpreterException {
String p1 = IOUtils.toString(getClass().getResourceAsStream("/livy_tutorial_1.scala"));
InterpreterResult result = sparkInterpreter.interpret(p1, context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
String p2 = IOUtils.toString(getClass().getResourceAsStream("/livy_tutorial_2.sql"));
result = sqlInterpreter.interpret(p2, context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());
} finally {
sparkInterpreter.close();
@@ -734,7 +734,7 @@ public void testSharedInterpreter() throws InterpreterException {
.build();
// detect spark version
InterpreterResult result = sparkInterpreter.interpret("sc.version", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
boolean isSpark2 = isSpark2((BaseLivyInterpreter) sparkInterpreter.getInnerInterpreter(),
@@ -744,7 +744,7 @@ public void testSharedInterpreter() throws InterpreterException {
result = sparkInterpreter.interpret(
"val df=sqlContext.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
+ "df.collect()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData()
.contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
@@ -753,7 +753,7 @@ public void testSharedInterpreter() throws InterpreterException {
// access table from pyspark
result = pysparkInterpreter.interpret("sqlContext.sql(\"select * from df\").show()",
context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData()
.contains("+-----+-----+\n" +
@@ -765,14 +765,14 @@ public void testSharedInterpreter() throws InterpreterException {
// access table from sparkr
result = sparkRInterpreter.interpret("head(sql(sqlContext, \"select * from df\"))",
context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData().contains("col_1 col_2\n1 hello 20"));
} else {
result = sparkInterpreter.interpret(
"val df=spark.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
+ "df.collect()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData()
.contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
@@ -780,7 +780,7 @@ public void testSharedInterpreter() throws InterpreterException {
// access table from pyspark
result = pysparkInterpreter.interpret("spark.sql(\"select * from df\").show()", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData()
.contains("+-----+-----+\n" +
@@ -791,7 +791,7 @@ public void testSharedInterpreter() throws InterpreterException {
// access table from sparkr
result = sparkRInterpreter.interpret("head(sql(\"select * from df\"))", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertTrue(result.message().get(0).getData().contains("col_1 col_2\n1 hello 20"));
}
@@ -804,14 +804,14 @@ public void testSharedInterpreter() throws InterpreterException {
"plt.figure()\n" +
"plt.plot(data)\n" +
"%matplot plt", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertEquals(InterpreterResult.Type.IMG, result.message().get(0).getType());
// test plotting of R
result = sparkRInterpreter.interpret(
"hist(mtcars$mpg)", context);
- assertEquals(InterpreterResult.Code.SUCCESS, result.code());
+ assertEquals(result.toString(), InterpreterResult.Code.SUCCESS, result.code());
assertEquals(1, result.message().size());
assertEquals(InterpreterResult.Type.IMG, result.message().get(0).getType());
@@ -828,22 +828,13 @@ public void testSharedInterpreter() throws InterpreterException {
}
private boolean isSpark2(BaseLivyInterpreter interpreter, InterpreterContext context) {
- InterpreterResult result = null;
if (interpreter instanceof LivySparkRInterpreter) {
- result = interpreter.interpret("sparkR.session()", context);
+ InterpreterResult result = interpreter.interpret("sparkR.session()", context);
      // SparkRInterpreter always returns SUCCESS due to bug LIVY-313
- if (result.message().get(0).getData().contains("Error")) {
- return false;
- } else {
- return true;
- }
+ return !result.message().get(0).getData().contains("Error");
} else {
- result = interpreter.interpret("spark", context);
- if (result.code() == InterpreterResult.Code.SUCCESS) {
- return true;
- } else {
- return false;
- }
+ InterpreterResult result = interpreter.interpret("spark", context);
+ return result.code() == InterpreterResult.Code.SUCCESS;
}
}
diff --git a/testing/install_external_dependencies.sh b/testing/install_external_dependencies.sh
index a1fdc32d592..e44815b58a0 100755
--- a/testing/install_external_dependencies.sh
+++ b/testing/install_external_dependencies.sh
@@ -22,9 +22,9 @@ touch ~/.environ
# Install Python dependencies for Python specific tests
if [[ -n "$PYTHON" ]] ; then
- wget https://repo.continuum.io/miniconda/Miniconda${PYTHON}-4.6.14-Linux-x86_64.sh -O miniconda.sh
+ wget "https://repo.continuum.io/miniconda/Miniconda${PYTHON}-4.6.14-Linux-x86_64.sh" -O miniconda.sh
- bash miniconda.sh -b -p $HOME/miniconda
+ bash miniconda.sh -b -p "$HOME/miniconda"
echo "export PATH='$HOME/miniconda/bin:$PATH'" >> ~/.environ
source ~/.environ
@@ -44,9 +44,9 @@ if [[ -n "$PYTHON" ]] ; then
fi
if [[ -n "$TENSORFLOW" ]] ; then
- check_results=`conda search -c conda-forge tensorflow`
+ check_results=$(conda search -c conda-forge tensorflow)
echo "search tensorflow = $check_results"
- pip install tensorflow==${TENSORFLOW}
+ pip install "tensorflow==${TENSORFLOW}"
fi
fi