Commit 9106585

Committed: addressed comments v4.0
2 parents: bced829 + 9646018

File tree: 21 files changed (+1359, -94 lines)

21 files changed

+1359
-94
lines changed

core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala

Lines changed: 1 addition & 1 deletion

@@ -701,7 +701,7 @@ object SparkSubmit {
 }

 /** Provides utility functions to be used inside SparkSubmit. */
-private[deploy] object SparkSubmitUtils {
+private[spark] object SparkSubmitUtils {

   // Exposed for testing
   var printStream = SparkSubmit.printStream

docs/building-spark.md

Lines changed: 0 additions & 4 deletions

@@ -66,7 +66,6 @@ Because HDFS is not protocol-compatible across versions, if you want to read fro
     <tr><th>Hadoop version</th><th>Profile required</th></tr>
   </thead>
   <tbody>
-    <tr><td>0.23.x</td><td>hadoop-0.23</td></tr>
     <tr><td>1.x to 2.1.x</td><td>(none)</td></tr>
     <tr><td>2.2.x</td><td>hadoop-2.2</td></tr>
     <tr><td>2.3.x</td><td>hadoop-2.3</td></tr>

@@ -82,9 +81,6 @@ mvn -Dhadoop.version=1.2.1 -DskipTests clean package

 # Cloudera CDH 4.2.0 with MapReduce v1
 mvn -Dhadoop.version=2.0.0-mr1-cdh4.2.0 -DskipTests clean package
-
-# Apache Hadoop 0.23.x
-mvn -Phadoop-0.23 -Dhadoop.version=0.23.7 -DskipTests clean package

 {% endhighlight %}

 You can enable the "yarn" profile and optionally set the "yarn.version" property if it is different from "hadoop.version". Spark only supports YARN versions 2.2.0 and later.

docs/hadoop-third-party-distributions.md

Lines changed: 0 additions & 3 deletions

@@ -29,9 +29,6 @@ the _exact_ Hadoop version you are running to avoid any compatibility errors.
     <tr><th>Release</th><th>Version code</th></tr>
     <tr><td>CDH 4.X.X (YARN mode)</td><td>2.0.0-cdh4.X.X</td></tr>
     <tr><td>CDH 4.X.X</td><td>2.0.0-mr1-cdh4.X.X</td></tr>
-    <tr><td>CDH 3u6</td><td>0.20.2-cdh3u6</td></tr>
-    <tr><td>CDH 3u5</td><td>0.20.2-cdh3u5</td></tr>
-    <tr><td>CDH 3u4</td><td>0.20.2-cdh3u4</td></tr>
   </table>
 </td>
 <td>

make-distribution.sh

Lines changed: 1 addition & 1 deletion

@@ -58,7 +58,7 @@ while (( "$#" )); do
     --hadoop)
       echo "Error: '--hadoop' is no longer supported:"
       echo "Error: use Maven profiles and options -Dhadoop.version and -Dyarn.version instead."
-      echo "Error: Related profiles include hadoop-0.23, hdaoop-2.2, hadoop-2.3 and hadoop-2.4."
+      echo "Error: Related profiles include hadoop-2.2, hadoop-2.3 and hadoop-2.4."
      exit_with_usage
      ;;
    --with-yarn)

pom.xml

Lines changed: 0 additions & 14 deletions

@@ -1614,20 +1614,6 @@
       http://hadoop.apache.org/docs/ra.b.c/hadoop-project-dist/hadoop-common/dependency-analysis.html
     -->

-    <profile>
-      <id>hadoop-0.23</id>
-      <!-- SPARK-1121: Adds an explicit dependency on Avro to work around a Hadoop 0.23.X issue -->
-      <dependencies>
-        <dependency>
-          <groupId>org.apache.avro</groupId>
-          <artifactId>avro</artifactId>
-        </dependency>
-      </dependencies>
-      <properties>
-        <hadoop.version>0.23.10</hadoop.version>
-      </properties>
-    </profile>
-
     <profile>
       <id>hadoop-2.2</id>
       <properties>

python/pyspark/ml/tuning.py

Lines changed: 84 additions & 0 deletions

@@ -0,0 +1,84 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import itertools
+
+__all__ = ['ParamGridBuilder']
+
+
+class ParamGridBuilder(object):
+    """
+    Builder for a param grid used in grid search-based model selection.
+
+    >>> from classification import LogisticRegression
+    >>> lr = LogisticRegression()
+    >>> output = ParamGridBuilder().baseOn({lr.labelCol: 'l'}) \
+            .baseOn([lr.predictionCol, 'p']) \
+            .addGrid(lr.regParam, [1.0, 2.0, 3.0]) \
+            .addGrid(lr.maxIter, [1, 5]) \
+            .addGrid(lr.featuresCol, ['f']) \
+            .build()
+    >>> expected = [ \
+        {lr.regParam: 1.0, lr.featuresCol: 'f', lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'}, \
+        {lr.regParam: 2.0, lr.featuresCol: 'f', lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'}, \
+        {lr.regParam: 3.0, lr.featuresCol: 'f', lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'}, \
+        {lr.regParam: 1.0, lr.featuresCol: 'f', lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}, \
+        {lr.regParam: 2.0, lr.featuresCol: 'f', lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}, \
+        {lr.regParam: 3.0, lr.featuresCol: 'f', lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}]
+    >>> len(output) == len(expected)
+    True
+    >>> all([m in expected for m in output])
+    True
+    """
+
+    def __init__(self):
+        self._param_grid = {}
+
+    def addGrid(self, param, values):
+        """
+        Adds a param with multiple values (overwrites if the input param exists).
+        """
+        self._param_grid[param] = values
+
+        return self
+
+    def baseOn(self, *args):
+        """
+        Sets the given parameters in this grid to fixed values.
+        Accepts either a parameter dictionary or a list of (parameter, value) pairs.
+        """
+        if isinstance(args[0], dict):
+            self.baseOn(*args[0].items())
+        else:
+            for (param, value) in args:
+                self.addGrid(param, [value])
+
+        return self
+
+    def build(self):
+        """
+        Builds and returns all combinations of parameters specified
+        by the param grid.
+        """
+        keys = self._param_grid.keys()
+        grid_values = self._param_grid.values()
+        return [dict(zip(keys, prod)) for prod in itertools.product(*grid_values)]
+
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
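
For readers skimming the diff: build() is a thin wrapper around itertools.product. A minimal standalone sketch of the same expansion, using plain strings as stand-in keys (real ParamGridBuilder keys are pyspark.ml Param objects, so the names here are illustrative, not part of the commit):

    import itertools

    # Stand-in grid; ParamGridBuilder stores {param: [values]} the same way.
    param_grid = {
        "regParam": [1.0, 2.0, 3.0],
        "maxIter": [1, 5],
        "featuresCol": ["f"],
    }

    # Cartesian product of the value lists, with each combination zipped
    # back to its keys as one param map -- the same shape build() returns.
    combos = [dict(zip(param_grid.keys(), prod))
              for prod in itertools.product(*param_grid.values())]

    print(len(combos))  # 6 == 3 regParam values * 2 maxIter values * 1 featuresCol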

python/pyspark/sql/dataframe.py

Lines changed: 32 additions & 4 deletions

@@ -875,6 +875,27 @@ def fillna(self, value, subset=None):

         return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)

+    def corr(self, col1, col2, method=None):
+        """
+        Calculates the correlation of two columns of a DataFrame as a double value. Currently only
+        supports the Pearson Correlation Coefficient.
+        :func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases.
+
+        :param col1: The name of the first column
+        :param col2: The name of the second column
+        :param method: The correlation method. Currently only supports "pearson"
+        """
+        if not isinstance(col1, str):
+            raise ValueError("col1 should be a string.")
+        if not isinstance(col2, str):
+            raise ValueError("col2 should be a string.")
+        if not method:
+            method = "pearson"
+        if not method == "pearson":
+            raise ValueError("Currently only the calculation of the Pearson Correlation " +
+                             "coefficient is supported.")
+        return self._jdf.stat().corr(col1, col2, method)
+
     def cov(self, col1, col2):
         """
         Calculate the sample covariance for the given columns, specified by their names, as a

@@ -892,13 +913,15 @@ def cov(self, col1, col2):
     def crosstab(self, col1, col2):
         """
         Computes a pair-wise frequency table of the given columns. Also known as a contingency
-        table. The number of distinct values for each column should be less than 1e5. The first
+        table. The number of distinct values for each column should be less than 1e4. The first
         column of each row will be the distinct values of `col1` and the column names will be the
-        distinct values of `col2` sorted in lexicographical order.
+        distinct values of `col2`. Pairs that have no occurrences will have `null` as their values.
         :func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.

-        :param col1: The name of the first column
-        :param col2: The name of the second column
+        :param col1: The name of the first column. Distinct items will make the first item of
+            each row.
+        :param col2: The name of the second column. Distinct items will make the column names
+            of the DataFrame.
         """
         if not isinstance(col1, str):
             raise ValueError("col1 should be a string.")

@@ -1376,6 +1399,11 @@ class DataFrameStatFunctions(object):
     def __init__(self, df):
         self.df = df

+    def corr(self, col1, col2, method=None):
+        return self.df.corr(col1, col2, method)
+
+    corr.__doc__ = DataFrame.corr.__doc__
+
     def cov(self, col1, col2):
         return self.df.cov(col1, col2)
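
A quick usage sketch for the new corr method alongside the revised crosstab (not part of the commit: it assumes an interactive PySpark shell where sc and a SQLContext are already set up, and the column names a and b are invented):

    from pyspark.sql import Row

    df = sc.parallelize([Row(a=float(i), b=2.0 * i) for i in range(10)]).toDF()

    # DataFrame.corr and DataFrameStatFunctions.corr are aliases, and
    # "pearson" is both the default and the only supported method here.
    print(df.corr("a", "b"))                  # 1.0 for perfectly linear data
    print(df.stat.corr("a", "b", "pearson"))  # same value via df.stat

    # crosstab builds a pair-wise frequency table of the two columns.
    df.stat.crosstab("a", "b").show()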

python/pyspark/sql/tests.py

Lines changed: 9 additions & 2 deletions

@@ -394,15 +394,22 @@ def test_aggregator(self):
         self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
         self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])

+    def test_corr(self):
+        import math
+        df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
+        corr = df.stat.corr("a", "b")
+        self.assertTrue(abs(corr - 0.95734012) < 1e-6)
+
     def test_cov(self):
         df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
         cov = df.stat.cov("a", "b")
         self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)

     def test_crosstab(self):
         df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
-        ct = df.stat.crosstab("a", "b")
-        for i, row in enumerate(ct.collect()):
+        ct = df.stat.crosstab("a", "b").collect()
+        ct = sorted(ct, key=lambda r: r[0])
+        for i, row in enumerate(ct):
             self.assertEqual(row[0], str(i))
             self.assertEqual(row[1], 1)
             self.assertEqual(row[2], 1)
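
The constant 0.95734012 in test_corr can be sanity-checked without Spark; here is a pure-Python sketch of the Pearson formula over the same data (illustrative only, not part of the commit):

    import math

    xs = list(range(10))
    ys = [math.sqrt(i) for i in xs]

    # Pearson r = cov(x, y) / (stddev(x) * stddev(y)), written out directly.
    n = float(len(xs))
    mx, my = sum(xs) / n, sum(ys) / n
    cov = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
    var_x = sum((x - mx) ** 2 for x in xs)
    var_y = sum((y - my) ** 2 for y in ys)

    print(cov / math.sqrt(var_x * var_y))  # ~0.9573401..., matching the test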

python/run-tests

Lines changed: 1 addition & 0 deletions

@@ -98,6 +98,7 @@ function run_ml_tests() {
     echo "Run ml tests ..."
     run_test "pyspark/ml/feature.py"
     run_test "pyspark/ml/classification.py"
+    run_test "pyspark/ml/tuning.py"
     run_test "pyspark/ml/tests.py"
 }

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Catalog.scala

Lines changed: 2 additions & 0 deletions

@@ -27,6 +27,8 @@ import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Subquery}
  */
 class NoSuchTableException extends Exception

+class NoSuchDatabaseException extends Exception
+
 /**
  * An interface for looking up relations by name. Used by an [[Analyzer]].
  */
