@@ -42,6 +42,12 @@ public String get(String name) {
     return value;
   }
 
+  @Override
+  public String get(String name, String defaultValue) {
+    String value = config.get(name);
+    return value == null ? defaultValue : value;
+  }
+
   @Override
   public Iterable<Map.Entry<String, String>> getAll() {
     return config.entrySet();
@@ -41,6 +41,12 @@ public String get(String name) {
     return value;
   }
 
+  @Override
+  public String get(String name, String defaultValue) {
+    String value = conf.get(name);
+    return value == null ? defaultValue : value;
+  }
+
   @Override
   public Iterable<Map.Entry<String, String>> getAll() {
     return conf;
@@ -349,7 +349,7 @@ class SparkHadoopUtil extends Logging {
        }
      } catch {
        case e: IOException =>
-         logDebug("Failed to decode $token: $e", e)
+         logDebug(s"Failed to decode $token: $e", e)
      }
      buffer.toString
    }
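The change above is the pattern this PR fixes throughout: `$token` and `$e` are substituted only when the string literal carries the `s` interpolator prefix; without it they are logged as literal text. A minimal standalone sketch (plain `println` standing in for Spark's `logDebug`, and the token value is illustrative):

object InterpolatorDemo {
  def main(args: Array[String]): Unit = {
    val token = "HDFS_DELEGATION_TOKEN"
    // Without the `s` prefix, $token is plain text, not a substitution:
    println("Failed to decode $token")  // prints: Failed to decode $token
    // With the prefix, the variable is spliced into the message:
    println(s"Failed to decode $token") // prints: Failed to decode HDFS_DELEGATION_TOKEN
  }
}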
@@ -60,7 +60,7 @@ object SparkTransportConf {
 
     new TransportConf(module, new ConfigProvider {
       override def get(name: String): String = conf.get(name)
-
+      override def get(name: String, defaultValue: String): String = conf.get(name, defaultValue)
       override def getAll(): java.lang.Iterable[java.util.Map.Entry[String, String]] = {
         conf.getAll.toMap.asJava.entrySet()
       }
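For context on the two `ConfigProvider` changes above: the new two-argument `get` lets callers supply a fallback instead of handling a missing key themselves. A map-backed sketch of the same semantics (the abstract shape mirrors the diff; the store and key names are illustrative, not Spark's):

object ConfigProviderDemo {
  abstract class ConfigProvider {
    def get(name: String): String
    def get(name: String, defaultValue: String): String
  }

  def main(args: Array[String]): Unit = {
    val provider = new ConfigProvider {
      private val conf = Map("spark.module.io.serverThreads" -> "8")
      def get(name: String): String =
        conf.getOrElse(name, throw new NoSuchElementException(name))
      def get(name: String, defaultValue: String): String =
        conf.getOrElse(name, defaultValue) // fall back rather than throw
    }
    println(provider.get("spark.module.io.serverThreads", "1")) // 8: key present
    println(provider.get("spark.module.io.clientThreads", "1")) // 1: default used
  }
}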
@@ -116,7 +116,7 @@ private object ParallelCollectionRDD {
    */
   def slice[T: ClassTag](seq: Seq[T], numSlices: Int): Seq[Seq[T]] = {
     if (numSlices < 1) {
-      throw new IllegalArgumentException("Positive number of slices required")
+      throw new IllegalArgumentException("Positive number of partitions required")
     }
     // Sequences need to be sliced at the same set of index positions for operations
     // like RDD.zip() to behave as expected
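The reworded message matches Spark's user-facing terminology: the `numSlices` argument is simply the number of partitions the collection is split into. A quick check, assuming a local `SparkContext` and the spark-core dependency on the classpath:

import org.apache.spark.{SparkConf, SparkContext}

object SlicesArePartitions {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("demo"))
    val rdd = sc.parallelize(1 to 100, numSlices = 4)
    println(rdd.getNumPartitions) // 4: each "slice" becomes one partition
    sc.stop()
  }
}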
@@ -26,7 +26,7 @@
 
 /**
  * Computes an approximation to pi
- * Usage: JavaSparkPi [slices]
+ * Usage: JavaSparkPi [partitions]
  */
 public final class JavaSparkPi {
 
@@ -32,7 +32,7 @@
 
 /**
  * Transitive closure on a graph, implemented in Java.
- * Usage: JavaTC [slices]
+ * Usage: JavaTC [partitions]
  */
 public final class JavaTC {
 
@@ -21,7 +21,7 @@ package org.apache.spark.examples
 import org.apache.spark.sql.SparkSession
 
 /**
- * Usage: BroadcastTest [slices] [numElem] [blockSize]
+ * Usage: BroadcastTest [partitions] [numElem] [blockSize]
  */
 object BroadcastTest {
   def main(args: Array[String]) {
@@ -23,7 +23,7 @@ import org.apache.spark.sql.SparkSession
 
 
 /**
- * Usage: MultiBroadcastTest [slices] [numElem]
+ * Usage: MultiBroadcastTest [partitions] [numElem]
  */
 object MultiBroadcastTest {
   def main(args: Array[String]) {
@@ -100,7 +100,7 @@ object SparkALS {
         ITERATIONS = iters.getOrElse("5").toInt
         slices = slices_.getOrElse("2").toInt
       case _ =>
-        System.err.println("Usage: SparkALS [M] [U] [F] [iters] [slices]")
+        System.err.println("Usage: SparkALS [M] [U] [F] [iters] [partitions]")
         System.exit(1)
     }
 
@@ -28,7 +28,7 @@ import org.apache.spark.sql.SparkSession
 
 /**
  * Logistic regression based classification.
- * Usage: SparkLR [slices]
+ * Usage: SparkLR [partitions]
  *
  * This is an example implementation for learning how to use Spark. For more conventional use,
  * please refer to org.apache.spark.ml.classification.LogisticRegression.
@@ -215,7 +215,7 @@ object TestingUtils {
       if (r.fun(x, r.y, r.eps)) {
         throw new TestFailedException(
           s"Did not expect \n$x\n and \n${r.y}\n to be within " +
-            "${r.eps}${r.method} for all elements.", 0)
+            s"${r.eps}${r.method} for all elements.", 0)
       }
       true
     }
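This one is easy to miss in review because the message is built from two concatenated literals and only the first carried the `s` prefix, so half the message interpolated and half did not. A reduced reproduction (standalone values standing in for `r.eps` and `r.method`):

object ConcatInterpolation {
  def main(args: Array[String]): Unit = {
    val eps = 1e-3
    val method = "absTol"
    // Only the first literal is interpolated; the second prints verbatim:
    println(s"to be within " + "${eps}${method} for all elements.")
    // -> to be within ${eps}${method} for all elements.
    println(s"to be within " + s"${eps}${method} for all elements.")
    // -> to be within 0.001absTol for all elements.
  }
}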
@@ -259,7 +259,7 @@ object PowerIterationClustering extends Logging {
       val j = ctx.dstId
       val s = ctx.attr
       if (s < 0.0) {
-        throw new SparkException("Similarity must be nonnegative but found s($i, $j) = $s.")
+        throw new SparkException(s"Similarity must be nonnegative but found s($i, $j) = $s.")
       }
       if (s > 0.0) {
         ctx.sendToSrc(s)
@@ -283,7 +283,7 @@
     : Graph[Double, Double] = {
     val edges = similarities.flatMap { case (i, j, s) =>
       if (s < 0.0) {
-        throw new SparkException("Similarity must be nonnegative but found s($i, $j) = $s.")
+        throw new SparkException(s"Similarity must be nonnegative but found s($i, $j) = $s.")
       }
       if (i != j) {
         Seq(Edge(i, j, s), Edge(j, i, s))
@@ -248,7 +248,7 @@ object DecisionTreeModel extends Loader[DecisionTreeModel] with Logging {
     // Build node data into a tree.
     val trees = constructTrees(nodes)
     assert(trees.length == 1,
-      "Decision tree should contain exactly one tree but got ${trees.size} trees.")
+      s"Decision tree should contain exactly one tree but got ${trees.size} trees.")
     val model = new DecisionTreeModel(trees(0), Algo.fromString(algo))
     assert(model.numNodes == numNodes, s"Unable to load DecisionTreeModel data from: $dataPath." +
       s" Expected $numNodes nodes but found ${model.numNodes}")
@@ -119,7 +119,7 @@ object MLUtils extends Logging {
       while (i < indicesLength) {
         val current = indices(i)
         require(current > previous, s"indices should be one-based and in ascending order;"
-          + " found current=$current, previous=$previous; line=\"$line\"")
+          + s""" found current=$current, previous=$previous; line="$line"""")
         previous = current
         i += 1
       }
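Besides adding the missing prefix, this fix switches the second literal to a triple-quoted interpolator (`s"""..."""`), which lets it embed the quotes around `$line` without backslash escapes. A reduced example; note the four closing quotes, one literal quote character plus the closing delimiter:

object TripleQuoteInterpolation {
  def main(args: Array[String]): Unit = {
    val current = 3
    val previous = 7
    val line = "1 1:2.0 3:1.5" // illustrative libSVM-style record
    // Ordinary interpolated literal with escaped quotes:
    println(s" found current=$current, previous=$previous; line=\"$line\"")
    // Same message, triple-quoted, no escapes needed:
    println(s""" found current=$current, previous=$previous; line="$line"""")
  }
}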
@@ -207,7 +207,7 @@ object TestingUtils {
       if (r.fun(x, r.y, r.eps)) {
         throw new TestFailedException(
           s"Did not expect \n$x\n and \n${r.y}\n to be within " +
-            "${r.eps}${r.method} for all elements.", 0)
+            s"${r.eps}${r.method} for all elements.", 0)
       }
       true
     }
@@ -285,7 +285,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
       val queryOutput = selfJoin.queryExecution.analyzed.output
 
       assertResult(4, "Field count mismatches")(queryOutput.size)
-      assertResult(2, "Duplicated expression ID in query plan:\n $selfJoin") {
+      assertResult(2, s"Duplicated expression ID in query plan:\n $selfJoin") {
         queryOutput.filter(_.name == "_1").map(_.exprId).size
       }
 
@@ -294,7 +294,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
   }
 
   test("nested data - struct with array field") {
-    val data = (1 to 10).map(i => Tuple1((i, Seq("val_$i"))))
+    val data = (1 to 10).map(i => Tuple1((i, Seq(s"val_$i"))))
     withOrcTable(data, "t") {
       checkAnswer(sql("SELECT `_1`.`_2`[0] FROM t"), data.map {
         case Tuple1((_, Seq(string))) => Row(string)
@@ -303,7 +303,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
   }
 
   test("nested data - array of struct") {
-    val data = (1 to 10).map(i => Tuple1(Seq(i -> "val_$i")))
+    val data = (1 to 10).map(i => Tuple1(Seq(i -> s"val_$i")))
    withOrcTable(data, "t") {
       checkAnswer(sql("SELECT `_1`[0].`_2` FROM t"), data.map {
         case Tuple1(Seq((_, string))) => Row(string)
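The last two fixes change test data rather than a message, which makes them more consequential: without the prefix, every generated row held the same literal string `val_$i`, so the nested-data tests exercised ten identical values instead of ten distinct ones. Reduced to the core:

object TestDataInterpolation {
  def main(args: Array[String]): Unit = {
    println((1 to 3).map(i => "val_$i"))  // Vector(val_$i, val_$i, val_$i)
    println((1 to 3).map(i => s"val_$i")) // Vector(val_1, val_2, val_3)
  }
}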