@@ -42,7 +42,7 @@ object BroadcastTest {
val arr1 = (0 until num).toArray

for (i <- 0 until 3) {
println("Iteration " + i)
println(s"Iteration ${i}")

Contributor:
Beyond the unnecessary { } that @srowen has already mentioned, this isn't really a style improvement. "a string " + anotherString is arguably at least as good stylistically as string interpolation for such a simple concatenation of a string reference onto the end of a string literal. It's only when there are multiple concatenations and/or multiple string references that interpolation is clearly the better way.
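
(A hypothetical side-by-side to illustrate the distinction; i, total, and elapsed below are made-up values, not code from this PR:)

    // Made-up values, for illustration only.
    val i = 1; val total = 3; val elapsed = 42L

    // A single value appended to a literal reads fine either way:
    println("Iteration " + i)
    println(s"Iteration $i")

    // Interpolation only clearly wins once several values are embedded:
    println("Iteration " + i + " of " + total + " took " + elapsed + " ms")
    println(s"Iteration $i of $total took $elapsed ms")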

Contributor Author:
@markhamstra Thank you for the valuable suggestion; I've addressed it in a new commit.

println("===========")
val startTime = System.nanoTime
val barr1 = sc.broadcast(arr1)
@@ -49,12 +49,10 @@ object DFSReadWriteTest {
}

private def printUsage(): Unit = {
val usage: String = "DFS Read-Write Test\n" +
"\n" +
"Usage: localFile dfsDir\n" +
"\n" +
"localFile - (string) local file to use in test\n" +
"dfsDir - (string) DFS directory for read/write tests\n"
val usage = s"""DFS Read-Write Test

Contributor:
Here you should use

"""
 |...
 """.stripMargin

Otherwise you introduce a lot of leading spaces into the string.
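
(For reference, a self-contained sketch of the suggested pattern applied to this usage message; illustrative only, not necessarily the exact text of the commit:)

    // stripMargin removes the leading whitespace up to and including each '|',
    // so the indentation of the source file does not leak into the string.
    // Note: no s prefix is needed, since nothing is interpolated.
    val usage =
      """DFS Read-Write Test
        |
        |Usage: localFile dfsDir
        |
        |localFile - (string) local file to use in test
        |dfsDir - (string) DFS directory for read/write tests
        |""".stripMargin
    println(usage)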

Contributor Author:
@mgaido91 Thank you for the feedback; the change has been addressed.

Usage: localFile dfsDir
localFile - (string) local file to use in test
dfsDir - (string) DFS directory for read/write tests"""

println(usage)
}
@@ -69,13 +67,13 @@ object DFSReadWriteTest {

localFilePath = new File(args(i))
if (!localFilePath.exists) {
System.err.println("Given path (" + args(i) + ") does not exist.\n")
System.err.println(s"Given path (${args(i)}) does not exist")
printUsage()
System.exit(1)
}

if (!localFilePath.isFile) {
System.err.println("Given path (" + args(i) + ") is not a file.\n")
System.err.println(s"Given path (${args(i)}) is not a file")
printUsage()
System.exit(1)
}
Expand All @@ -97,22 +95,22 @@ object DFSReadWriteTest {
def main(args: Array[String]): Unit = {
parseArgs(args)

println("Performing local word count")
println(s"Performing local word count")

Member:
There is no interpolation here; revert changes like this.
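
(In other words — a hypothetical illustration, not code from this PR — the s prefix only does anything when the literal contains a $ placeholder:)

    println("Creating SparkSession")    // plain literal: nothing to interpolate
    println(s"Creating SparkSession")   // same output; the s interpolator adds nothing here
    val name = "SparkSession"           // made-up value, for illustration
    println(s"Creating $name")          // s is only warranted when a value is embedded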

Contributor Author:
@srowen Thanks for the review; I've addressed the changes. Please take another look.

val fileContents = readFile(localFilePath.toString())
val localWordCount = runLocalWordCount(fileContents)

println("Creating SparkSession")
println(s"Creating SparkSession")
val spark = SparkSession
.builder
.appName("DFS Read Write Test")
.getOrCreate()

println("Writing local file to DFS")
val dfsFilename = dfsDirPath + "/dfs_read_write_test"
println(s"Writing local file to DFS")
val dfsFilename = s"${dfsDirPath}/dfs_read_write_test"
val fileRDD = spark.sparkContext.parallelize(fileContents)
fileRDD.saveAsTextFile(dfsFilename)

println("Reading file from DFS and running Word Count")
println(s"Reading file from DFS and running Word Count")
val readFileRDD = spark.sparkContext.textFile(dfsFilename)

val dfsWordCount = readFileRDD
Expand All @@ -127,11 +125,11 @@ object DFSReadWriteTest {
spark.stop()

if (localWordCount == dfsWordCount) {
println(s"Success! Local Word Count ($localWordCount) " +
s"and DFS Word Count ($dfsWordCount) agree.")
println(s"Success! Local Word Count ($localWordCount)
and DFS Word Count ($dfsWordCount) agree.")

Member:
This should be reverted; you've added a bunch of space to the string.
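
(The underlying problem: splitting a single interpolated literal across source lines makes the source indentation part of the string. A sketch of the two usual fixes, using made-up counts in place of the real ones:)

    // Made-up counts, standing in for the values computed in the example:
    val localWordCount = 10L; val dfsWordCount = 10L

    // Keep one logical line by concatenating two interpolated pieces
    // (this is what the original code did):
    println(s"Success! Local Word Count ($localWordCount) " +
      s"and DFS Word Count ($dfsWordCount) agree.")

    // Or, if a genuinely multi-line message is wanted, strip the margin
    // (note this prints on two lines, unlike the concatenated form):
    println(
      s"""Success! Local Word Count ($localWordCount)
         |and DFS Word Count ($dfsWordCount) agree.""".stripMargin)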

Contributor Author:
@srowen Thanks for the review; I've addressed the changes. Please take another look.

} else {
println(s"Failure! Local Word Count ($localWordCount) " +
s"and DFS Word Count ($dfsWordCount) disagree.")
println(s"Failure! Local Word Count ($localWordCount)
and DFS Word Count ($dfsWordCount) disagree.")
}

}
@@ -29,18 +29,18 @@ import org.apache.spark.util.Utils
object DriverSubmissionTest {
def main(args: Array[String]) {
if (args.length < 1) {
println("Usage: DriverSubmissionTest <seconds-to-sleep>")
println(s"Usage: DriverSubmissionTest <seconds-to-sleep>")
System.exit(0)
}
val numSecondsToSleep = args(0).toInt

val env = System.getenv()
val properties = Utils.getSystemProperties

println("Environment variables containing SPARK_TEST:")
println(s"Environment variables containing SPARK_TEST:")
env.asScala.filter { case (k, _) => k.contains("SPARK_TEST")}.foreach(println)

println("System properties containing spark.test:")
println(s"System properties containing spark.test:")
properties.filter { case (k, _) => k.toString.contains("spark.test") }.foreach(println)

for (i <- 1 until numSecondsToSleep) {
@@ -26,7 +26,7 @@ object HdfsTest {
/** Usage: HdfsTest [file] */
def main(args: Array[String]) {
if (args.length < 1) {
System.err.println("Usage: HdfsTest <file>")
System.err.println(s"Usage: HdfsTest <file>")
System.exit(1)
}
val spark = SparkSession
@@ -39,7 +39,7 @@ object HdfsTest {
val start = System.currentTimeMillis()
for (x <- mapped) { x + 2 }
val end = System.currentTimeMillis()
println("Iteration " + iter + " took " + (end-start) + " ms")
println(s"Iteration ${iter} took ${(end-start)} ${ms}")
}
spark.stop()
}
@@ -95,7 +95,7 @@ object LocalALS {

def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of ALS and is given as an example!
s"""WARN: This is a naive implementation of ALS and is given as an example!

Contributor:
Please revert the change here and do the same in all similar places, since there is no variable to interpolate.

Contributor Author:
@mgaido91 Thank you for the feedback; the change has been addressed.

|Please use org.apache.spark.ml.recommendation.ALS
|for more conventional use.
""".stripMargin)
@@ -110,7 +110,7 @@ object LocalALS {
F = f.toInt
ITERATIONS = iters.toInt
case _ =>
System.err.println("Usage: LocalALS <M> <U> <F> <iters>")
System.err.println(s"Usage: LocalALS <M> <U> <F> <iters>")
System.exit(1)
}

@@ -129,8 +129,7 @@ object LocalALS {
println(s"Iteration $iter:")
ms = (0 until M).map(i => updateMovie(i, ms(i), us, R)).toArray
us = (0 until U).map(j => updateUser(j, us(j), ms, R)).toArray
println("RMSE = " + rmse(R, ms, us))
println()
println(s"RMSE = ${rmse(R, ms, us)}")
}
}

@@ -41,7 +41,7 @@ object LocalFileLR {

def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
s"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
|Please use org.apache.spark.ml.classification.LogisticRegression
|for more conventional use.
""".stripMargin)
@@ -58,10 +58,10 @@ object LocalFileLR {

// Initialize w to a random value
val w = DenseVector.fill(D) {2 * rand.nextDouble - 1}
println("Initial w: " + w)
println(s"Initial w: ${w}")

Member:
You can just write $w in cases like this.
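
(A quick sketch of when the braces are and aren't needed; w and args below are made-up values:)

    val w = 42                      // made-up value, for illustration
    val args = Array("input.txt")   // made-up args, for illustration
    println(s"Final w: $w")         // bare identifier: no braces needed
    println(s"Sum: ${w + 1}")       // braces are required for an expression
    println(s"Arg: ${args(0)}")     // ...or for anything beyond a simple identifier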

Contributor Author:
@srowen Thanks for the review; I've addressed the changes. Please take another look.


for (i <- 1 to ITERATIONS) {
println("On iteration " + i)
println(s"On iteration ${i}")
val gradient = DenseVector.zeros[Double](D)
for (p <- points) {
val scale = (1 / (1 + math.exp(-p.y * (w.dot(p.x)))) - 1) * p.y
Expand All @@ -71,7 +71,7 @@ object LocalFileLR {
}

fileSrc.close()
println("Final w: " + w)
println(s"Final w: ${w}")
}
}
// scalastyle:on println
@@ -64,7 +64,7 @@ object LocalKMeans {

def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of KMeans Clustering and is given as an example!
s"""WARN: This is a naive implementation of KMeans Clustering and is given as an example!
|Please use org.apache.spark.ml.clustering.KMeans
|for more conventional use.
""".stripMargin)
@@ -88,7 +88,7 @@ object LocalKMeans {
kPoints.put(i, iter.next())
}

println("Initial centers: " + kPoints)
println(s"Initial centers: ${kPoints}")

while(tempDist > convergeDist) {
val closest = data.map (p => (closestPoint(p, kPoints), (p, 1)))
@@ -114,7 +114,7 @@ object LocalKMeans {
}
}

println("Final centers: " + kPoints)
println(s"Final centers: ${kPoints}")
}
}
// scalastyle:on println
@@ -48,7 +48,7 @@ object LocalLR {

def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
s"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
|Please use org.apache.spark.ml.classification.LogisticRegression
|for more conventional use.
""".stripMargin)
@@ -61,10 +61,10 @@ object LocalLR {
val data = generateData
// Initialize w to a random value
val w = DenseVector.fill(D) {2 * rand.nextDouble - 1}
println("Initial w: " + w)
println(s"Initial w: ${w}")

for (i <- 1 to ITERATIONS) {
println("On iteration " + i)
println(s"On iteration ${i}")
val gradient = DenseVector.zeros[Double](D)
for (p <- data) {
val scale = (1 / (1 + math.exp(-p.y * (w.dot(p.x)))) - 1) * p.y
@@ -73,7 +73,7 @@ object LocalLR {
w -= gradient
}

println("Final w: " + w)
println(s"Final w: ${w}")
}
}
// scalastyle:on println
@@ -28,7 +28,7 @@ object LocalPi {
val y = random * 2 - 1
if (x*x + y*y <= 1) count += 1
}
println("Pi is roughly " + 4 * count / 100000.0)
println(s"Pi is roughly ${4 * count / 100000.0}")
}
}
// scalastyle:on println
@@ -27,7 +27,7 @@ import org.apache.spark.{SparkConf, SparkContext}
*/
object LogQuery {
val exampleApacheLogs = List(
"""10.10.10.10 - "FRED" [18/Jan/2013:17:56:07 +1100] "GET http://images.com/2013/Generic.jpg
s"""10.10.10.10 - "FRED" [18/Jan/2013:17:56:07 +1100] "GET http://images.com/2013/Generic.jpg
| HTTP/1.1" 304 315 "http://referall.com/" "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1;
| GTB7.4; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR
| 3.5.21022; .NET CLR 3.0.4506.2152; .NET CLR 1.0.3705; .NET CLR 1.1.4322; .NET CLR
@@ -59,7 +59,7 @@ object SimpleSkewedGroupByTest {
// Enforce that everything has been calculated and in cache
pairs1.count

println("RESULT: " + pairs1.groupByKey(numReducers).count)
println(s"RESULT: ${pairs1.groupByKey(numReducers).count}")
// Print how many keys each reducer got (for debugging)
// println("RESULT: " + pairs1.groupByKey(numReducers)
// .map{case (k,v) => (k, v.size)}
@@ -80,7 +80,7 @@ object SparkALS {

def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of ALS and is given as an example!
s"""WARN: This is a naive implementation of ALS and is given as an example!

Member:
Seems we don't need s.

Contributor Author:
@HyukjinKwon Addressed! Please review.

|Please use org.apache.spark.ml.recommendation.ALS
|for more conventional use.
""".stripMargin)
@@ -100,7 +100,7 @@ object SparkALS {
ITERATIONS = iters.getOrElse("5").toInt
slices = slices_.getOrElse("2").toInt
case _ =>
System.err.println("Usage: SparkALS [M] [U] [F] [iters] [partitions]")
System.err.println(s"Usage: SparkALS [M] [U] [F] [iters] [partitions]")

Member:
Ditto for s.

Contributor Author:
@HyukjinKwon Addressed! Please review.

System.exit(1)
}

@@ -135,10 +135,8 @@ object SparkALS {
.map(i => update(i, usb.value(i), msb.value, Rc.value.transpose()))
.collect()
usb = sc.broadcast(us) // Re-broadcast us because it was updated
println("RMSE = " + rmse(R, ms, us))
println()
println(s"RMSE = ${rmse(R, ms, us)}")
}

spark.stop()
}

@@ -51,7 +51,7 @@ object SparkHdfsLR {

def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
s"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
|Please use org.apache.spark.ml.classification.LogisticRegression
|for more conventional use.
""".stripMargin)
@@ -60,7 +60,7 @@ object SparkHdfsLR {
def main(args: Array[String]) {

if (args.length < 2) {
System.err.println("Usage: SparkHdfsLR <file> <iters>")
System.err.println(s"Usage: SparkHdfsLR <file> <iters>")
System.exit(1)
}

@@ -79,17 +79,17 @@ object SparkHdfsLR {

// Initialize w to a random value
val w = DenseVector.fill(D) {2 * rand.nextDouble - 1}
println("Initial w: " + w)
println(s"Initial w: ${w}")

for (i <- 1 to ITERATIONS) {
println("On iteration " + i)
println(s"On iteration ${i}")
val gradient = points.map { p =>
p.x * (1 / (1 + exp(-p.y * (w.dot(p.x)))) - 1) * p.y
}.reduce(_ + _)
w -= gradient
}

println("Final w: " + w)
println(s"Final w: ${w}")
spark.stop()
}
}
@@ -51,7 +51,7 @@ object SparkKMeans {

def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of KMeans Clustering and is given as an example!
s"""WARN: This is a naive implementation of KMeans Clustering and is given as an example!
|Please use org.apache.spark.ml.clustering.KMeans
|for more conventional use.
""".stripMargin)
@@ -60,7 +60,7 @@ object SparkKMeans {
def main(args: Array[String]) {

if (args.length < 3) {
System.err.println("Usage: SparkKMeans <file> <k> <convergeDist>")
System.err.println(s"Usage: SparkKMeans <file> <k> <convergeDist>")
System.exit(1)
}

@@ -95,10 +95,10 @@ object SparkKMeans {
for (newP <- newPoints) {
kPoints(newP._1) = newP._2
}
println("Finished iteration (delta = " + tempDist + ")")
println(s"Finished iteration (delta = ${tempDist})")
}

println("Final centers:")
println(s"Final centers:")
kPoints.foreach(println)
spark.stop()
}
@@ -53,7 +53,7 @@ object SparkLR {

def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
s"""WARN: This is a naive implementation of Logistic Regression and is given as an example!
|Please use org.apache.spark.ml.classification.LogisticRegression
|for more conventional use.
""".stripMargin)
@@ -73,17 +73,17 @@ object SparkLR {

// Initialize w to a random value
val w = DenseVector.fill(D) {2 * rand.nextDouble - 1}
println("Initial w: " + w)
println(s"Initial w: ${w}")

for (i <- 1 to ITERATIONS) {
println("On iteration " + i)
println(s"On iteration ${i}")
val gradient = points.map { p =>
p.x * (1 / (1 + exp(-p.y * (w.dot(p.x)))) - 1) * p.y
}.reduce(_ + _)
w -= gradient
}

println("Final w: " + w)
println(s"Final w: ${w}")

spark.stop()
}