Skip to content

Commit 79e6789

Browse files
committed
Spark Scala code changes addressing the review request
1 parent 5507cad commit 79e6789

File tree

4 files changed

+9
-6
lines changed

4 files changed

+9
-6
lines changed

examples/src/main/scala/org/apache/spark/examples/graphx/Analytics.scala

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -145,9 +145,11 @@ object Analytics extends Logging {
145145
// TriangleCount requires the graph to be partitioned
146146
.partitionBy(partitionStrategy.getOrElse(RandomVertexCut)).cache()
147147
val triangles = TriangleCount.run(graph)
148-
println(s"Triangles: ${triangles.vertices.map {
148+
val triangleTypes = triangles.vertices.map {
149149
case (vid, data) => data.toLong
150-
}.reduce(_ + _) / 3}")
150+
}.reduce(_ + _) / 3
151+
152+
println(s"Triangles: ${triangleTypes}")
151153
sc.stop()
152154

153155
case _ =>

examples/src/main/scala/org/apache/spark/examples/ml/CorrelationExample.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,10 +51,10 @@ object CorrelationExample {
5151

5252
val df = data.map(Tuple1.apply).toDF("features")
5353
val Row(coeff1: Matrix) = Correlation.corr(df, "features").head
54-
println(s"Pearson correlation matrix:\n ${coeff1.toString}")
54+
println(s"Pearson correlation matrix:\n $coeff1")
5555

5656
val Row(coeff2: Matrix) = Correlation.corr(df, "features", "spearman").head
57-
println(s"Spearman correlation matrix:\n ${coeff2.toString}")
57+
println(s"Spearman correlation matrix:\n $coeff2")
5858
// $example off$
5959

6060
spark.stop()

examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,11 +31,12 @@ object QuantileDiscretizerExample {
3131

3232
// $example on$
3333
val data = Array((0, 18.0), (1, 19.0), (2, 8.0), (3, 5.0), (4, 2.2))
34-
val df = spark.createDataFrame(data).toDF("id", "hour").repartition(1)
34+
val df = spark.createDataFrame(data).toDF("id", "hour")
3535
// $example off$
3636
// Output of QuantileDiscretizer for such small datasets can depend on the number of
3737
// partitions. Here we force a single partition to ensure consistent results.
3838
// Note this is not necessary for normal use cases
39+
.repartition(1)
3940

4041
// $example on$
4142
val discretizer = new QuantileDiscretizer()

examples/src/main/scala/org/apache/spark/examples/streaming/CustomReceiver.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ class CustomReceiver(host: String, port: Int)
8282
var socket: Socket = null
8383
var userInput: String = null
8484
try {
85-
logInfo(s"Connecting to $host $port")
85+
logInfo(s"Connecting to $host : $port")
8686
socket = new Socket(host, port)
8787
logInfo(s"Connected to $host : $port")
8888
val reader = new BufferedReader(

0 commit comments

Comments (0)