Commit b5e2e6f ("Pass 3")
1 parent: 05932d7

5 files changed: +20 lines, -11 lines

examples/src/main/scala/org/apache/spark/streaming/examples/clickstream/PageViewStream.scala
(5 additions, 2 deletions)

@@ -69,8 +69,11 @@ object PageViewStream {
         val normalCount = statuses.filter(_ == 200).size
         val errorCount = statuses.size - normalCount
         val errorRatio = errorCount.toFloat / statuses.size
-        if (errorRatio > 0.05) {"%s: **%s**".format(zip, errorRatio)}
-        else {"%s: %s".format(zip, errorRatio)}
+        if (errorRatio > 0.05) {
+          "%s: **%s**".format(zip, errorRatio)
+        } else {
+          "%s: %s".format(zip, errorRatio)
+        }
     }

     // Return the number unique users in last 15 seconds
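
In Scala, if/else is an expression, so splitting the branches across lines and adding braces does not change the value this map produces: each branch still evaluates to the formatted String. A minimal standalone sketch of the same formatting logic (the object name and sample values are hypothetical, not part of the example app):

object ErrorRatioLabel {
  // Same branching as in PageViewStream: ZIP codes whose error ratio
  // exceeds 5% are highlighted with asterisks.
  def label(zip: String, errorRatio: Float): String =
    if (errorRatio > 0.05) {
      "%s: **%s**".format(zip, errorRatio)
    } else {
      "%s: %s".format(zip, errorRatio)
    }

  def main(args: Array[String]): Unit = {
    println(label("94110", 0.10f))  // prints "94110: **0.1**"
    println(label("94110", 0.01f))  // prints "94110: 0.01"
  }
}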

sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableOperations.scala
(3 additions, 2 deletions)

@@ -203,8 +203,9 @@ case class InsertIntoParquetTable(
     val stageId = sc.newRddId()

     val taskIdOffset =
-      if (overwrite) 1
-      else {
+      if (overwrite) {
+        1
+      } else {
         FileSystemHelper
           .findMaxTaskId(NewFileOutputFormat.getOutputPath(job).toString, job.getConfiguration) + 1
       }
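
The reshaped expression keeps the original append logic: when overwriting, task ids can start at 1; otherwise they start just past the highest task id already found in the output path, presumably so new part files do not collide with existing ones. A minimal sketch of that decision, with a hypothetical findMaxExistingTaskId standing in for the FileSystemHelper call:

// Sketch only: findMaxExistingTaskId is a placeholder for
// FileSystemHelper.findMaxTaskId(outputPath, configuration).
def taskIdOffset(overwrite: Boolean, findMaxExistingTaskId: () => Int): Int =
  if (overwrite) {
    1
  } else {
    findMaxExistingTaskId() + 1
  }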

sql/core/src/main/scala/org/apache/spark/sql/parquet/ParquetTableSupport.scala
(5 additions, 2 deletions)

@@ -158,8 +158,11 @@ private[parquet] class CatalystGroupConverter(
     a => a.dataType match {
       case ctype: NativeType =>
         // note: for some reason matching for StringType fails so use this ugly if instead
-        if (ctype == StringType) new CatalystPrimitiveStringConverter(this, schema.indexOf(a))
-        else new CatalystPrimitiveConverter(this, schema.indexOf(a))
+        if (ctype == StringType) {
+          new CatalystPrimitiveStringConverter(this, schema.indexOf(a))
+        } else {
+          new CatalystPrimitiveConverter(this, schema.indexOf(a))
+        }
       case _ => throw new RuntimeException(
         s"unable to convert datatype ${a.dataType.toString} in CatalystGroupConverter")
     }
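
The selection itself is unchanged: for a native column type, a string-specific converter is chosen when the type is StringType, and a generic primitive converter otherwise. A self-contained toy sketch of that dispatch, using made-up types rather than Catalyst's:

sealed trait ToyDataType
case object ToyStringType extends ToyDataType
case object ToyIntType extends ToyDataType

trait ToyConverter
class ToyStringConverter(fieldIndex: Int) extends ToyConverter
class ToyPrimitiveConverter(fieldIndex: Int) extends ToyConverter

// Same shape as the CatalystGroupConverter branch: an equality check on the
// type value picks the string-aware converter; everything else gets the
// generic primitive converter.
def converterFor(dataType: ToyDataType, fieldIndex: Int): ToyConverter =
  if (dataType == ToyStringType) {
    new ToyStringConverter(fieldIndex)
  } else {
    new ToyPrimitiveConverter(fieldIndex)
  }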

yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
(5 additions, 4 deletions)

@@ -147,12 +147,12 @@ class ApplicationMaster(args: ApplicationMasterArguments, conf: Configuration,
     // LOCAL_DIRS => 2.X, YARN_LOCAL_DIRS => 0.23.X
     val localDirs = Option(System.getenv("YARN_LOCAL_DIRS"))
       .orElse(Option(System.getenv("LOCAL_DIRS")))
-
+
     localDirs match {
       case None => throw new Exception("Yarn Local dirs can't be empty")
       case Some(l) => l
     }
-  }
+  }

   private def getApplicationAttemptId(): ApplicationAttemptId = {
     val envs = System.getenv()

@@ -321,8 +321,9 @@ class ApplicationMaster(args: ApplicationMasterArguments, conf: Configuration,
             logInfo("Allocating %d containers to make up for (potentially) lost containers".
               format(missingExecutorCount))
             yarnAllocator.allocateContainers(missingExecutorCount)
+          } else {
+            sendProgress()
           }
-          else sendProgress()
           Thread.sleep(sleepTime)
         }
       }

@@ -361,7 +362,7 @@ class ApplicationMaster(args: ApplicationMasterArguments, conf: Configuration,
       return
     }
     isFinished = true
-
+
     logInfo("finishApplicationMaster with " + status)
     if (registered) {
       val finishReq = Records.newRecord(classOf[FinishApplicationMasterRequest])

yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ExecutorLauncher.scala
(2 additions, 1 deletion)

@@ -243,8 +243,9 @@ class ExecutorLauncher(args: ApplicationMasterArguments, conf: Configuration, sp
             logInfo("Allocating " + missingExecutorCount +
               " containers to make up for (potentially ?) lost containers")
             yarnAllocator.allocateContainers(missingExecutorCount)
+          } else {
+            sendProgress()
           }
-          else sendProgress()
           Thread.sleep(sleepTime)
         }
       }
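
Both YARN changes reshape the same reporter-loop branch: each pass either asks the allocator for replacement containers or, when nothing is missing, reports progress, then sleeps. A simplified sketch of that loop shape (not the actual ApplicationMaster or ExecutorLauncher code; the names and the loop condition are placeholders):

// Simplified reporter loop; all parameters are hypothetical stand-ins.
def reporterLoop(
    sleepTime: Long,
    keepRunning: () => Boolean,
    missingExecutorCount: () => Int,
    allocateContainers: Int => Unit,
    sendProgress: () => Unit): Unit = {
  while (keepRunning()) {
    val missing = missingExecutorCount()
    if (missing > 0) {
      // Ask YARN for replacements for (potentially) lost containers.
      allocateContainers(missing)
    } else {
      // Otherwise just report progress so the loop keeps heartbeating.
      sendProgress()
    }
    Thread.sleep(sleepTime)
  }
}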
