Skip to content

Commit 99b37a9

Browse files
committed
[SPARK-25398] Minor bugs from comparing unrelated types
## What changes were proposed in this pull request? Correct some comparisons between unrelated types to what they seem to have been trying to do. ## How was this patch tested? Existing tests. Closes #22384 from srowen/SPARK-25398. Authored-by: Sean Owen <[email protected]> Signed-off-by: Sean Owen <[email protected]> (cherry picked from commit cfbdd6a) Signed-off-by: Sean Owen <[email protected]>
1 parent 16127e8 commit 99b37a9

File tree

11 files changed

+27
-31
lines changed

11 files changed

+27
-31
lines changed

core/src/main/scala/org/apache/spark/status/LiveEntity.scala

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,6 @@ import org.apache.spark.storage.RDDInfo
3333
import org.apache.spark.ui.SparkUI
3434
import org.apache.spark.util.AccumulatorContext
3535
import org.apache.spark.util.collection.OpenHashSet
36-
import org.apache.spark.util.kvstore.KVStore
3736

3837
/**
3938
* A mutable representation of a live entity in Spark (jobs, stages, tasks, et al). Every live
@@ -585,8 +584,7 @@ private object LiveEntityHelpers {
585584
.filter { acc =>
586585
// We don't need to store internal or SQL accumulables as their values will be shown in
587586
// other places, so drop them to reduce the memory usage.
588-
!acc.internal && (!acc.metadata.isDefined ||
589-
acc.metadata.get != Some(AccumulatorContext.SQL_ACCUM_IDENTIFIER))
587+
!acc.internal && acc.metadata != Some(AccumulatorContext.SQL_ACCUM_IDENTIFIER)
590588
}
591589
.map { acc =>
592590
new v1.AccumulableInfo(

core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -175,7 +175,7 @@ private[spark] object ClosureCleaner extends Logging {
175175
closure.getClass.isSynthetic &&
176176
closure
177177
.getClass
178-
.getInterfaces.exists(_.getName.equals("scala.Serializable"))
178+
.getInterfaces.exists(_.getName == "scala.Serializable")
179179

180180
if (isClosureCandidate) {
181181
try {

core/src/test/scala/org/apache/spark/util/collection/ExternalAppendOnlyMapSuite.scala

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,6 @@
1717

1818
package org.apache.spark.util.collection
1919

20-
import java.util.Objects
21-
2220
import scala.collection.mutable.ArrayBuffer
2321
import scala.ref.WeakReference
2422

@@ -509,7 +507,7 @@ class ExternalAppendOnlyMapSuite extends SparkFunSuite
509507
.sorted
510508

511509
assert(it.isEmpty)
512-
assert(keys == (0 until 100))
510+
assert(keys == (0 until 100).toList)
513511

514512
assert(map.numSpills == 0)
515513
// these asserts try to show that we're no longer holding references to the underlying map.

resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -202,7 +202,7 @@ private[spark] class MesosClusterScheduler(
202202
} else if (removeFromPendingRetryDrivers(submissionId)) {
203203
k.success = true
204204
k.message = "Removed driver while it's being retried"
205-
} else if (finishedDrivers.exists(_.driverDescription.submissionId.equals(submissionId))) {
205+
} else if (finishedDrivers.exists(_.driverDescription.submissionId == submissionId)) {
206206
k.success = false
207207
k.message = "Driver already terminated"
208208
} else {
@@ -222,21 +222,21 @@ private[spark] class MesosClusterScheduler(
222222
}
223223
s.submissionId = submissionId
224224
stateLock.synchronized {
225-
if (queuedDrivers.exists(_.submissionId.equals(submissionId))) {
225+
if (queuedDrivers.exists(_.submissionId == submissionId)) {
226226
s.success = true
227227
s.driverState = "QUEUED"
228228
} else if (launchedDrivers.contains(submissionId)) {
229229
s.success = true
230230
s.driverState = "RUNNING"
231231
launchedDrivers(submissionId).mesosTaskStatus.foreach(state => s.message = state.toString)
232-
} else if (finishedDrivers.exists(_.driverDescription.submissionId.equals(submissionId))) {
232+
} else if (finishedDrivers.exists(_.driverDescription.submissionId == submissionId)) {
233233
s.success = true
234234
s.driverState = "FINISHED"
235235
finishedDrivers
236236
.find(d => d.driverDescription.submissionId.equals(submissionId)).get.mesosTaskStatus
237237
.foreach(state => s.message = state.toString)
238-
} else if (pendingRetryDrivers.exists(_.submissionId.equals(submissionId))) {
239-
val status = pendingRetryDrivers.find(_.submissionId.equals(submissionId))
238+
} else if (pendingRetryDrivers.exists(_.submissionId == submissionId)) {
239+
val status = pendingRetryDrivers.find(_.submissionId == submissionId)
240240
.get.retryState.get.lastFailureStatus
241241
s.success = true
242242
s.driverState = "RETRYING"
@@ -254,13 +254,13 @@ private[spark] class MesosClusterScheduler(
254254
*/
255255
def getDriverState(submissionId: String): Option[MesosDriverState] = {
256256
stateLock.synchronized {
257-
queuedDrivers.find(_.submissionId.equals(submissionId))
257+
queuedDrivers.find(_.submissionId == submissionId)
258258
.map(d => new MesosDriverState("QUEUED", d))
259259
.orElse(launchedDrivers.get(submissionId)
260260
.map(d => new MesosDriverState("RUNNING", d.driverDescription, Some(d))))
261-
.orElse(finishedDrivers.find(_.driverDescription.submissionId.equals(submissionId))
261+
.orElse(finishedDrivers.find(_.driverDescription.submissionId == submissionId)
262262
.map(d => new MesosDriverState("FINISHED", d.driverDescription, Some(d))))
263-
.orElse(pendingRetryDrivers.find(_.submissionId.equals(submissionId))
263+
.orElse(pendingRetryDrivers.find(_.submissionId == submissionId)
264264
.map(d => new MesosDriverState("RETRYING", d)))
265265
}
266266
}
@@ -814,7 +814,7 @@ private[spark] class MesosClusterScheduler(
814814
status: Int): Unit = {}
815815

816816
private def removeFromQueuedDrivers(subId: String): Boolean = {
817-
val index = queuedDrivers.indexWhere(_.submissionId.equals(subId))
817+
val index = queuedDrivers.indexWhere(_.submissionId == subId)
818818
if (index != -1) {
819819
queuedDrivers.remove(index)
820820
queuedDriversState.expunge(subId)
@@ -834,7 +834,7 @@ private[spark] class MesosClusterScheduler(
834834
}
835835

836836
private def removeFromPendingRetryDrivers(subId: String): Boolean = {
837-
val index = pendingRetryDrivers.indexWhere(_.submissionId.equals(subId))
837+
val index = pendingRetryDrivers.indexWhere(_.submissionId == subId)
838838
if (index != -1) {
839839
pendingRetryDrivers.remove(index)
840840
pendingRetryDriversState.expunge(subId)

resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterSchedulerSuite.scala

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ import java.util.{Collection, Collections, Date}
2121

2222
import scala.collection.JavaConverters._
2323

24-
import org.apache.mesos.Protos.{Environment, Secret, TaskState => MesosTaskState, _}
24+
import org.apache.mesos.Protos.{TaskState => MesosTaskState, _}
2525
import org.apache.mesos.Protos.Value.{Scalar, Type}
2626
import org.apache.mesos.SchedulerDriver
2727
import org.mockito.{ArgumentCaptor, Matchers}
@@ -146,14 +146,14 @@ class MesosClusterSchedulerSuite extends SparkFunSuite with LocalSparkContext wi
146146
assert(scheduler.getResource(resources, "cpus") == 1.5)
147147
assert(scheduler.getResource(resources, "mem") == 1200)
148148
val resourcesSeq: Seq[Resource] = resources.asScala
149-
val cpus = resourcesSeq.filter(_.getName.equals("cpus")).toList
149+
val cpus = resourcesSeq.filter(_.getName == "cpus").toList
150150
assert(cpus.size == 2)
151-
assert(cpus.exists(_.getRole().equals("role2")))
152-
assert(cpus.exists(_.getRole().equals("*")))
153-
val mem = resourcesSeq.filter(_.getName.equals("mem")).toList
151+
assert(cpus.exists(_.getRole() == "role2"))
152+
assert(cpus.exists(_.getRole() == "*"))
153+
val mem = resourcesSeq.filter(_.getName == "mem").toList
154154
assert(mem.size == 2)
155-
assert(mem.exists(_.getRole().equals("role2")))
156-
assert(mem.exists(_.getRole().equals("*")))
155+
assert(mem.exists(_.getRole() == "role2"))
156+
assert(mem.exists(_.getRole() == "*"))
157157

158158
verify(driver, times(1)).launchTasks(
159159
Matchers.eq(Collections.singleton(offer.getId)),

resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ class MesosFineGrainedSchedulerBackendSuite
106106
// uri is null.
107107
val (executorInfo, _) = mesosSchedulerBackend.createExecutorInfo(resources, "test-id")
108108
val executorResources = executorInfo.getResourcesList
109-
val cpus = executorResources.asScala.find(_.getName.equals("cpus")).get.getScalar.getValue
109+
val cpus = executorResources.asScala.find(_.getName == "cpus").get.getScalar.getValue
110110

111111
assert(cpus === mesosExecutorCores)
112112
}

resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientSuite.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -191,7 +191,7 @@ class ClientSuite extends SparkFunSuite with Matchers {
191191
appContext.getQueue should be ("staging-queue")
192192
appContext.getAMContainerSpec should be (containerLaunchContext)
193193
appContext.getApplicationType should be ("SPARK")
194-
appContext.getClass.getMethods.filter(_.getName.equals("getApplicationTags")).foreach{ method =>
194+
appContext.getClass.getMethods.filter(_.getName == "getApplicationTags").foreach { method =>
195195
val tags = method.invoke(appContext).asInstanceOf[java.util.Set[String]]
196196
tags should contain allOf ("tag1", "dup", "tag2", "multi word")
197197
tags.asScala.count(_.nonEmpty) should be (4)

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/PropagateEmptyRelationSuite.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ class PropagateEmptyRelationSuite extends PlanTest {
147147
.where(false)
148148
.select('a)
149149
.where('a > 1)
150-
.where('a != 200)
150+
.where('a =!= 200)
151151
.orderBy('a.asc)
152152

153153
val optimized = Optimize.execute(query.analyze)

sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/UnsafeArraySuite.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -114,15 +114,15 @@ class UnsafeArraySuite extends SparkFunSuite {
114114
assert(unsafeDate.isInstanceOf[UnsafeArrayData])
115115
assert(unsafeDate.numElements == dateArray.length)
116116
dateArray.zipWithIndex.map { case (e, i) =>
117-
assert(unsafeDate.get(i, DateType) == e)
117+
assert(unsafeDate.get(i, DateType).asInstanceOf[Int] == e)
118118
}
119119

120120
val unsafeTimestamp = ExpressionEncoder[Array[Long]].resolveAndBind().
121121
toRow(timestampArray).getArray(0)
122122
assert(unsafeTimestamp.isInstanceOf[UnsafeArrayData])
123123
assert(unsafeTimestamp.numElements == timestampArray.length)
124124
timestampArray.zipWithIndex.map { case (e, i) =>
125-
assert(unsafeTimestamp.get(i, TimestampType) == e)
125+
assert(unsafeTimestamp.get(i, TimestampType).asInstanceOf[Long] == e)
126126
}
127127

128128
Seq(decimalArray4_1, decimalArray20_20).map { decimalArray =>

sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -611,7 +611,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
611611
).toDF("id", "stringData")
612612
val sampleDF = df.sample(false, 0.7, 50)
613613
// After sampling, sampleDF doesn't contain id=1.
614-
assert(!sampleDF.select("id").collect.contains(1))
614+
assert(!sampleDF.select("id").as[Int].collect.contains(1))
615615
// simpleUdf should not encounter id=1.
616616
checkAnswer(sampleDF.select(simpleUdf($"id")), List.fill(sampleDF.count.toInt)(Row(1)))
617617
}

0 commit comments

Comments (0)