Skip to content

Commit 130e9ae

Browse files
xuanyuanking and dongjoon-hyun
authored and committed
[SPARK-29357][SQL][TESTS] Fix flaky test by changing to use AtomicLong
### What changes were proposed in this pull request?
Change to use AtomicLong instead of a var in the test.

### Why are the changes needed?
Fix flaky test.

### Does this PR introduce any user-facing change?
No.

### How was this patch tested?
Existing UT.

Closes #26020 from xuanyuanking/SPARK-25159.

Authored-by: Yuanjian Li <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
1 parent 20ee2f5 commit 130e9ae

File tree

1 file changed

+4
-3
lines changed

1 file changed

+4
-3
lines changed

sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ import java.io.{ByteArrayOutputStream, File}
2121
import java.nio.charset.StandardCharsets
2222
import java.sql.{Date, Timestamp}
2323
import java.util.UUID
24+
import java.util.concurrent.atomic.AtomicLong
2425

2526
import scala.util.Random
2627

@@ -2105,17 +2106,17 @@ class DataFrameSuite extends QueryTest with SharedSparkSession {
21052106
// partitions.
21062107
.write.partitionBy("p").option("compression", "gzip").json(path.getCanonicalPath)
21072108

2108-
var numJobs = 0
2109+
val numJobs = new AtomicLong(0)
21092110
sparkContext.addSparkListener(new SparkListener {
21102111
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
2111-
numJobs += 1
2112+
numJobs.incrementAndGet()
21122113
}
21132114
})
21142115

21152116
val df = spark.read.json(path.getCanonicalPath)
21162117
assert(df.columns === Array("i", "p"))
21172118
spark.sparkContext.listenerBus.waitUntilEmpty()
2118-
assert(numJobs == 1)
2119+
assert(numJobs.get() == 1L)
21192120
}
21202121
}
21212122

0 commit comments

Comments (0)