3 changes: 3 additions & 0 deletions R/pkg/inst/tests/testthat/test_Windows.R
@@ -27,3 +27,6 @@ test_that("sparkJars tag in SparkContext", {
abcPath <- testOutput[1]
expect_equal(abcPath, "a\\b\\c")
})

message("--- End test (Windows) ", as.POSIXct(Sys.time(), tz = "GMT"))
message("elapsed ", (proc.time() - timer_ptm)[3])
4 changes: 4 additions & 0 deletions R/pkg/inst/tests/testthat/test_mllib_classification.R
@@ -28,6 +28,8 @@ absoluteSparkPath <- function(x) {
}

test_that("spark.svmLinear", {
skip_on_cran()

df <- suppressWarnings(createDataFrame(iris))
training <- df[df$Species %in% c("versicolor", "virginica"), ]
model <- spark.svmLinear(training, Species ~ ., regParam = 0.01, maxIter = 10)
@@ -226,6 +228,8 @@ test_that("spark.logit", {
})

test_that("spark.mlp", {
skip_on_cran()

df <- read.df(absoluteSparkPath("data/mllib/sample_multiclass_classification_data.txt"),
source = "libsvm")
model <- spark.mlp(df, label ~ features, blockSize = 128, layers = c(4, 5, 4, 3),
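The added skip_on_cran() calls are testthat's standard guard: the test body runs only when the NOT_CRAN environment variable is "true" and is otherwise reported as skipped (as during an R CMD check on CRAN). A minimal sketch of that behaviour for reference; skip_on_cran_sketch is an illustrative name, not code from this change:

skip_on_cran_sketch <- function() {
  if (identical(Sys.getenv("NOT_CRAN"), "true")) {
    return(invisible(TRUE))   # full test run: keep executing the test body
  }
  testthat::skip("On CRAN")   # CRAN check: mark the enclosing test_that() as skipped
}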
2 changes: 2 additions & 0 deletions R/pkg/inst/tests/testthat/test_mllib_clustering.R
@@ -28,6 +28,8 @@ absoluteSparkPath <- function(x) {
}

test_that("spark.bisectingKmeans", {
skip_on_cran()

newIris <- iris
newIris$Species <- NULL
training <- suppressWarnings(createDataFrame(newIris))
82 changes: 48 additions & 34 deletions R/pkg/inst/tests/testthat/test_mllib_tree.R
@@ -28,6 +28,8 @@ absoluteSparkPath <- function(x) {
}

test_that("spark.gbt", {
skip_on_cran()

# regression
data <- suppressWarnings(createDataFrame(longley))
model <- spark.gbt(data, Employed ~ ., "regression", maxDepth = 5, maxBins = 16, seed = 123)
@@ -103,10 +105,12 @@ test_that("spark.gbt", {
expect_equal(stats$maxDepth, 5)

# spark.gbt classification can work on libsvm data
data <- read.df(absoluteSparkPath("data/mllib/sample_binary_classification_data.txt"),
source = "libsvm")
model <- spark.gbt(data, label ~ features, "classification")
expect_equal(summary(model)$numFeatures, 692)
if (not_cran_or_windows_with_hadoop()) {
data <- read.df(absoluteSparkPath("data/mllib/sample_binary_classification_data.txt"),
source = "libsvm")
model <- spark.gbt(data, label ~ features, "classification")
expect_equal(summary(model)$numFeatures, 692)
}
})

test_that("spark.randomForest", {
@@ -211,13 +215,17 @@ test_that("spark.randomForest", {
expect_equal(length(grep("2.0", predictions)), 50)

# spark.randomForest classification can work on libsvm data
data <- read.df(absoluteSparkPath("data/mllib/sample_multiclass_classification_data.txt"),
source = "libsvm")
model <- spark.randomForest(data, label ~ features, "classification")
expect_equal(summary(model)$numFeatures, 4)
if (not_cran_or_windows_with_hadoop()) {
data <- read.df(absoluteSparkPath("data/mllib/sample_multiclass_classification_data.txt"),
source = "libsvm")
model <- spark.randomForest(data, label ~ features, "classification")
expect_equal(summary(model)$numFeatures, 4)
}
})

test_that("spark.decisionTree", {
skip_on_cran()

# regression
data <- suppressWarnings(createDataFrame(longley))
model <- spark.decisionTree(data, Employed ~ ., "regression", maxDepth = 5, maxBins = 16)
@@ -234,19 +242,21 @@ test_that("spark.decisionTree", {
expect_error(capture.output(stats), NA)
expect_true(length(capture.output(stats)) > 6)

modelPath <- tempfile(pattern = "spark-decisionTreeRegression", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
write.ml(model, modelPath, overwrite = TRUE)
model2 <- read.ml(modelPath)
stats2 <- summary(model2)
expect_equal(stats$formula, stats2$formula)
expect_equal(stats$numFeatures, stats2$numFeatures)
expect_equal(stats$features, stats2$features)
expect_equal(stats$featureImportances, stats2$featureImportances)
expect_equal(stats$maxDepth, stats2$maxDepth)
if (not_cran_or_windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-decisionTreeRegression", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
write.ml(model, modelPath, overwrite = TRUE)
model2 <- read.ml(modelPath)
stats2 <- summary(model2)
expect_equal(stats$formula, stats2$formula)
expect_equal(stats$numFeatures, stats2$numFeatures)
expect_equal(stats$features, stats2$features)
expect_equal(stats$featureImportances, stats2$featureImportances)
expect_equal(stats$maxDepth, stats2$maxDepth)

unlink(modelPath)
unlink(modelPath)
}

# classification
data <- suppressWarnings(createDataFrame(iris))
@@ -263,17 +273,19 @@ test_that("spark.decisionTree", {
expect_equal(length(grep("setosa", predictions)), 50)
expect_equal(length(grep("versicolor", predictions)), 50)

modelPath <- tempfile(pattern = "spark-decisionTreeClassification", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
write.ml(model, modelPath, overwrite = TRUE)
model2 <- read.ml(modelPath)
stats2 <- summary(model2)
expect_equal(stats$depth, stats2$depth)
expect_equal(stats$numNodes, stats2$numNodes)
expect_equal(stats$numClasses, stats2$numClasses)
if (not_cran_or_windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-decisionTreeClassification", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
write.ml(model, modelPath, overwrite = TRUE)
model2 <- read.ml(modelPath)
stats2 <- summary(model2)
expect_equal(stats$depth, stats2$depth)
expect_equal(stats$numNodes, stats2$numNodes)
expect_equal(stats$numClasses, stats2$numClasses)

unlink(modelPath)
unlink(modelPath)
}

# Test numeric response variable
labelToIndex <- function(species) {
@@ -297,10 +309,12 @@ test_that("spark.decisionTree", {
expect_equal(length(grep("2.0", predictions)), 50)

# spark.decisionTree classification can work on libsvm data
data <- read.df(absoluteSparkPath("data/mllib/sample_multiclass_classification_data.txt"),
source = "libsvm")
model <- spark.decisionTree(data, label ~ features, "classification")
expect_equal(summary(model)$numFeatures, 4)
if (not_cran_or_windows_with_hadoop()) {
data <- read.df(absoluteSparkPath("data/mllib/sample_multiclass_classification_data.txt"),
source = "libsvm")
model <- spark.decisionTree(data, label ~ features, "classification")
expect_equal(summary(model)$numFeatures, 4)
}
})

sparkR.session.stop()
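not_cran_or_windows_with_hadoop() is a helper expected to come from the SparkR test utilities; its definition is not part of this diff. The model save/load and libsvm blocks above now run only when it returns TRUE. A hedged sketch of the gate it presumably implements (illustrative only, not the actual definition):

not_cran_or_windows_with_hadoop_sketch <- function() {
  not_cran <- identical(Sys.getenv("NOT_CRAN"), "true")
  on_windows <- .Platform$OS.type == "windows"
  has_hadoop <- nzchar(Sys.getenv("HADOOP_HOME"))
  # run the guarded blocks on a full test run, and on Windows only when Hadoop is available
  not_cran && (!on_windows || has_hadoop)
}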
15 changes: 15 additions & 0 deletions R/pkg/inst/tests/testthat/test_sparkSQL.R
@@ -1395,6 +1395,8 @@ test_that("column operators", {
})

test_that("column functions", {
skip_on_cran()

c <- column("a")
c1 <- abs(c) + acos(c) + approxCountDistinct(c) + ascii(c) + asin(c) + atan(c)
c2 <- avg(c) + base64(c) + bin(c) + bitwiseNOT(c) + cbrt(c) + ceil(c) + cos(c)
@@ -1780,6 +1782,8 @@ test_that("when(), otherwise() and ifelse() with column on a DataFrame", {
})

test_that("group by, agg functions", {
skip_on_cran()

df <- read.json(jsonPath)
df1 <- agg(df, name = "max", age = "sum")
expect_equal(1, count(df1))
@@ -2121,6 +2125,8 @@ test_that("filter() on a DataFrame", {
})

test_that("join(), crossJoin() and merge() on a DataFrame", {
skip_on_cran()

df <- read.json(jsonPath)

mockLines2 <- c("{\"name\":\"Michael\", \"test\": \"yes\"}",
@@ -2978,6 +2984,7 @@ test_that("dapply() and dapplyCollect() on a DataFrame", {
})

test_that("dapplyCollect() on DataFrame with a binary column", {
skip_on_cran()

df <- data.frame(key = 1:3)
df$bytes <- lapply(df$key, serialize, connection = NULL)
@@ -2999,6 +3006,8 @@ test_that("dapplyCollect() on DataFrame with a binary column", {
})

test_that("repartition by columns on DataFrame", {
skip_on_cran()

df <- createDataFrame(
list(list(1L, 1, "1", 0.1), list(1L, 2, "2", 0.2), list(3L, 3, "3", 0.3)),
c("a", "b", "c", "d"))
@@ -3037,6 +3046,8 @@ test_that("repartition by columns on DataFrame", {
})

test_that("coalesce, repartition, numPartitions", {
skip_on_cran()

df <- as.DataFrame(cars, numPartitions = 5)
expect_equal(getNumPartitions(df), 5)
expect_equal(getNumPartitions(coalesce(df, 3)), 3)
@@ -3056,6 +3067,8 @@ test_that("coalesce, repartition, numPartitions", {
})

test_that("gapply() and gapplyCollect() on a DataFrame", {
skip_on_cran()

df <- createDataFrame (
list(list(1L, 1, "1", 0.1), list(1L, 2, "1", 0.2), list(3L, 3, "3", 0.3)),
c("a", "b", "c", "d"))
@@ -3208,6 +3221,8 @@ test_that("createDataFrame sqlContext parameter backward compatibility", {
})

test_that("randomSplit", {
skip_on_cran()

num <- 4000
df <- createDataFrame(data.frame(id = 1:num))
weights <- c(2, 3, 5)
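With skip_on_cran() at the top of these test_that() blocks, running this file outside the regular harness needs NOT_CRAN set, otherwise most of the suite is skipped. A hedged example of a local run with the skips disabled; it assumes a source checkout with the layout shown in this diff, SPARK_HOME pointing at a local Spark build, and the file's own setup code creating the Spark session:

Sys.setenv(NOT_CRAN = "true")   # opt back in to the gated tests
library(testthat)
library(SparkR)
test_file("R/pkg/inst/tests/testthat/test_sparkSQL.R")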
3 changes: 3 additions & 0 deletions R/pkg/inst/tests/testthat/test_utils.R
@@ -243,3 +243,6 @@ test_that("basenameSansExtFromUrl", {
})

sparkR.session.stop()

message("--- End test (utils) ", as.POSIXct(Sys.time(), tz = "GMT"))
message("elapsed ", (proc.time() - timer_ptm)[3])
6 changes: 6 additions & 0 deletions R/pkg/tests/run-all.R
@@ -21,6 +21,12 @@ library(SparkR)
# Turn all warnings into errors
options("warn" = 2)

if (.Platform$OS.type == "windows") {
Sys.setenv(TZ = "GMT")
}
message("--- Start test ", as.POSIXct(Sys.time(), tz = "GMT"))
timer_ptm <- proc.time()

# Setup global test environment
# Install Spark first to set SPARK_HOME
install.spark()
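The timing hooks above pair with the "--- End test" / "elapsed" messages added at the bottom of the individual test files (test_Windows.R and test_utils.R in this diff): timer_ptm is captured once before the suite starts, and each file then reports the wall-clock seconds elapsed since that point. A minimal standalone illustration of the pattern, not part of the change itself:

timer_ptm <- proc.time()
Sys.sleep(1)                                        # stand-in for running a test file
message("elapsed ", (proc.time() - timer_ptm)[3])   # prints roughly 1 (seconds)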
@@ -39,7 +39,7 @@
Started
</span>
</th>
<th>
<th class="completedColumn">
<span data-toggle="tooltip" data-placement="above" title="The completed time of this application.">
Completed
</span>
@@ -73,7 +73,7 @@
{{#attempts}}
<td class="attemptIDSpan"><a href="{{uiroot}}/history/{{id}}/{{attemptId}}/jobs/">{{attemptId}}</a></td>
<td>{{startTime}}</td>
<td>{{endTime}}</td>
<td class="completedColumn">{{endTime}}</td>
<td><span title="{{duration}}" class="durationClass">{{duration}}</span></td>
<td>{{sparkUser}}</td>
<td>{{lastUpdated}}</td>
@@ -177,6 +177,13 @@ $(document).ready(function() {
}
}

if (requestedIncomplete) {
var completedCells = document.getElementsByClassName("completedColumn");
for (i = 0; i < completedCells.length; i++) {
completedCells[i].style.display='none';
}
}

var durationCells = document.getElementsByClassName("durationClass");
for (i = 0; i < durationCells.length; i++) {
var timeInMilliseconds = parseInt(durationCells[i].title);
4 changes: 2 additions & 2 deletions core/src/main/scala/org/apache/spark/util/AccumulatorV2.scala
@@ -68,7 +68,7 @@ abstract class AccumulatorV2[IN, OUT] extends Serializable {

private def assertMetadataNotNull(): Unit = {
if (metadata == null) {
throw new IllegalAccessError("The metadata of this accumulator has not been assigned yet.")
throw new IllegalStateException("The metadata of this accumulator has not been assigned yet.")
}
}

@@ -265,7 +265,7 @@ private[spark] object AccumulatorContext {
// Since we are storing weak references, we must check whether the underlying data is valid.
val acc = ref.get
if (acc eq null) {
throw new IllegalAccessError(s"Attempted to access garbage collected accumulator $id")
throw new IllegalStateException(s"Attempted to access garbage collected accumulator $id")
}
acc
}
@@ -210,7 +210,7 @@ class AccumulatorSuite extends SparkFunSuite with Matchers with LocalSparkContex
assert(ref.get.isEmpty)

// Getting a garbage collected accum should throw error
intercept[IllegalAccessError] {
intercept[IllegalStateException] {
AccumulatorContext.get(accId)
}

@@ -553,10 +553,10 @@ class BasicSchedulerIntegrationSuite extends SchedulerIntegrationSuite[SingleCor
*/
testScheduler("multi-stage job") {

def stageToOutputParts(stageId: Int): Int = {
stageId match {
def shuffleIdToOutputParts(shuffleId: Int): Int = {
shuffleId match {
case 0 => 10
case 2 => 20
case 1 => 20
case _ => 30
}
}
@@ -577,11 +577,12 @@ class BasicSchedulerIntegrationSuite extends SchedulerIntegrationSuite[SingleCor
// b/c the stage numbering is non-deterministic, so stage number alone doesn't tell
// us what to check
}

(task.stageId, task.stageAttemptId, task.partitionId) match {
case (stage, 0, _) if stage < 4 =>
val shuffleId =
scheduler.stageIdToStage(stage).asInstanceOf[ShuffleMapStage].shuffleDep.shuffleId
backend.taskSuccess(taskDescription,
DAGSchedulerSuite.makeMapStatus("hostA", stageToOutputParts(stage)))
DAGSchedulerSuite.makeMapStatus("hostA", shuffleIdToOutputParts(shuffleId)))
case (4, 0, partition) =>
backend.taskSuccess(taskDescription, 4321 + partition)
}
@@ -85,12 +85,10 @@ private[kafka010] object KafkaWriter extends Logging {
topic: Option[String] = None): Unit = {
val schema = queryExecution.analyzed.output
validateQuery(queryExecution, kafkaParameters, topic)
SQLExecution.withNewExecutionId(sparkSession, queryExecution) {
queryExecution.toRdd.foreachPartition { iter =>
val writeTask = new KafkaWriteTask(kafkaParameters, schema, topic)
Utils.tryWithSafeFinally(block = writeTask.execute(iter))(
finallyBlock = writeTask.close())
}
queryExecution.toRdd.foreachPartition { iter =>
val writeTask = new KafkaWriteTask(kafkaParameters, schema, topic)
Utils.tryWithSafeFinally(block = writeTask.execute(iter))(
finallyBlock = writeTask.close())
}
}
}