-
Notifications
You must be signed in to change notification settings - Fork 29k
[SPARK-22883] ML test for StructuredStreaming: spark.ml.feature, I-M #20964
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -17,14 +17,13 @@ | |
|
|
||
| package org.apache.spark.ml.feature | ||
|
|
||
| import org.apache.spark.SparkFunSuite | ||
| import org.apache.spark.ml.linalg.{Vector, Vectors} | ||
| import org.apache.spark.ml.param.ParamsSuite | ||
| import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils} | ||
| import org.apache.spark.mllib.util.MLlibTestSparkContext | ||
| import org.apache.spark.sql.Dataset | ||
| import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest, MLTestingUtils} | ||
| import org.apache.spark.sql.{Dataset, Row} | ||
|
|
||
| class MinHashLSHSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest { | ||
|
|
||
| class MinHashLSHSuite extends MLTest with DefaultReadWriteTest { | ||
|
|
||
| @transient var dataset: Dataset[_] = _ | ||
|
|
||
|
|
@@ -167,4 +166,20 @@ class MinHashLSHSuite extends SparkFunSuite with MLlibTestSparkContext with Defa | |
| assert(precision == 1.0) | ||
| assert(recall >= 0.7) | ||
| } | ||
|
|
||
| test("MinHashLSHModel.transform should work with Structured Streaming") { | ||
| val localSpark = spark | ||
| import localSpark.implicits._ | ||
|
|
||
| val model = new MinHashLSHModel("mh", randCoefficients = Array((1, 0))) | ||
| model.set(model.inputCol, "keys") | ||
| testTransformer[Tuple1[Vector]](dataset.toDF(), model, "keys", model.getOutputCol) { | ||
| case Row(_: Vector, output: Seq[_]) => | ||
| assert(output.length === model.randCoefficients.length) | ||
| // no AND-amplification yet: SPARK-18450, so each hash output is of length 1 | ||
| output.foreach { | ||
| case hashOutput: Vector => assert(hashOutput.size === 1) | ||
| } | ||
| } | ||
|
||
| } | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -84,7 +84,7 @@ class NGramSuite extends MLTest with DefaultReadWriteTest { | |
|
|
||
| def testNGram(t: NGram, dataFrame: DataFrame): Unit = { | ||
| testTransformer[(Seq[String], Seq[String])](dataFrame, t, "nGrams", "wantedNGrams") { | ||
| case Row(actualNGrams : Seq[String], wantedNGrams: Seq[String]) => | ||
| case Row(actualNGrams : Seq[_], wantedNGrams: Seq[_]) => | ||
|
||
| assert(actualNGrams === wantedNGrams) | ||
| } | ||
| } | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why does the "value" column use the
`java.lang.Double` type?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Since the column is nullable: a nullable column must use the boxed `java.lang.Double` type rather than the primitive `Double`, because a primitive cannot represent `null`.