Skip to content
Merged

sync #19

Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
20cd47e
[SPARK-32030][SQL] Support unlimited MATCHED and NOT MATCHED clauses …
Jun 29, 2020
5472170
[SPARK-29999][SS][FOLLOWUP] Fix test to check the actual metadata log…
HeartSaVioR Jun 30, 2020
165c948
[SPARK-32068][WEBUI] Correct task launch time show issue due to timezon…
TJX2014 Jun 30, 2020
5176707
[MINOR][DOCS] Fix a typo for a configuration property of resources al…
sarutak Jun 30, 2020
67cb7ea
[SPARK-31336][SQL] Support Oracle Kerberos login in JDBC connector
gaborgsomogyi Jun 30, 2020
bbd0275
[MINOR][SQL] Fix spaces in JDBC connection providers
gaborgsomogyi Jun 30, 2020
dd03c31
[SPARK-32088][PYTHON][FOLLOWUP] Replace `collect()` by `show()` in th…
MaxGekk Jul 1, 2020
8194d9e
[SPARK-32142][SQL][TESTS] Keep the original tests and codes to avoid …
HyukjinKwon Jul 1, 2020
02f3b80
[SPARK-32131][SQL] Fix AnalysisException messages at UNION/EXCEPT/MIN…
GuoPhilipse Jul 1, 2020
a4ba344
[SPARK-32095][SQL] Update documentation to reflect usage of updated s…
Jul 1, 2020
15fb5d7
[SPARK-28169][SQL] Convert scan predicate condition to CNF
AngersZhuuuu Jul 1, 2020
f7d9e3d
[SPARK-23631][ML][PYSPARK] Add summary to RandomForestClassificationM…
huaxingao Jul 1, 2020
7dbd90b
[SPARK-31797][SQL][FOLLOWUP] TIMESTAMP_SECONDS supports fractional input
cloud-fan Jul 1, 2020
f843a5b
[SPARK-32026][CORE][TEST] Add PrometheusServletSuite
erenavsarogullari Jul 1, 2020
6edb20d
[SPARK-31935][SQL][FOLLOWUP] Hadoop file system config should be effe…
cloud-fan Jul 1, 2020
bcf2330
[SPARK-32130][SQL] Disable the JSON option `inferTimestamp` by default
MaxGekk Jul 1, 2020
2a52a1b
[SPARK-32056][SQL][FOLLOW-UP] Coalesce partitions for repartition hi…
viirya Jul 1, 2020
ced8e0e
[SPARK-29465][YARN][WEBUI] Adding Check to not to set UI port (spark.…
Jul 2, 2020
3f7780d
[SPARK-32136][SQL] NormalizeFloatingNumbers should work on null struct
viirya Jul 2, 2020
7fda184
[SPARK-32121][SHUFFLE] Support Windows OS in ExecutorDiskUtils
pan3793 Jul 2, 2020
45fe6b6
[MINOR][DOCS] Pyspark getActiveSession docstring
animenon Jul 2, 2020
f834156
[MINOR][TEST][SQL] Make in-limit.sql more robust
cloud-fan Jul 2, 2020
f082a79
[SPARK-31100][SQL] Check namespace existence when setting namespace
Jul 2, 2020
0acad58
[SPARK-32156][SPARK-31061][TESTS][SQL] Refactor two similar test case…
TJX2014 Jul 2, 2020
42f01e3
[SPARK-32130][SQL][FOLLOWUP] Enable timestamps inference in JsonBench…
MaxGekk Jul 2, 2020
492d5d1
[SPARK-32171][SQL][DOCS] Change file locations for use db and refresh…
huaxingao Jul 5, 2020
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,19 @@

import com.google.common.annotations.VisibleForTesting;

import org.apache.commons.lang3.SystemUtils;
import org.apache.spark.network.util.JavaUtils;

public class ExecutorDiskUtils {

private static final Pattern MULTIPLE_SEPARATORS = Pattern.compile(File.separator + "{2,}");
private static final Pattern MULTIPLE_SEPARATORS;
static {
if (SystemUtils.IS_OS_WINDOWS) {
MULTIPLE_SEPARATORS = Pattern.compile("[/\\\\]+");
} else {
MULTIPLE_SEPARATORS = Pattern.compile("/{2,}");
}
}

/**
* Hashes a filename into the corresponding local directory, in a manner consistent with
Expand All @@ -50,14 +58,18 @@ public static File getFile(String[] localDirs, int subDirsPerLocalDir, String fi
* the internal code in java.io.File would normalize it later, creating a new "foo/bar"
* String copy. Unfortunately, we cannot just reuse the normalization code that java.io.File
* uses, since it is in the package-private class java.io.FileSystem.
*
* On Windows, separator "\" is used instead of "/".
*
* "\\" is a legal character in path name on Unix-like OS, but illegal on Windows.
*/
@VisibleForTesting
static String createNormalizedInternedPathname(String dir1, String dir2, String fname) {
String pathname = dir1 + File.separator + dir2 + File.separator + fname;
Matcher m = MULTIPLE_SEPARATORS.matcher(pathname);
pathname = m.replaceAll("/");
pathname = m.replaceAll(Matcher.quoteReplacement(File.separator));
// A single trailing slash needs to be taken care of separately
if (pathname.length() > 1 && pathname.endsWith("/")) {
if (pathname.length() > 1 && pathname.charAt(pathname.length() - 1) == File.separatorChar) {
pathname = pathname.substring(0, pathname.length() - 1);
}
return pathname.intern();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.io.CharStreams;
import org.apache.commons.lang3.SystemUtils;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;
import org.apache.spark.network.util.MapConfigProvider;
import org.apache.spark.network.util.TransportConf;
Expand Down Expand Up @@ -146,12 +147,19 @@ public void jsonSerializationOfExecutorRegistration() throws IOException {

@Test
public void testNormalizeAndInternPathname() {
assertPathsMatch("/foo", "bar", "baz", "/foo/bar/baz");
assertPathsMatch("//foo/", "bar/", "//baz", "/foo/bar/baz");
assertPathsMatch("foo", "bar", "baz///", "foo/bar/baz");
assertPathsMatch("/foo/", "/bar//", "/baz", "/foo/bar/baz");
assertPathsMatch("/", "", "", "/");
assertPathsMatch("/", "/", "/", "/");
String sep = File.separator;
String expectedPathname = sep + "foo" + sep + "bar" + sep + "baz";
assertPathsMatch("/foo", "bar", "baz", expectedPathname);
assertPathsMatch("//foo/", "bar/", "//baz", expectedPathname);
assertPathsMatch("/foo/", "/bar//", "/baz", expectedPathname);
assertPathsMatch("foo", "bar", "baz///", "foo" + sep + "bar" + sep + "baz");
assertPathsMatch("/", "", "", sep);
assertPathsMatch("/", "/", "/", sep);
if (SystemUtils.IS_OS_WINDOWS) {
assertPathsMatch("/foo\\/", "bar", "baz", expectedPathname);
} else {
assertPathsMatch("/foo\\/", "bar", "baz", sep + "foo\\" + sep + "bar" + sep + "baz");
}
}

private void assertPathsMatch(String p1, String p2, String p3, String expectedPathname) {
Expand All @@ -160,6 +168,6 @@ private void assertPathsMatch(String p1, String p2, String p3, String expectedPa
assertEquals(expectedPathname, normPathname);
File file = new File(normPathname);
String returnedPath = file.getPath();
assertTrue(normPathname == returnedPath);
assertEquals(normPathname, returnedPath);
}
}
11 changes: 9 additions & 2 deletions core/src/main/resources/org/apache/spark/ui/static/utils.js
Original file line number Diff line number Diff line change
Expand Up @@ -56,13 +56,17 @@ function formatTimeMillis(timeMillis) {
return "-";
} else {
var dt = new Date(timeMillis);
return formatDateString(dt);
}
}

function formatDateString(dt) {
return dt.getFullYear() + "-" +
padZeroes(dt.getMonth() + 1) + "-" +
padZeroes(dt.getDate()) + " " +
padZeroes(dt.getHours()) + ":" +
padZeroes(dt.getMinutes()) + ":" +
padZeroes(dt.getSeconds());
}
}

function getTimeZone() {
Expand Down Expand Up @@ -161,7 +165,10 @@ function setDataTableDefaults() {

function formatDate(date) {
if (date <= 0) return "-";
else return date.split(".")[0].replace("T", " ");
else {
var dt = new Date(date.replace("GMT", "Z"))
return formatDateString(dt);
}
}

function createRESTEndPointForExecutorsPage(appId) {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.spark.metrics.sink

import java.util.Properties

import scala.collection.JavaConverters._

import com.codahale.metrics.{Counter, Gauge, MetricRegistry}
import org.scalatest.PrivateMethodTester

import org.apache.spark.SparkFunSuite

class PrometheusServletSuite extends SparkFunSuite with PrivateMethodTester {

  test("register metrics") {
    val servlet = createPrometheusServlet()

    // A gauge that always reports the same value; registered under two names below.
    val constantGauge = new Gauge[Double] {
      override def getValue: Double = 5.0
    }
    val incrementedCounter = new Counter
    incrementedCounter.inc(10)

    servlet.registry.register("gauge1", constantGauge)
    servlet.registry.register("gauge2", constantGauge)
    servlet.registry.register("counter1", incrementedCounter)

    // The registry should expose exactly the names registered above.
    assert(servlet.registry.getGauges.keySet.asScala.equals(Set("gauge1", "gauge2")),
      "Should contain 2 gauges metrics registered")
    assert(servlet.registry.getCounters.keySet.asScala.equals(Set("counter1")),
      "Should contain 1 counter metric registered")

    // Registered values must round-trip through the registry unchanged.
    val gauges = servlet.registry.getGauges.values.asScala
    assert(gauges.size == 2)
    for (g <- gauges) {
      assert(g.getValue == 5.0)
    }

    val counters = servlet.registry.getCounters.values.asScala
    assert(counters.size == 1)
    for (c <- counters) {
      assert(c.getCount == 10)
    }
  }

  test("normalize key") {
    // Per the expected value below, dots and dashes in the Dropwizard metric key
    // become underscores, with a "metrics_" prefix and trailing "_" appended.
    val rawKey = "local-1592132938718.driver.LiveListenerBus." +
      "listenerProcessingTime.org.apache.spark.HeartbeatReceiver"
    val servlet = createPrometheusServlet()
    val normalized = servlet invokePrivate PrivateMethod[String]('normalizeKey)(rawKey)
    assert(normalized == "metrics_local_1592132938718_driver_LiveListenerBus_" +
      "listenerProcessingTime_org_apache_spark_HeartbeatReceiver_")
  }

  /** Builds a sink over a fresh registry; no security manager is needed for these tests. */
  private def createPrometheusServlet(): PrometheusServlet =
    new PrometheusServlet(new Properties, new MetricRegistry, securityMgr = null)
}
4 changes: 2 additions & 2 deletions docs/_data/menu-sql.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@
- text: REPAIR TABLE
url: sql-ref-syntax-ddl-repair-table.html
- text: USE DATABASE
url: sql-ref-syntax-qry-select-usedb.html
url: sql-ref-syntax-ddl-usedb.html
- text: Data Manipulation Statements
url: sql-ref-syntax-dml.html
subitems:
Expand Down Expand Up @@ -207,7 +207,7 @@
- text: CLEAR CACHE
url: sql-ref-syntax-aux-cache-clear-cache.html
- text: REFRESH TABLE
url: sql-ref-syntax-aux-refresh-table.html
url: sql-ref-syntax-aux-cache-refresh-table.html
- text: REFRESH
url: sql-ref-syntax-aux-cache-refresh.html
- text: DESCRIBE
Expand Down
2 changes: 1 addition & 1 deletion docs/spark-standalone.md
Original file line number Diff line number Diff line change
Expand Up @@ -359,7 +359,7 @@ Spark Standalone has 2 parts, the first is configuring the resources for the Wor

The user must configure the Workers to have a set of resources available so that it can assign them out to Executors. The <code>spark.worker.resource.{resourceName}.amount</code> is used to control the amount of each resource the worker has allocated. The user must also specify either <code>spark.worker.resourcesFile</code> or <code>spark.worker.resource.{resourceName}.discoveryScript</code> to specify how the Worker discovers the resources its assigned. See the descriptions above for each of those to see which method works best for your setup.

The second part is running an application on Spark Standalone. The only special case from the standard Spark resource configs is when you are running the Driver in client mode. For a Driver in client mode, the user can specify the resources it uses via <code>spark.driver.resourcesfile</code> or <code>spark.driver.resource.{resourceName}.discoveryScript</code>. If the Driver is running on the same host as other Drivers, please make sure the resources file or discovery script only returns resources that do not conflict with other Drivers running on the same node.
The second part is running an application on Spark Standalone. The only special case from the standard Spark resource configs is when you are running the Driver in client mode. For a Driver in client mode, the user can specify the resources it uses via <code>spark.driver.resourcesFile</code> or <code>spark.driver.resource.{resourceName}.discoveryScript</code>. If the Driver is running on the same host as other Drivers, please make sure the resources file or discovery script only returns resources that do not conflict with other Drivers running on the same node.

Note, the user does not need to specify a discovery script when submitting an application as the Worker will start each Executor with the resources it allocates to it.

Expand Down
6 changes: 5 additions & 1 deletion docs/sql-migration-guide.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,11 @@ license: |
- In Spark 3.1, `from_unixtime`, `unix_timestamp`, `to_unix_timestamp`, `to_timestamp` and `to_date` will fail if the specified datetime pattern is invalid. In Spark 3.0 or earlier, they result in `NULL`.

- In Spark 3.1, casting numeric to timestamp will be forbidden by default. It's strongly recommended to use dedicated functions: TIMESTAMP_SECONDS, TIMESTAMP_MILLIS and TIMESTAMP_MICROS. Or you can set `spark.sql.legacy.allowCastNumericToTimestamp` to true to work around it. See more details in SPARK-31710.


## Upgrading from Spark SQL 3.0 to 3.0.1

- In Spark 3.0, JSON datasource and JSON function `schema_of_json` infer TimestampType from string values if they match to the pattern defined by the JSON option `timestampFormat`. Since version 3.0.1, the timestamp type inference is disabled by default. Set the JSON option `inferTimestamp` to `true` to enable such type inference.

## Upgrading from Spark SQL 2.4 to 3.0

### Dataset/DataFrame APIs
Expand Down
2 changes: 1 addition & 1 deletion docs/sql-ref-syntax-aux-cache-cache-table.md
Original file line number Diff line number Diff line change
Expand Up @@ -78,5 +78,5 @@ CACHE TABLE testCache OPTIONS ('storageLevel' 'DISK_ONLY') SELECT * FROM testDat

* [CLEAR CACHE](sql-ref-syntax-aux-cache-clear-cache.html)
* [UNCACHE TABLE](sql-ref-syntax-aux-cache-uncache-table.html)
* [REFRESH TABLE](sql-ref-syntax-aux-refresh-table.html)
* [REFRESH TABLE](sql-ref-syntax-aux-cache-refresh-table.html)
* [REFRESH](sql-ref-syntax-aux-cache-refresh.html)
2 changes: 1 addition & 1 deletion docs/sql-ref-syntax-aux-cache-clear-cache.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,5 +39,5 @@ CLEAR CACHE;

* [CACHE TABLE](sql-ref-syntax-aux-cache-cache-table.html)
* [UNCACHE TABLE](sql-ref-syntax-aux-cache-uncache-table.html)
* [REFRESH TABLE](sql-ref-syntax-aux-refresh-table.html)
* [REFRESH TABLE](sql-ref-syntax-aux-cache-refresh-table.html)
* [REFRESH](sql-ref-syntax-aux-cache-refresh.html)
2 changes: 1 addition & 1 deletion docs/sql-ref-syntax-aux-cache-refresh.md
Original file line number Diff line number Diff line change
Expand Up @@ -53,4 +53,4 @@ REFRESH "hdfs://path/to/table";
* [CACHE TABLE](sql-ref-syntax-aux-cache-cache-table.html)
* [CLEAR CACHE](sql-ref-syntax-aux-cache-clear-cache.html)
* [UNCACHE TABLE](sql-ref-syntax-aux-cache-uncache-table.html)
* [REFRESH TABLE](sql-ref-syntax-aux-refresh-table.html)
* [REFRESH TABLE](sql-ref-syntax-aux-cache-refresh-table.html)
2 changes: 1 addition & 1 deletion docs/sql-ref-syntax-aux-cache-uncache-table.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,5 +48,5 @@ UNCACHE TABLE t1;

* [CACHE TABLE](sql-ref-syntax-aux-cache-cache-table.html)
* [CLEAR CACHE](sql-ref-syntax-aux-cache-clear-cache.html)
* [REFRESH TABLE](sql-ref-syntax-aux-refresh-table.html)
* [REFRESH TABLE](sql-ref-syntax-aux-cache-refresh-table.html)
* [REFRESH](sql-ref-syntax-aux-cache-refresh.html)
2 changes: 1 addition & 1 deletion docs/sql-ref-syntax-aux-cache.md
Original file line number Diff line number Diff line change
Expand Up @@ -22,5 +22,5 @@ license: |
* [CACHE TABLE statement](sql-ref-syntax-aux-cache-cache-table.html)
* [UNCACHE TABLE statement](sql-ref-syntax-aux-cache-uncache-table.html)
* [CLEAR CACHE statement](sql-ref-syntax-aux-cache-clear-cache.html)
* [REFRESH TABLE statement](sql-ref-syntax-aux-refresh-table.html)
* [REFRESH TABLE statement](sql-ref-syntax-aux-cache-refresh-table.html)
* [REFRESH statement](sql-ref-syntax-aux-cache-refresh.html)
2 changes: 1 addition & 1 deletion docs/sql-ref-syntax-ddl.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,4 +34,4 @@ Data Definition Statements are used to create or modify the structure of databas
* [DROP VIEW](sql-ref-syntax-ddl-drop-view.html)
* [TRUNCATE TABLE](sql-ref-syntax-ddl-truncate-table.html)
* [REPAIR TABLE](sql-ref-syntax-ddl-repair-table.html)
* [USE DATABASE](sql-ref-syntax-qry-select-usedb.html)
* [USE DATABASE](sql-ref-syntax-ddl-usedb.html)
4 changes: 2 additions & 2 deletions docs/sql-ref-syntax.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ Spark SQL is Apache Spark's module for working with structured data. The SQL Syn
* [DROP VIEW](sql-ref-syntax-ddl-drop-view.html)
* [REPAIR TABLE](sql-ref-syntax-ddl-repair-table.html)
* [TRUNCATE TABLE](sql-ref-syntax-ddl-truncate-table.html)
* [USE DATABASE](sql-ref-syntax-qry-select-usedb.html)
* [USE DATABASE](sql-ref-syntax-ddl-usedb.html)

### DML Statements

Expand Down Expand Up @@ -82,7 +82,7 @@ Spark SQL is Apache Spark's module for working with structured data. The SQL Syn
* [LIST FILE](sql-ref-syntax-aux-resource-mgmt-list-file.html)
* [LIST JAR](sql-ref-syntax-aux-resource-mgmt-list-jar.html)
* [REFRESH](sql-ref-syntax-aux-cache-refresh.html)
* [REFRESH TABLE](sql-ref-syntax-aux-refresh-table.html)
* [REFRESH TABLE](sql-ref-syntax-aux-cache-refresh-table.html)
* [RESET](sql-ref-syntax-aux-conf-mgmt-reset.html)
* [SET](sql-ref-syntax-aux-conf-mgmt-set.html)
* [SHOW COLUMNS](sql-ref-syntax-aux-show-columns.html)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,13 +31,13 @@ class AvroDataSourceV2 extends FileDataSourceV2 {

override def getTable(options: CaseInsensitiveStringMap): Table = {
val paths = getPaths(options)
val tableName = getTableName(paths)
val tableName = getTableName(options, paths)
AvroTable(tableName, sparkSession, options, paths, None, fallbackFileFormat)
}

override def getTable(options: CaseInsensitiveStringMap, schema: StructType): Table = {
val paths = getPaths(options)
val tableName = getTableName(paths)
val tableName = getTableName(options, paths)
AvroTable(tableName, sparkSession, options, paths, Some(schema), fallbackFileFormat)
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ private[classification] trait ClassificationSummary extends Serializable {
@Since("3.1.0")
def labelCol: String

/** Field in "predictions" which gives the weight of each instance as a vector. */
/** Field in "predictions" which gives the weight of each instance. */
@Since("3.1.0")
def weightCol: String

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import org.apache.spark.annotation.Since
import org.apache.spark.ml.{PredictionModel, Predictor, PredictorParams}
import org.apache.spark.ml.feature.{Instance, LabeledPoint}
import org.apache.spark.ml.linalg.{Vector, VectorUDT}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.param.shared.HasRawPredictionCol
import org.apache.spark.ml.util.{MetadataUtils, SchemaUtils}
import org.apache.spark.rdd.RDD
Expand Down Expand Up @@ -269,4 +270,26 @@ abstract class ClassificationModel[FeaturesType, M <: ClassificationModel[Featur
* @return predicted label
*/
protected def raw2prediction(rawPrediction: Vector): Double = rawPrediction.argmax

/**
 * Returns a model whose rawPrediction and prediction output columns are both set.
 * When both columns are already configured this model is returned as-is; otherwise a
 * copy is made and the missing column(s) are assigned fresh, collision-free names.
 *
 * @return the model to evaluate with, plus its rawPrediction and prediction column names
 */
private[classification] def findSummaryModel():
  (ClassificationModel[FeaturesType, M], String, String) = {
  // Generate a column name that cannot clash with anything in the user's dataset.
  def uniqueCol(prefix: String): String = prefix + java.util.UUID.randomUUID.toString

  val rawMissing = $(rawPredictionCol).isEmpty
  val predMissing = $(predictionCol).isEmpty
  val model = (rawMissing, predMissing) match {
    case (true, true) =>
      copy(ParamMap.empty)
        .setRawPredictionCol(uniqueCol("rawPrediction_"))
        .setPredictionCol(uniqueCol("prediction_"))
    case (true, false) =>
      copy(ParamMap.empty).setRawPredictionCol(uniqueCol("rawPrediction_"))
    case (false, true) =>
      copy(ParamMap.empty).setPredictionCol(uniqueCol("prediction_"))
    case (false, false) =>
      this
  }
  (model, model.getRawPredictionCol, model.getPredictionCol)
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -394,27 +394,6 @@ class LinearSVCModel private[classification] (
@Since("3.1.0")
override def summary: LinearSVCTrainingSummary = super.summary

/**
* If the rawPrediction and prediction columns are set, this method returns the current model,
* otherwise it generates new columns for them and sets them as columns on a new copy of
* the current model
*/
private[classification] def findSummaryModel(): (LinearSVCModel, String, String) = {
val model = if ($(rawPredictionCol).isEmpty && $(predictionCol).isEmpty) {
copy(ParamMap.empty)
.setRawPredictionCol("rawPrediction_" + java.util.UUID.randomUUID.toString)
.setPredictionCol("prediction_" + java.util.UUID.randomUUID.toString)
} else if ($(rawPredictionCol).isEmpty) {
copy(ParamMap.empty).setRawPredictionCol("rawPrediction_" +
java.util.UUID.randomUUID.toString)
} else if ($(predictionCol).isEmpty) {
copy(ParamMap.empty).setPredictionCol("prediction_" + java.util.UUID.randomUUID.toString)
} else {
this
}
(model, model.getRawPredictionCol, model.getPredictionCol)
}

/**
* Evaluates the model on a test dataset.
*
Expand Down
Loading