[SPARK-34265][WIP][PYTHON][SQL] Instrument Python UDF execution using SQL Metrics #31367
python/pyspark/sql/tests/test_pandas_sqlmetrics.py (new file)

@@ -0,0 +1,66 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import unittest

from pyspark.sql.functions import pandas_udf
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
    pandas_requirement_message, pyarrow_requirement_message


@unittest.skipIf(
    not have_pandas or not have_pyarrow,
    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
class PandasSQLMetrics(ReusedSQLTestCase):

    def test_pandas_sql_metrics_basic(self):
        PythonSQLMetrics = [
            "time spent executing",
            "time spent sending data",
            "time spent sending code",
            "bytes of code sent",
            "bytes of data returned",
            "bytes of data sent",
            "number of batches returned",
            "number of batches processed",
            "number of rows returned",
            "number of rows processed"
        ]

        @pandas_udf("long")
        def test_pandas(col1):
            return col1 * col1

        res = self.spark.range(10).select(test_pandas("id")).collect()

        statusStore = self.spark._jsparkSession.sharedState().statusStore()
        executionMetrics = statusStore.execution(0).get().metrics().mkString()

        for metric in PythonSQLMetrics:
            self.assertIn(metric, executionMetrics)


if __name__ == "__main__":
    from pyspark.sql.tests.test_pandas_sqlmetrics import *  # noqa: F401

    try:
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
sql/core/src/main/scala/org/apache/spark/sql/execution/python/BatchEvalPythonExec.scala
@@ -32,7 +32,7 @@ import org.apache.spark.sql.types.{StructField, StructType}
  * A physical plan that evaluates a [[PythonUDF]]
  */
 case class BatchEvalPythonExec(udfs: Seq[PythonUDF], resultAttrs: Seq[Attribute], child: SparkPlan)
-  extends EvalPythonExec {
+  extends EvalPythonExec with PythonSQLMetrics {

   protected override def evaluate(
       funcs: Seq[ChainedPythonFunctions],
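The PythonSQLMetrics mixin added here is referenced in this hunk, but its definition is not part of this excerpt. The following is only a hypothetical sketch of what such a trait could look like, built from Spark's SQLMetrics factory methods: the metric display names are taken from the Python test above, while the val names and their pairing with those names are guesses based on the call sites further down; the actual trait in the PR may differ.

import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}

// Hypothetical sketch only; the real trait in the PR may differ.
// Declares one SQLMetric per counter that the instrumented operators update
// and exposes them through the operator's metrics map, which is how they
// reach the SQL UI and the SQLAppStatusStore read by the Python test.
trait PythonSQLMetrics extends SparkPlan {

  lazy val pythonExecTime: SQLMetric =
    SQLMetrics.createNanoTimingMetric(sparkContext, "time spent executing")
  lazy val pythonDataSerializeTime: SQLMetric =
    SQLMetrics.createNanoTimingMetric(sparkContext, "time spent sending data")
  lazy val pythonCodeSerializeTime: SQLMetric =
    SQLMetrics.createNanoTimingMetric(sparkContext, "time spent sending code")
  lazy val pythonCodeSent: SQLMetric =
    SQLMetrics.createSizeMetric(sparkContext, "bytes of code sent")
  lazy val pythonDataReceived: SQLMetric =
    SQLMetrics.createSizeMetric(sparkContext, "bytes of data returned")
  lazy val pythonDataSent: SQLMetric =
    SQLMetrics.createSizeMetric(sparkContext, "bytes of data sent")
  lazy val pythonNumBatchesReceived: SQLMetric =
    SQLMetrics.createMetric(sparkContext, "number of batches returned")
  lazy val pythonNumBatchesSent: SQLMetric =
    SQLMetrics.createMetric(sparkContext, "number of batches processed")
  lazy val pythonNumRowsReceived: SQLMetric =
    SQLMetrics.createMetric(sparkContext, "number of rows returned")
  lazy val pythonNumRowsSent: SQLMetric =
    SQLMetrics.createMetric(sparkContext, "number of rows processed")

  // Register the metrics on the operator so they are tracked per execution.
  override lazy val metrics: Map[String, SQLMetric] = Map(
    "pythonExecTime" -> pythonExecTime,
    "pythonDataSerializeTime" -> pythonDataSerializeTime,
    "pythonCodeSerializeTime" -> pythonCodeSerializeTime,
    "pythonCodeSent" -> pythonCodeSent,
    "pythonDataReceived" -> pythonDataReceived,
    "pythonDataSent" -> pythonDataSent,
    "pythonNumBatchesReceived" -> pythonNumBatchesReceived,
    "pythonNumBatchesSent" -> pythonNumBatchesSent,
    "pythonNumRowsReceived" -> pythonNumRowsReceived,
    "pythonNumRowsSent" -> pythonNumRowsSent)
}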
@@ -61,6 +61,7 @@ case class BatchEvalPythonExec(udfs: Seq[PythonUDF], resultAttrs: Seq[Attribute]
     // Input iterator to Python: input rows are grouped so we send them in batches to Python.
     // For each row, add it to the queue.
     val inputIterator = iter.map { row =>
+      pythonNumRowsSent += 1
       if (needConversion) {
         EvaluatePython.toJava(row, schema)
       } else {
@@ -74,10 +75,22 @@ case class BatchEvalPythonExec(udfs: Seq[PythonUDF], resultAttrs: Seq[Attribute]
         }
         fields
       }
-    }.grouped(100).map(x => pickle.dumps(x.toArray))
+    }.grouped(100).map(x => {
+      pythonNumBatchesSent += 1
+      pickle.dumps(x.toArray)
+    })

     // Output iterator for results from Python.
-    val outputIterator = new PythonUDFRunner(funcs, PythonEvalType.SQL_BATCHED_UDF, argOffsets)
+    val outputIterator = new PythonUDFRunner(
+      funcs,
+      PythonEvalType.SQL_BATCHED_UDF,
+      argOffsets,
+      pythonExecTime,
+      pythonDataSerializeTime,
+      pythonCodeSerializeTime,
+      pythonCodeSent,
+      pythonDataReceived,
+      pythonDataSent,
+      pythonNumBatchesReceived)
       .compute(inputIterator, context.partitionId(), context)

     val unpickle = new Unpickler
@@ -94,12 +107,19 @@ case class BatchEvalPythonExec(udfs: Seq[PythonUDF], resultAttrs: Seq[Attribute]
         val unpickledBatch = unpickle.loads(pickedResult)
         unpickledBatch.asInstanceOf[java.util.ArrayList[Any]].asScala
       }.map { result =>
+        pythonNumRowsReceived += 1
+        val startTime = System.nanoTime()
         if (udfs.length == 1) {
           // fast path for single UDF
           mutableRow(0) = fromJava(result)
+          val deltaTime = System.nanoTime() - startTime
+          pythonExecTime += deltaTime
           mutableRow
         } else {
-          fromJava(result).asInstanceOf[InternalRow]
+          val res = fromJava(result).asInstanceOf[InternalRow]
+          val deltaTime = System.nanoTime() - startTime
+          pythonExecTime += deltaTime
+
+          res
         }
       }
     }
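The per-row timing added in the last hunk follows one simple pattern: read System.nanoTime() before the work and add the elapsed delta to the SQLMetric afterwards. Below is a small, self-contained sketch of that pattern; timeAndRecord is a hypothetical helper and is not part of the PR.

import org.apache.spark.sql.execution.metric.SQLMetric

// Hypothetical helper illustrating the nanoTime-delta pattern used above:
// run a block of work and add the elapsed nanoseconds to the given SQLMetric.
def timeAndRecord[T](metric: SQLMetric)(body: => T): T = {
  val startTime = System.nanoTime()
  val result = body
  metric += System.nanoTime() - startTime
  result
}

// Usage, mirroring the single-UDF fast path above:
//   mutableRow(0) = timeAndRecord(pythonExecTime) { fromJava(result) }

Wrapping the measurement in one place like this would keep the metric updates consistent if more code paths need timing later.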