Changes from 1 commit
DataSourceRDD.scala:

@@ -26,7 +26,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.connector.read.{InputPartition, PartitionReader, PartitionReaderFactory}
 import org.apache.spark.sql.errors.QueryExecutionErrors
-import org.apache.spark.sql.execution.metric.SQLMetric
+import org.apache.spark.sql.execution.metric.{CustomMetrics, SQLMetric}
 import org.apache.spark.sql.vectorized.ColumnarBatch
 
 class DataSourceRDDPartition(val index: Int, val inputPartition: InputPartition)
@@ -66,7 +66,17 @@ class DataSourceRDD(
         new PartitionIterator[InternalRow](rowReader, customMetrics))
       (iter, rowReader)
     }
-    context.addTaskCompletionListener[Unit](_ => reader.close())
+    context.addTaskCompletionListener[Unit] { _ =>
+      // In case of early stopping before consuming the entire iterator,
+      // we need to do one more metric update at the end of the task.
+      reader.currentMetricsValues.foreach { metric =>
+        assert(customMetrics.contains(metric.name()),
+          s"Custom metrics ${customMetrics.keys.mkString(", ")} do not contain the metric " +
+            s"${metric.name()}")
+        customMetrics(metric.name()).set(metric.value())
+      }
+      reader.close()
+    }
     // TODO: SPARK-25083 remove the type erasure hack in data source scan
     new InterruptibleIterator(context, iter.asInstanceOf[Iterator[InternalRow]])
   }
@@ -81,6 +91,8 @@ private class PartitionIterator[T](
     customMetrics: Map[String, SQLMetric]) extends Iterator[T] {
   private[this] var valuePrepared = false
 
+  private var numRow = 0L
+
   override def hasNext: Boolean = {
     if (!valuePrepared) {
       valuePrepared = reader.next()
@@ -92,12 +104,15 @@
     if (!hasNext) {
       throw QueryExecutionErrors.endOfStreamError()
     }
-    reader.currentMetricsValues.foreach { metric =>
-      assert(customMetrics.contains(metric.name()),
-        s"Custom metrics ${customMetrics.keys.mkString(", ")} do not contain the metric " +
-          s"${metric.name()}")
-      customMetrics(metric.name()).set(metric.value())
+    if (numRow % CustomMetrics.numRowsPerUpdate == 0) {
+      reader.currentMetricsValues.foreach { metric =>
+        assert(customMetrics.contains(metric.name()),
+          s"Custom metrics ${customMetrics.keys.mkString(", ")} do not contain the metric " +
+            s"${metric.name()}")
+        customMetrics(metric.name()).set(metric.value())
+      }
     }
+    numRow += 1
     valuePrepared = false
     reader.get()
   }

Review thread on the metric-update loop in next():

Contributor: can we move it into a method to reuse code?

Member (Author): sure.

Member (Author): Added a reused method.
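The reused method is not part of this commit; below is a minimal sketch of what such a helper could look like, assuming it lands on the existing CustomMetrics object and that CustomTaskMetric lives next to CustomMetric in org.apache.spark.sql.connector. The name updateMetrics and the exact signature are assumptions, not the actual follow-up change.

import org.apache.spark.sql.connector.CustomTaskMetric
import org.apache.spark.sql.execution.metric.SQLMetric

object CustomMetrics {
  // Shared by the batch and continuous call sites: copy each task-side
  // metric value into the corresponding registered SQLMetric.
  def updateMetrics(
      currentMetricsValues: Seq[CustomTaskMetric],
      customMetrics: Map[String, SQLMetric]): Unit = {
    currentMetricsValues.foreach { metric =>
      assert(customMetrics.contains(metric.name()),
        s"Custom metrics ${customMetrics.keys.mkString(", ")} do not contain the metric " +
          s"${metric.name()}")
      customMetrics(metric.name()).set(metric.value())
    }
  }
}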
CustomMetrics.scala:

@@ -25,6 +25,8 @@ import org.apache.spark.sql.connector.CustomMetric
 object CustomMetrics {
   private[spark] val V2_CUSTOM = "v2Custom"
 
+  private[spark] val numRowsPerUpdate = 100L
+
   /**
    * Given a class name, builds and returns a metric type for a V2 custom metric class
    * `CustomMetric`.

Review thread on numRowsPerUpdate:

Contributor: does it need to be a long?

Member (Author): numRow is a long; I guess this can be just an int.

Member (Author): Made it an int.
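For context on what is polled every numRowsPerUpdate rows: a data source v2 connector exposes a metric as a CustomMetric (driver-side aggregation) paired with a CustomTaskMetric (the per-task value a reader returns from currentMetricsValues). A minimal hypothetical pair, assuming CustomTaskMetric sits alongside CustomMetric in org.apache.spark.sql.connector:

import org.apache.spark.sql.connector.{CustomMetric, CustomTaskMetric}

// Hypothetical connector metric: tracks bytes read by a scan.
class BytesReadMetric extends CustomMetric {
  override def name(): String = "bytesRead"
  override def description(): String = "total bytes read by the scan"
  // Aggregates the per-task values collected on the driver.
  override def aggregateTaskMetrics(taskMetrics: Array[Long]): String =
    s"${taskMetrics.sum} bytes"
}

// The per-task counterpart a PartitionReader would return from
// currentMetricsValues; name() must match the CustomMetric above.
class BytesReadTaskMetric(v: Long) extends CustomTaskMetric {
  override def name(): String = "bytesRead"
  override def value(): Long = v
}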
ContinuousDataSourceRDD.scala:

@@ -22,7 +22,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.connector.read.InputPartition
 import org.apache.spark.sql.connector.read.streaming.ContinuousPartitionReaderFactory
-import org.apache.spark.sql.execution.metric.SQLMetric
+import org.apache.spark.sql.execution.metric.{CustomMetrics, SQLMetric}
 import org.apache.spark.sql.types.StructType
 import org.apache.spark.util.NextIterator
@@ -92,10 +92,18 @@ class ContinuousDataSourceRDD(
 
     val partitionReader = readerForPartition.getPartitionReader()
     new NextIterator[InternalRow] {
+      private var numRow = 0L
+
       override def getNext(): InternalRow = {
-        partitionReader.currentMetricsValues.foreach { metric =>
-          customMetrics(metric.name()).set(metric.value())
+        if (numRow % CustomMetrics.numRowsPerUpdate == 0) {
+          partitionReader.currentMetricsValues.foreach { metric =>
+            assert(customMetrics.contains(metric.name()),
+              s"Custom metrics ${customMetrics.keys.mkString(", ")} do not contain the metric " +
+                s"${metric.name()}")
+            customMetrics(metric.name()).set(metric.value())
+          }
         }
+        numRow += 1
         readerForPartition.next() match {
           case null =>
             finished = true

Review thread on the assert:

Contributor: I'm not sure how useful the assert is here. It's for internal errors only, and customMetrics(metric.name()) will fail too.

Member (Author): I can remove it. I also thought it was unnecessary; it was only added to address an earlier review comment.

Member (Author): Removed.
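With a helper like the one sketched earlier, both call sites would presumably reduce to a short guarded call (again using the assumed updateMetrics name):

// Rate-limit metric updates: refresh at most once every
// CustomMetrics.numRowsPerUpdate rows instead of on every row.
if (numRow % CustomMetrics.numRowsPerUpdate == 0) {
  CustomMetrics.updateMetrics(partitionReader.currentMetricsValues, customMetrics)
}
numRow += 1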