[SPARK-24638][SQL] StringStartsWith support push down #21623
sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
```diff
@@ -378,6 +378,14 @@ object SQLConf {
     .booleanConf
     .createWithDefault(true)

+  val PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED =
+    buildConf("spark.sql.parquet.filterPushdown.string.startsWith")
+      .doc("If true, enables Parquet filter push-down optimization for string starts with. " +
+        "This configuration only has an effect when 'spark.sql.parquet.filterPushdown' is enabled.")
+      .internal()
+      .booleanConf
+      .createWithDefault(true)
+
   val PARQUET_WRITE_LEGACY_FORMAT = buildConf("spark.sql.parquet.writeLegacyFormat")
     .doc("Whether to be compatible with the legacy Parquet format adopted by Spark 1.4 and prior " +
       "versions, when converting Parquet schema to Spark SQL schema and vice versa.")
```
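The new conf defaults to true and, per its doc string, only takes effect while the umbrella flag `spark.sql.parquet.filterPushdown` is enabled. A quick usage sketch (assuming an active `SparkSession` named `spark`):

```scala
// Keep general Parquet filter push-down on, but opt out of the new
// StringStartsWith push-down only (e.g. to rule it out while debugging).
spark.conf.set("spark.sql.parquet.filterPushdown", "true")
spark.conf.set("spark.sql.parquet.filterPushdown.string.startsWith", "false")
```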
```diff
@@ -1420,6 +1428,9 @@ class SQLConf extends Serializable with Logging {

   def parquetFilterPushDownDate: Boolean = getConf(PARQUET_FILTER_PUSHDOWN_DATE_ENABLED)

+  def parquetFilterPushDownStringStartWith: Boolean =
+    getConf(PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED)
+
   def orcFilterPushDown: Boolean = getConf(ORC_FILTER_PUSHDOWN_ENABLED)

   def verifyPartitionPath: Boolean = getConf(HIVE_VERIFY_PARTITION_PATH)
```
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilters.scala
```diff
@@ -22,16 +22,23 @@ import java.sql.Date
 import org.apache.parquet.filter2.predicate._
 import org.apache.parquet.filter2.predicate.FilterApi._
 import org.apache.parquet.io.api.Binary
+import org.apache.parquet.schema.PrimitiveComparator

 import org.apache.spark.sql.catalyst.util.DateTimeUtils
 import org.apache.spark.sql.catalyst.util.DateTimeUtils.SQLDate
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.sources
 import org.apache.spark.sql.types._
+import org.apache.spark.unsafe.types.UTF8String

 /**
  * Some utility function to convert Spark data source filters to Parquet filters.
  */
-private[parquet] class ParquetFilters(pushDownDate: Boolean) {
+private[parquet] class ParquetFilters() {
+
+  val sqlConf: SQLConf = SQLConf.get
+  val pushDownDate = sqlConf.parquetFilterPushDownDate
+  val pushDownStartWith = sqlConf.parquetFilterPushDownStringStartWith

   private def dateToDays(date: Date): SQLDate = {
     DateTimeUtils.fromJavaDate(date)
```
```diff
@@ -270,6 +277,36 @@ private[parquet] class ParquetFilters(pushDownDate: Boolean) {
     case sources.Not(pred) =>
       createFilter(schema, pred).map(FilterApi.not)

+    case sources.StringStartsWith(name, prefix) if pushDownStartWith && canMakeFilterOn(name) =>
+      Option(prefix).map { v =>
+        FilterApi.userDefined(binaryColumn(name),
+          new UserDefinedPredicate[Binary] with Serializable {
+            private val strToBinary = Binary.fromReusedByteArray(v.getBytes)
+            private val size = strToBinary.length
+
+            override def canDrop(statistics: Statistics[Binary]): Boolean = {
+              val comparator = PrimitiveComparator.UNSIGNED_LEXICOGRAPHICAL_BINARY_COMPARATOR
+              val max = statistics.getMax
+              val min = statistics.getMin
+              comparator.compare(max.slice(0, math.min(size, max.length)), strToBinary) < 0 ||
+                comparator.compare(min.slice(0, math.min(size, min.length)), strToBinary) > 0
+            }
+
+            override def inverseCanDrop(statistics: Statistics[Binary]): Boolean = {
+              val comparator = PrimitiveComparator.UNSIGNED_LEXICOGRAPHICAL_BINARY_COMPARATOR
+              val max = statistics.getMax
+              val min = statistics.getMin
+              comparator.compare(max.slice(0, math.min(size, max.length)), strToBinary) == 0 &&
+                comparator.compare(min.slice(0, math.min(size, min.length)), strToBinary) == 0
+            }
+
+            override def keep(value: Binary): Boolean = {
+              UTF8String.fromBytes(value.getBytes).startsWith(UTF8String.fromString(v))
+            }
+          }
+        )
+      }
+
     case _ => None
   }
 }
```
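The row-group pruning logic above deserves a note. `canDrop` may return true only when no value in the row group can start with the prefix: if the group's max, truncated to the prefix length, is strictly below the prefix, every value is too small; if the truncated min is strictly above it, every value is too large. `inverseCanDrop` serves `Not(StringStartsWith)`: the group is skipped only when both truncated bounds equal the prefix, which forces every value in between to match it. A minimal self-contained sketch of the same rule over plain `String`s (names such as `canDropGroup` are illustrative, and `String` ordering stands in for Parquet's unsigned byte comparator, which coincides with it for ASCII data):

```scala
// Illustrative model of the pruning rule, using plain Strings in place of
// Parquet Binary statistics. Not the patch's code; names are hypothetical.
object PrefixPruneDemo {
  // Skip the group for "col startsWith prefix" when no value in [min, max]
  // can start with prefix: compare the bounds truncated to the prefix length.
  def canDropGroup(min: String, max: String, prefix: String): Boolean =
    max.take(prefix.length) < prefix || min.take(prefix.length) > prefix

  // For NOT startsWith, skip only when *every* value matches the prefix,
  // i.e. both truncated bounds equal the prefix itself.
  def inverseCanDrop(min: String, max: String, prefix: String): Boolean =
    max.take(prefix.length) == prefix && min.take(prefix.length) == prefix

  def main(args: Array[String]): Unit = {
    assert(!canDropGroup("1str1", "4str4", "2str"))  // may contain "2str..."
    assert(canDropGroup("1str1", "1str9", "2str"))   // whole group below prefix
    assert(inverseCanDrop("2str1", "2str9", "2str")) // every value matches
  }
}
```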
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
```diff
@@ -55,7 +55,7 @@ import org.apache.spark.util.{AccumulatorContext, AccumulatorV2}
  */
 class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContext {

-  private lazy val parquetFilters = new ParquetFilters(conf.parquetFilterPushDownDate)
+  private lazy val parquetFilters = new ParquetFilters()

   override def beforeEach(): Unit = {
     super.beforeEach()
```
```diff
@@ -82,6 +82,7 @@ class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContex
     withSQLConf(
       SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
       SQLConf.PARQUET_FILTER_PUSHDOWN_DATE_ENABLED.key -> "true",
+      SQLConf.PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED.key -> "true",
       SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
       val query = df
         .select(output.map(e => Column(e)): _*)
```
```diff
@@ -660,6 +661,56 @@ class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContex
       assert(df.where("col > 0").count() === 2)
     }
   }

+  test("filter pushdown - StringStartsWith") {
+    withParquetDataFrame((1 to 4).map(i => Tuple1(i + "str" + i))) { implicit df =>
```
Contributor: I think that all of these tests go through the …

Author: Added.
```diff
+      // Test canDrop()
+      checkFilterPredicate(
+        '_1.startsWith("").asInstanceOf[Predicate],
+        classOf[UserDefinedByInstance[_, _]],
+        Seq("1str1", "2str2", "3str3", "4str4").map(Row(_)))
+
+      Seq("2", "2s", "2st", "2str", "2str2").foreach { prefix =>
+        checkFilterPredicate(
+          '_1.startsWith(prefix).asInstanceOf[Predicate],
+          classOf[UserDefinedByInstance[_, _]],
+          "2str2")
+      }
+
+      Seq("2S", "null", "2str22").foreach { prefix =>
+        checkFilterPredicate(
+          '_1.startsWith(prefix).asInstanceOf[Predicate],
+          classOf[UserDefinedByInstance[_, _]],
+          Seq.empty[Row])
+      }
+
+      // Test inverseCanDrop()
+      checkFilterPredicate(
+        !'_1.startsWith("").asInstanceOf[Predicate],
+        classOf[UserDefinedByInstance[_, _]],
+        Seq().map(Row(_)))
+
+      Seq("2", "2s", "2st", "2str", "2str2").foreach { prefix =>
+        checkFilterPredicate(
+          !'_1.startsWith(prefix).asInstanceOf[Predicate],
+          classOf[UserDefinedByInstance[_, _]],
+          Seq("1str1", "3str3", "4str4").map(Row(_)))
+      }
+
+      Seq("2S", "null", "2str22").foreach { prefix =>
+        checkFilterPredicate(
+          !'_1.startsWith(prefix).asInstanceOf[Predicate],
+          classOf[UserDefinedByInstance[_, _]],
+          Seq("1str1", "2str2", "3str3", "4str4").map(Row(_)))
+      }
+
+      assertResult(None) {
+        parquetFilters.createFilter(
+          df.schema,
+          sources.StringStartsWith("_1", null))
```
Author: Thanks @attilapiros, …
```diff
+      }
+    }
+  }
 }

 class NumRowGroupsAcc extends AccumulatorV2[Integer, Integer] {
```
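For context, nothing changes at the API level for end users: a `startsWith`-style predicate over Parquet data simply becomes eligible for row-group skipping. A hypothetical end-to-end demo (the scratch path and session setup are illustrative, not from the patch):

```scala
import org.apache.spark.sql.SparkSession

object StartsWithPushdownDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("StartsWithPushdownDemo")
      .getOrCreate()
    import spark.implicits._

    val path = "/tmp/startswith_demo" // arbitrary scratch location
    (1 to 4).map(i => i + "str" + i).toDF("_1")
      .write.mode("overwrite").parquet(path)

    // With spark.sql.parquet.filterPushdown and the new startsWith flag both
    // enabled (their defaults), this predicate reaches Parquet as a
    // StringStartsWith filter, so row groups that cannot match are skipped.
    val matched = spark.read.parquet(path)
      .filter($"_1".startsWith("2str"))
    matched.show() // expected single row: 2str2

    spark.stop()
  }
}
```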
Reviewer: It would be better if we added an `.enabled` postfix.