Skip to content

Commit 121abcd

Browse files
anicolasppe and krivokonmapr
authored and committed
Fixing row deserializer (apache#617)
Spark 2.4.x uses a different way to serialize a Row into an InternalRow. We must use a RowEncoder.
1 parent 82273fb commit 121abcd

File tree

1 file changed

+2
-3
lines changed

1 file changed

+2
-3
lines changed

external/maprdb/src/main/scala/com/mapr/db/spark/sql/v2/MapRDBDataPartitionReader.scala

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import org.apache.spark.sql.catalyst.InternalRow
55
import org.apache.spark.sql.sources._
66
import org.apache.spark.sql.sources.v2.reader.{InputPartition, InputPartitionReader}
77
import org.apache.spark.sql.types._
8+
import org.apache.spark.sql.catalyst.encoders.RowEncoder
89

910
/**
1011
* Reads data from one particular MapR-DB tablet / region
@@ -80,9 +81,7 @@ class MapRDBDataPartitionReader(table: String,
8081

8182
val row = documentToRow(com.mapr.db.spark.MapRDBSpark.newDocument(document), schema)
8283

83-
val values = (0 until row.length).foldLeft(List.empty[Any])((acc, idx) => row.get(idx) :: acc).reverse
84-
85-
InternalRow(values)
84+
RowEncoder(schema).toRow(row)
8685
}
8786

8887
override def close(): Unit = {

0 commit comments

Comments
 (0)