Add Avro support to Iceberg Connector #4776
Changes from all commits
@@ -17,4 +17,5 @@ public enum IcebergFileFormat
 {
     ORC,
     PARQUET,
+    AVRO
 }

Member (review comment on the added AVRO constant): Nit: add trailing comma.
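With the nit applied, the enum body would read as follows (a sketch only; Java permits a trailing comma after the last enum constant):

public enum IcebergFileFormat
{
    ORC,
    PARQUET,
    AVRO,
}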
@@ -28,6 +28,7 @@
 import io.prestosql.plugin.hive.NodeVersion;
 import io.prestosql.plugin.hive.orc.HdfsOrcDataSource;
 import io.prestosql.plugin.hive.orc.OrcWriterConfig;
+import io.prestosql.plugin.iceberg.avro.IcebergAvroFileWriter;
 import io.prestosql.spi.PrestoException;
 import io.prestosql.spi.connector.ConnectorSession;
 import io.prestosql.spi.type.Type;
@@ -104,6 +105,8 @@ public OrcWriterStats getOrcWriterStats()
     {

     public IcebergFileWriter createFileWriter(
+            String schemaName,
+            String tableName,
             Path outputPath,
             Schema icebergSchema,
             JobConf jobConf,

@@ -116,6 +119,8 @@ public IcebergFileWriter createFileWriter(
             return createParquetWriter(outputPath, icebergSchema, jobConf, session, hdfsContext);
         case ORC:
             return createOrcWriter(outputPath, icebergSchema, jobConf, session);
+        case AVRO:
+            return createAvroWriter(schemaName, tableName, outputPath, icebergSchema, session);
     }
     throw new PrestoException(NOT_SUPPORTED, "File format not supported for Iceberg: " + fileFormat);
 }
@@ -233,4 +238,24 @@ private IcebergFileWriter createOrcWriter(
             throw new PrestoException(ICEBERG_WRITER_OPEN_ERROR, "Error creating ORC file", e);
         }
     }
+
+    private IcebergFileWriter createAvroWriter(
+            String schemaName,
+            String tableName,
+            Path outputPath,
+            Schema icebergSchema,
+            ConnectorSession session)
+    {
+        List<Type> fileColumnTypes = icebergSchema.columns().stream()
+                .map(column -> toPrestoType(column.type(), typeManager))
+                .collect(toImmutableList());
+        return new IcebergAvroFileWriter(
+                tableName,
+                outputPath,
+                hdfsEnvironment,
+                new HdfsContext(session, schemaName, tableName),
+                icebergSchema,
+                fileColumnTypes,
+                getCompressionCodec(session));
+    }
 }

Member (on the new schemaName/tableName parameters): the requirement of providing a "tableName" in …

Member: Agreed. We already pass …

Member (on new HdfsContext(session, schemaName, tableName)): We should pass …
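The IcebergAvroFileWriter class referenced above is added elsewhere in this PR and is not shown in these hunks. As a rough idea of the machinery it presumably sits on top of, Iceberg ships its own Avro write path; the sketch below opens a FileAppender for an output path using that API. This is an illustration under the assumption that the writer delegates to org.apache.iceberg.avro.Avro; the class name AvroAppenderSketch and the choice of DataWriter for Iceberg generic records are mine, not taken from the PR.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.Schema;
import org.apache.iceberg.avro.Avro;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.data.avro.DataWriter;
import org.apache.iceberg.hadoop.HadoopOutputFile;
import org.apache.iceberg.io.FileAppender;

final class AvroAppenderSketch
{
    private AvroAppenderSketch() {}

    // Open an Iceberg Avro appender that writes Iceberg generic Records to outputPath.
    static FileAppender<Record> openAppender(Path outputPath, Schema icebergSchema, Configuration conf)
            throws IOException
    {
        return Avro.write(HadoopOutputFile.fromPath(outputPath, conf))
                .schema(icebergSchema)
                .createWriterFunc(DataWriter::create)
                .build();
    }
}

A real writer would additionally wrap the appender to accept Presto pages and would apply the codec obtained from getCompressionCodec(session) before building.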
@@ -36,6 +36,7 @@ public class IcebergSplit
     private final long length;
     private final FileFormat fileFormat;
     private final List<HostAddress> addresses;
+    private final String schemaAsJson;
     private final Map<Integer, String> partitionKeys;

     @JsonCreator
@@ -45,13 +46,15 @@ public IcebergSplit(
             @JsonProperty("length") long length,
             @JsonProperty("fileFormat") FileFormat fileFormat,
             @JsonProperty("addresses") List<HostAddress> addresses,
+            @JsonProperty("schemaAsJson") String schemaAsJson,
             @JsonProperty("partitionKeys") Map<Integer, String> partitionKeys)
     {
         this.path = requireNonNull(path, "path is null");
         this.start = start;
         this.length = length;
         this.fileFormat = requireNonNull(fileFormat, "fileFormat is null");
         this.addresses = ImmutableList.copyOf(requireNonNull(addresses, "addresses is null"));
+        this.schemaAsJson = requireNonNull(schemaAsJson, "schemaAsJson is null");
         this.partitionKeys = Collections.unmodifiableMap(requireNonNull(partitionKeys, "partitionKeys is null"));
     }
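The split now carries the table schema as a JSON string. The producing and consuming sides are not in these hunks; presumably the split source serializes the schema and the Avro reader parses it back when opening the file. A minimal sketch of that round trip, assuming Iceberg's SchemaParser is what the PR uses in both directions:

import org.apache.iceberg.Schema;
import org.apache.iceberg.SchemaParser;

final class SchemaJsonRoundTrip
{
    private SchemaJsonRoundTrip() {}

    // Producer side: what the split source would store in schemaAsJson.
    static String toJson(Schema icebergSchema)
    {
        return SchemaParser.toJson(icebergSchema);
    }

    // Consumer side: what the reader would do with split.getSchemaAsJson().
    static Schema fromJson(String schemaAsJson)
    {
        return SchemaParser.fromJson(schemaAsJson);
    }
}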
@@ -92,6 +95,12 @@ public FileFormat getFileFormat()
         return fileFormat;
     }

+    @JsonProperty
+    public String getSchemaAsJson()
+    {
+        return schemaAsJson;
+    }
+
     @JsonProperty
     public Map<Integer, String> getPartitionKeys()
     {

Member (on the new getSchemaAsJson getter): We could add
    @JsonRawValue
    @JsonProperty("schema")

Member: The serialized schema can be large. I think we should make it optional and only pass it for Avro. No need to pay the serialization cost for other formats.
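Taken together, the two suggestions would look roughly like the sketch below; this is an illustration of the review feedback, not the code in this PR. One wrinkle: @JsonRawValue only affects serialization, so the schema would be embedded as raw, unescaped JSON under the name "schema", and the @JsonCreator side would then need its own handling to read it back (for example binding the property as a JsonNode or using a custom deserializer). The @Nullable field reflects the second comment's point that only Avro splits need to carry the schema.

import javax.annotation.Nullable;

import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonRawValue;

// Sketch of the review suggestions: expose the schema under the name "schema"
// as raw JSON, and allow it to be absent so non-Avro splits do not pay the
// serialization cost.
public class IcebergSplitSchemaSketch
{
    @Nullable
    private final String schemaAsJson;

    public IcebergSplitSchemaSketch(@Nullable String schemaAsJson)
    {
        this.schemaAsJson = schemaAsJson;
    }

    @JsonRawValue           // serialize the value as raw JSON, not as an escaped string
    @JsonProperty("schema")
    @Nullable
    public String getSchemaAsJson()
    {
        return schemaAsJson;
    }
}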
Review comment: Did you mean to change the existing error codes?