2 changes: 1 addition & 1 deletion pom.xml
@@ -438,7 +438,7 @@
<dependency>
<groupId>io.prestosql.orc</groupId>
<artifactId>orc-protobuf</artifactId>
<version>9</version>
<version>10</version>
</dependency>

<dependency>
@@ -21,6 +21,7 @@
import io.prestosql.orc.OrcWriterOptions;
import io.prestosql.orc.OrcWriterStats;
import io.prestosql.orc.metadata.CompressionKind;
import io.prestosql.orc.metadata.OrcType;
import io.prestosql.spi.Page;
import io.prestosql.spi.PrestoException;
import io.prestosql.spi.block.Block;
@@ -65,6 +66,7 @@ public OrcFileWriter(
Callable<Void> rollbackAction,
List<String> columnNames,
List<Type> fileColumnTypes,
List<OrcType> flattenedOrcTypes,
CompressionKind compression,
OrcWriterOptions options,
boolean writeLegacyVersion,
@@ -81,6 +83,7 @@
orcDataSink,
columnNames,
fileColumnTypes,
flattenedOrcTypes,
compression,
options,
writeLegacyVersion,
@@ -21,6 +21,7 @@
import io.prestosql.orc.OrcWriterStats;
import io.prestosql.orc.OutputStreamOrcDataSink;
import io.prestosql.orc.metadata.CompressionKind;
import io.prestosql.orc.metadata.OrcType;
import io.prestosql.plugin.hive.metastore.StorageFormat;
import io.prestosql.plugin.hive.orc.HdfsOrcDataSource;
import io.prestosql.spi.PrestoException;
@@ -140,6 +141,7 @@ public Optional<HiveFileWriter> createFileWriter(
List<Type> fileColumnTypes = getColumnTypes(schema).stream()
.map(hiveType -> hiveType.getType(typeManager))
.collect(toList());
List<OrcType> flattenedOrcTypes = OrcType.createOrcRowType(0, fileColumnNames, fileColumnTypes);

int[] fileInputColumnIndexes = fileColumnNames.stream()
.mapToInt(inputColumnNames::indexOf)
@@ -179,6 +181,7 @@ public Optional<HiveFileWriter> createFileWriter(
rollbackAction,
fileColumnNames,
fileColumnTypes,
flattenedOrcTypes,
compression,
orcWriterOptions
.withStripeMinSize(getOrcOptimizedWriterMinStripeSize(session))
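Reviewer note on the new flattenedOrcTypes argument threaded through these call sites: it is the depth-first flattening of the file's row type, matching the type tree the ORC footer stores. A minimal sketch of what the OrcType.createOrcRowType call above produces, with illustrative column names and types (not taken from this PR):

import com.google.common.collect.ImmutableList;
import io.prestosql.orc.metadata.OrcType;
import io.prestosql.spi.type.Type;

import java.util.List;

import static io.prestosql.spi.type.BigintType.BIGINT;
import static io.prestosql.spi.type.VarcharType.VARCHAR;

public class FlattenedOrcTypesExample
{
    public static void main(String[] args)
    {
        // Illustrative schema; any column names and Presto types work here.
        List<String> columnNames = ImmutableList.of("id", "name");
        List<Type> columnTypes = ImmutableList.of(BIGINT, VARCHAR);

        // Entry 0 is the synthetic STRUCT root for the row; its children
        // follow in depth-first order, one OrcType per node of the schema tree.
        List<OrcType> flattenedOrcTypes = OrcType.createOrcRowType(0, columnNames, columnTypes);
        System.out.println(flattenedOrcTypes.size()); // 3: root plus two scalar columns
    }
}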
@@ -20,6 +20,7 @@
import io.prestosql.orc.OrcWriter;
import io.prestosql.orc.OrcWriterOptions;
import io.prestosql.orc.OrcWriterStats;
import io.prestosql.orc.metadata.OrcType;
import io.prestosql.spi.Page;
import io.prestosql.spi.type.Type;

@@ -77,6 +78,7 @@ private static OrcWriter createOrcFileWriter(OrcDataSink sink, List<Type> types)
sink,
columnNames,
types,
OrcType.createOrcRowType(0, columnNames, types),
LZ4,
new OrcWriterOptions()
.withMaxStringStatisticsLimit(new DataSize(0, BYTE))
@@ -19,6 +19,7 @@
import io.prestosql.orc.OrcWriterOptions;
import io.prestosql.orc.OrcWriterStats;
import io.prestosql.orc.OutputStreamOrcDataSink;
import io.prestosql.orc.metadata.OrcType;
import io.prestosql.plugin.hive.FileFormatDataSourceStats;
import io.prestosql.plugin.hive.GenericHiveRecordCursorProvider;
import io.prestosql.plugin.hive.HdfsEnvironment;
@@ -447,6 +448,7 @@ public PrestoOrcFormatWriter(File targetFile, List<String> columnNames, List<Typ
new OutputStreamOrcDataSink(new FileOutputStream(targetFile)),
columnNames,
types,
OrcType.createOrcRowType(0, columnNames, types),
compressionCodec.getOrcCompressionKind(),
new OrcWriterOptions(),
false,
17 changes: 17 additions & 0 deletions presto-iceberg/pom.xml
@@ -46,6 +46,11 @@
<artifactId>presto-parquet</artifactId>
</dependency>

<dependency>
<groupId>io.prestosql</groupId>
<artifactId>presto-orc</artifactId>
</dependency>

<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@@ -166,6 +171,18 @@
</exclusions>
</dependency>

<dependency>
<groupId>${dep.iceberg.groupId}</groupId>
<artifactId>iceberg-orc</artifactId>
<version>${dep.iceberg.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.orc</groupId>
<artifactId>orc-core</artifactId>
</exclusion>
</exclusions>
</dependency>

<!-- Presto SPI -->
<dependency>
<groupId>io.prestosql</groupId>
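Reviewer note: the orc-core exclusion is presumably there to avoid pulling a second copy of Apache ORC onto the plugin classpath, since the write path in this PR goes through the native presto-orc writer added above; worth confirming that iceberg-orc's metrics code still finds the ORC classes it needs at runtime.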
@@ -51,6 +51,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.PartitionSpecParser;
import org.apache.iceberg.Schema;
@@ -87,8 +88,8 @@
import static io.prestosql.plugin.iceberg.IcebergUtil.isIcebergTable;
import static io.prestosql.plugin.iceberg.PartitionFields.parsePartitionFields;
import static io.prestosql.plugin.iceberg.PartitionFields.toPartitionFields;
import static io.prestosql.plugin.iceberg.TypeConveter.toIcebergType;
import static io.prestosql.plugin.iceberg.TypeConveter.toPrestoType;
import static io.prestosql.plugin.iceberg.TypeConverter.toIcebergType;
import static io.prestosql.plugin.iceberg.TypeConverter.toPrestoType;
import static io.prestosql.spi.StandardErrorCode.INVALID_SCHEMA_PROPERTY;
import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED;
import static io.prestosql.spi.StandardErrorCode.SCHEMA_NOT_EMPTY;
@@ -99,6 +100,7 @@
import static java.util.stream.Collectors.toList;
import static java.util.stream.Collectors.toMap;
import static org.apache.iceberg.TableMetadata.newTableMetadata;
import static org.apache.iceberg.TableProperties.DEFAULT_FILE_FORMAT;
import static org.apache.iceberg.Transactions.createTableTransaction;

public class IcebergMetadata
@@ -290,7 +292,8 @@ public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, Con
throw new TableAlreadyExistsException(schemaTableName);
}

TableMetadata metadata = newTableMetadata(operations, schema, partitionSpec, targetPath.toString());
FileFormat fileFormat = (FileFormat) tableMetadata.getProperties().get(FILE_FORMAT_PROPERTY);
TableMetadata metadata = newTableMetadata(operations, schema, partitionSpec, targetPath.toString(), ImmutableMap.of(DEFAULT_FILE_FORMAT, fileFormat.toString()));

transaction = createTableTransaction(operations, metadata);

@@ -301,7 +304,7 @@ public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, Con
PartitionSpecParser.toJson(metadata.spec()),
getColumns(metadata.schema(), metadata.spec(), typeManager),
targetPath.toString(),
getFileFormat(tableMetadata.getProperties()));
fileFormat);
}

@Override
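For context, the beginCreateTable change persists the session's chosen format into the Iceberg table metadata under DEFAULT_FILE_FORMAT ("write.format.default"), using the enum name from fileFormat.toString(). A hedged sketch of the matching read side, not part of this PR (the icebergTable variable and the PARQUET fallback are illustrative):

// Assumes an org.apache.iceberg.Table in scope; reads back what the hunk above wrote.
String formatName = icebergTable.properties()
        .getOrDefault(TableProperties.DEFAULT_FILE_FORMAT, FileFormat.PARQUET.toString());
FileFormat format = FileFormat.valueOf(formatName.toUpperCase(Locale.ENGLISH));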
@@ -14,13 +14,27 @@
package io.prestosql.plugin.iceberg;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.airlift.json.JsonCodec;
import io.airlift.slice.Slice;
import io.prestosql.orc.OrcDataSink;
import io.prestosql.orc.OrcDataSource;
import io.prestosql.orc.OrcWriteValidation;
import io.prestosql.orc.OrcWriterOptions;
import io.prestosql.orc.OrcWriterStats;
import io.prestosql.orc.OutputStreamOrcDataSink;
import io.prestosql.orc.metadata.CompressionKind;
import io.prestosql.orc.metadata.OrcType;
import io.prestosql.plugin.hive.HdfsEnvironment;
import io.prestosql.plugin.hive.HdfsEnvironment.HdfsContext;
import io.prestosql.plugin.hive.HiveColumnHandle;
import io.prestosql.plugin.hive.HiveConfig;
import io.prestosql.plugin.hive.HiveFileWriter;
import io.prestosql.plugin.hive.HiveMetadata;
import io.prestosql.plugin.hive.HiveStorageFormat;
import io.prestosql.plugin.hive.NodeVersion;
import io.prestosql.plugin.hive.OrcFileWriter;
import io.prestosql.plugin.hive.OrcFileWriterConfig;
import io.prestosql.plugin.hive.RecordFileWriter;
import io.prestosql.plugin.iceberg.PartitionTransforms.ColumnTransform;
import io.prestosql.spi.Page;
@@ -34,6 +48,7 @@
import io.prestosql.spi.type.StandardTypes;
import io.prestosql.spi.type.Type;
import io.prestosql.spi.type.TypeManager;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.IOConstants;
import org.apache.hadoop.mapred.JobConf;
@@ -44,34 +59,53 @@
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.hadoop.HadoopInputFile;
import org.apache.iceberg.orc.OrcMetrics;
import org.apache.iceberg.parquet.ParquetUtil;
import org.apache.iceberg.transforms.Transform;
import org.apache.iceberg.types.Types;
import org.apache.orc.OrcConf;
import org.joda.time.DateTimeZone;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.IntStream;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Verify.verify;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static io.airlift.slice.Slices.wrappedBuffer;
import static io.prestosql.plugin.hive.HiveErrorCode.HIVE_UNSUPPORTED_FORMAT;
import static io.prestosql.plugin.hive.HiveErrorCode.HIVE_WRITER_OPEN_ERROR;
import static io.prestosql.plugin.hive.HiveSessionProperties.getOrcOptimizedWriterMaxDictionaryMemory;
import static io.prestosql.plugin.hive.HiveSessionProperties.getOrcOptimizedWriterMaxStripeRows;
import static io.prestosql.plugin.hive.HiveSessionProperties.getOrcOptimizedWriterMaxStripeSize;
import static io.prestosql.plugin.hive.HiveSessionProperties.getOrcOptimizedWriterMinStripeSize;
import static io.prestosql.plugin.hive.HiveSessionProperties.getOrcOptimizedWriterValidateMode;
import static io.prestosql.plugin.hive.HiveSessionProperties.getOrcStringStatisticsLimit;
import static io.prestosql.plugin.hive.ParquetRecordWriterUtil.setParquetSchema;
import static io.prestosql.plugin.hive.metastore.StorageFormat.fromHiveStorageFormat;
import static io.prestosql.plugin.hive.util.ConfigurationUtils.toJobConf;
import static io.prestosql.plugin.iceberg.IcebergErrorCode.ICEBERG_TOO_MANY_OPEN_PARTITIONS;
import static io.prestosql.plugin.iceberg.PartitionTransforms.getColumnTransform;
import static io.prestosql.plugin.iceberg.TypeConverter.toOrcStructType;
import static io.prestosql.plugin.iceberg.TypeConverter.toPrestoType;
import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED;
import static io.prestosql.spi.type.DateTimeEncoding.unpackMillisUtc;
import static io.prestosql.spi.type.Decimals.readBigDecimal;
import static java.lang.Float.intBitsToFloat;
import static java.lang.Math.toIntExact;
import static java.lang.String.format;
import static java.util.Locale.ENGLISH;
import static java.util.Objects.requireNonNull;
import static java.util.UUID.randomUUID;
import static java.util.concurrent.CompletableFuture.completedFuture;
@@ -104,6 +138,10 @@ public class IcebergPageSink
private long systemMemoryUsage;
private long validationCpuNanos;

private final HiveConfig hiveConfig;
private final OrcFileWriterConfig orcFileWriterConfig;
private final NodeVersion nodeVersion;

public IcebergPageSink(
Schema outputSchema,
PartitionSpec partitionSpec,
@@ -115,7 +153,10 @@ public IcebergPageSink(
TypeManager typeManager,
JsonCodec<CommitTaskData> jsonCodec,
ConnectorSession session,
FileFormat fileFormat)
FileFormat fileFormat,
HiveConfig hiveConfig,
OrcFileWriterConfig orcFileWriterConfig,
NodeVersion nodeVersion)
{
requireNonNull(inputColumns, "inputColumns is null");
this.outputSchema = requireNonNull(outputSchema, "outputSchema is null");
@@ -130,6 +171,9 @@
this.fileFormat = requireNonNull(fileFormat, "fileFormat is null");
this.inputColumns = ImmutableList.copyOf(inputColumns);
this.pagePartitioner = new PagePartitioner(pageIndexerFactory, toPartitionColumns(typeManager, inputColumns, partitionSpec));
this.hiveConfig = requireNonNull(hiveConfig, "hiveConfig is null");
this.orcFileWriterConfig = requireNonNull(orcFileWriterConfig, "orcFileWriterConfig is null");
this.nodeVersion = requireNonNull(nodeVersion, "nodeVersion is null");
}

@Override
@@ -321,6 +365,8 @@ private HiveFileWriter createWriter(Path outputPath)
switch (fileFormat) {
case PARQUET:
return createParquetWriter(outputPath);
case ORC:
return createOrcWriter(outputPath);
}
throw new PrestoException(NOT_SUPPORTED, "File format not supported for Iceberg: " + fileFormat);
}
@@ -356,6 +402,9 @@ private Metrics readMetrics(Path path)
switch (fileFormat) {
case PARQUET:
return ParquetUtil.fileMetrics(HadoopInputFile.fromPath(path, jobConf), MetricsConfig.getDefault());
case ORC:
//TODO:LXY
return OrcMetrics.fromInputFile(HadoopInputFile.fromPath(path, jobConf), jobConf);
}
throw new PrestoException(NOT_SUPPORTED, "File format not supported for Iceberg: " + fileFormat);
}
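On the TODO above: if it is tracking metrics fidelity, that seems warranted; early iceberg-orc releases populate little beyond the record count in OrcMetrics.fromInputFile, so the ORC branch may return weaker column statistics than the Parquet branch until that is confirmed or fixed upstream.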
@@ -543,4 +592,71 @@ public Function<Block, Block> getBlockTransform()
return blockTransform;
}
}

private HiveFileWriter createOrcWriter(Path path)
{
try {
FileSystem fileSystem = hdfsEnvironment.getFileSystem(session.getUser(), path, jobConf);
OrcDataSink orcDataSink = new OutputStreamOrcDataSink(fileSystem.create(path));
Callable<Void> rollbackAction = () -> {
fileSystem.delete(path, false);
return null;
};
List<Types.NestedField> columnFields = outputSchema.columns();
List<String> columnNames = columnFields.stream().map(Types.NestedField::name).collect(toImmutableList());
List<Type> columnTypes = columnFields.stream().map(Types.NestedField::type).map(type -> toPrestoType(type, typeManager)).collect(toImmutableList());
List<OrcType> flattenedOrcTypes = toOrcStructType(0, outputSchema.asStruct(), -1);
CompressionKind compression = getCompression(jobConf);
OrcWriterOptions orcWriterOptions = orcFileWriterConfig.toOrcWriterOptions()
.withStripeMinSize(getOrcOptimizedWriterMinStripeSize(session))
.withStripeMaxSize(getOrcOptimizedWriterMaxStripeSize(session))
.withStripeMaxRowCount(getOrcOptimizedWriterMaxStripeRows(session))
.withDictionaryMaxMemory(getOrcOptimizedWriterMaxDictionaryMemory(session))
.withMaxStringStatisticsLimit(getOrcStringStatisticsLimit(session));
boolean writeLegacyVersion = hiveConfig.isOrcWriteLegacyVersion();
int[] fileInputColumnIndexes = IntStream.range(0, columnNames.size()).toArray();
Map<String, String> metadata = ImmutableMap.<String, String>builder()
.put(HiveMetadata.PRESTO_VERSION_NAME, nodeVersion.toString())
.put(HiveMetadata.PRESTO_QUERY_ID_NAME, session.getQueryId())
.build();
DateTimeZone hiveStorageTimeZone = hiveConfig.getDateTimeZone();
Optional<Supplier<OrcDataSource>> validationInputFactory = Optional.empty();
OrcWriteValidation.OrcWriteValidationMode validationMode = getOrcOptimizedWriterValidateMode(session);
OrcWriterStats stats = new OrcWriterStats();
return new OrcFileWriter(
orcDataSink,
rollbackAction,
columnNames,
columnTypes,
flattenedOrcTypes,
compression,
orcWriterOptions,
writeLegacyVersion,
fileInputColumnIndexes,
metadata,
hiveStorageTimeZone,
validationInputFactory,
validationMode,
stats);
}
catch (IOException e) {
throw new PrestoException(HIVE_WRITER_OPEN_ERROR, "Error creating ORC file", e);
}
}

private static CompressionKind getCompression(JobConf configuration)
{
String compressionName = OrcConf.COMPRESS.getString(configuration);
if (compressionName == null) {
return CompressionKind.ZLIB;
}
CompressionKind compression;
try {
compression = CompressionKind.valueOf(compressionName.toUpperCase(ENGLISH));
}
catch (IllegalArgumentException e) {
throw new PrestoException(HIVE_UNSUPPORTED_FORMAT, "Unknown ORC compression type " + compressionName);
}
return compression;
}
}
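To make the compression fallback concrete, a small hedged sketch of getCompression in use (the JobConf values are illustrative):

// OrcConf.COMPRESS reads the standard "orc.compress" key.
JobConf conf = new JobConf(false);
conf.set(OrcConf.COMPRESS.getAttribute(), "SNAPPY");
CompressionKind kind = getCompression(conf); // resolves to CompressionKind.SNAPPY
// An unrecognized name such as "FOO" throws HIVE_UNSUPPORTED_FORMAT instead.

One design note worth checking: OrcConf.COMPRESS carries its own default ("ZLIB"), so OrcConf.COMPRESS.getString(configuration) may already return "ZLIB" rather than null when the key is unset, which would make the explicit null fallback in getCompression dead code in practice.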