@@ -28,6 +28,7 @@
import com.facebook.presto.orc.StorageStripeMetadataSource;
import com.facebook.presto.orc.StripeMetadataSourceFactory;
import com.facebook.presto.orc.cache.StorageOrcFileTailSource;
import com.facebook.presto.parquet.FileParquetDataSource;
import com.facebook.presto.parquet.cache.MetadataReader;
import com.facebook.presto.spi.ConnectorPageSource;
import com.facebook.presto.spi.ConnectorSession;
@@ -56,13 +57,19 @@
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.Type;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

import java.io.File;
import java.io.IOException;
import java.sql.Timestamp;
import java.time.Instant;
import java.util.Arrays;
import java.util.List;
@@ -106,6 +113,7 @@
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.Iterables.filter;
import static io.airlift.slice.Slices.utf8Slice;
import static java.io.File.createTempFile;
import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.toList;
import static org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.getStandardListObjectInspector;
@@ -120,6 +128,7 @@
import static org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaStringObjectInspector;
import static org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory.javaTimestampObjectInspector;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
@@ -498,6 +507,52 @@ public void testParquetPageSourceSchemaEvolution(int rowCount)
.isReadableByPageSource(new ParquetPageSourceFactory(FUNCTION_AND_TYPE_MANAGER, FUNCTION_RESOLUTION, HDFS_ENVIRONMENT, STATS, METADATA_READER));
}

@Test
public void testParquetLogicalTypes() throws IOException
{
HiveFileWriterFactory parquetFileWriterFactory = new ParquetFileWriterFactory(HDFS_ENVIRONMENT, FUNCTION_AND_TYPE_MANAGER, new NodeVersion("test"), HIVE_STORAGE_TIME_ZONE);

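// Enable the optimized Parquet writer so that logical type annotations are written to the footer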
List<PropertyMetadata<?>> allSessionProperties = getAllSessionProperties(
new HiveClientConfig(),
new ParquetFileWriterConfig().setParquetOptimizedWriterEnabled(true),
createOrcHiveCommonClientConfig(true, 100.0));

TestingConnectorSession session = new TestingConnectorSession(allSessionProperties);
File file = createTempFile("logicaltest", ".parquet");
long timestamp = new DateTime(2011, 5, 6, 7, 8, 9, 123).getMillis();

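// Write a single timestamp column, then read the footer back and inspect its logical type annotation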
try {
createTestFile(
file.getAbsolutePath(),
PARQUET,
HiveCompressionCodec.NONE,
ImmutableList.of(new TestColumn("t_timestamp", javaTimestampObjectInspector, new Timestamp(timestamp), timestamp)),
session,
3,
parquetFileWriterFactory);

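// Read the footer of the file that was just written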
FileParquetDataSource dataSource = new FileParquetDataSource(file);
ParquetMetadata parquetMetadata = MetadataReader.readFooter(
dataSource,
file.length(),
Optional.empty(),
false).getParquetMetadata();

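// The timestamp column should carry a TIMESTAMP annotation with isAdjustedToUTC == false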
MessageType writtenSchema = parquetMetadata.getFileMetaData().getSchema();
Type timestampType = writtenSchema.getType("t_timestamp");
if (timestampType.getLogicalTypeAnnotation() instanceof LogicalTypeAnnotation.TimestampLogicalTypeAnnotation) {
LogicalTypeAnnotation.TimestampLogicalTypeAnnotation annotation = (LogicalTypeAnnotation.TimestampLogicalTypeAnnotation) timestampType.getLogicalTypeAnnotation();
assertFalse(annotation.isAdjustedToUTC());
}
else {
fail("the logical type annotation saved was not of type TimestampLogicalTypeAnnotation");
}
}
finally {
file.delete();
}
}

private static List<TestColumn> getTestColumnsSupportedByParquet()
{
// Writing complex Hive data to Parquet is broken
11 changes: 4 additions & 7 deletions presto-iceberg/pom.xml
@@ -38,18 +38,15 @@
<scope>runtime</scope>
</dependency>

<dependency>
<groupId>org.apache.parquet</groupId>
<artifactId>parquet-format-structures</artifactId>
<version>${dep.parquet.version}</version>
<scope>runtime</scope>
</dependency>

<dependency>
<groupId>org.apache.parquet</groupId>
<artifactId>parquet-common</artifactId>
<version>${dep.parquet.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.parquet</groupId>
<artifactId>parquet-format-structures</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.parquet</groupId>
<artifactId>parquet-format</artifactId>
@@ -0,0 +1,185 @@
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.facebook.presto.iceberg;

import com.facebook.presto.Session;
import com.facebook.presto.common.Page;
import com.facebook.presto.common.type.BooleanType;
import com.facebook.presto.common.type.DoubleType;
import com.facebook.presto.common.type.Type;
import com.facebook.presto.common.type.TypeManager;
import com.facebook.presto.common.type.TypeSignature;
import com.facebook.presto.common.type.TypeSignatureParameter;
import com.facebook.presto.hive.FileFormatDataSourceStats;
import com.facebook.presto.hive.HdfsContext;
import com.facebook.presto.hive.HdfsEnvironment;
import com.facebook.presto.hive.HiveCompressionCodec;
import com.facebook.presto.hive.HiveDwrfEncryptionProvider;
import com.facebook.presto.hive.NodeVersion;
import com.facebook.presto.hive.OrcFileWriterConfig;
import com.facebook.presto.metadata.SessionPropertyManager;
import com.facebook.presto.parquet.FileParquetDataSource;
import com.facebook.presto.parquet.cache.MetadataReader;
import com.facebook.presto.spi.ColumnMetadata;
import com.facebook.presto.spi.ConnectorId;
import com.facebook.presto.spi.ConnectorSession;
import com.facebook.presto.spi.session.PropertyMetadata;
import com.google.common.collect.ImmutableList;
import io.airlift.units.DataSize;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.MetricsConfig;
import org.apache.iceberg.Schema;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.apache.parquet.schema.MessageType;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import java.io.File;
import java.util.List;
import java.util.Optional;

import static com.facebook.presto.RowPagesBuilder.rowPagesBuilder;
import static com.facebook.presto.common.type.BigintType.BIGINT;
import static com.facebook.presto.common.type.DateType.DATE;
import static com.facebook.presto.common.type.HyperLogLogType.HYPER_LOG_LOG;
import static com.facebook.presto.common.type.IntegerType.INTEGER;
import static com.facebook.presto.common.type.TimestampType.TIMESTAMP;
import static com.facebook.presto.common.type.VarbinaryType.VARBINARY;
import static com.facebook.presto.common.type.VarcharType.VARCHAR;
import static com.facebook.presto.iceberg.IcebergAbstractMetadata.toIcebergSchema;
import static com.facebook.presto.iceberg.IcebergQueryRunner.ICEBERG_CATALOG;
import static com.facebook.presto.iceberg.IcebergSessionProperties.dataSizeSessionProperty;
import static com.facebook.presto.testing.TestingSession.testSessionBuilder;
import static com.google.common.io.Files.createTempDir;
import static org.apache.iceberg.parquet.ParquetSchemaUtil.convert;
import static org.testng.Assert.assertEquals;

public class TestIcebergFileWriter
{
private IcebergFileWriterFactory icebergFileWriterFactory;
private HdfsContext hdfsContext;
private ConnectorSession connectorSession;

@BeforeClass
public void setup() throws Exception
{
ConnectorId connectorId = new ConnectorId("iceberg");
SessionPropertyManager sessionPropertyManager = new SessionPropertyManager();

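// Register the Parquet writer session properties that the Iceberg file writer reads from the session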
sessionPropertyManager.addConnectorSessionProperties(
connectorId,
ImmutableList.of(
dataSizeSessionProperty("parquet_writer_page_size", "Parquet: Writer page size", new DataSize(10, DataSize.Unit.KILOBYTE), false),
dataSizeSessionProperty("parquet_writer_block_size", "Parquet: Writer block size", new DataSize(10, DataSize.Unit.KILOBYTE), false),
new PropertyMetadata<>(
"parquet_writer_version",
"Parquet: Writer version",
VARCHAR,
ParquetProperties.WriterVersion.class,
ParquetProperties.WriterVersion.PARQUET_2_0,
false,
value -> ParquetProperties.WriterVersion.valueOf(((String) value).toUpperCase()),
ParquetProperties.WriterVersion::name),
new PropertyMetadata<>(
"compression_codec",
"The compression codec to use when writing files",
VARCHAR,
HiveCompressionCodec.class,
HiveCompressionCodec.NONE,
false,
value -> HiveCompressionCodec.valueOf(((String) value).toUpperCase()),
HiveCompressionCodec::name)));

Session session = testSessionBuilder(sessionPropertyManager)
.setCatalog(ICEBERG_CATALOG)
.setSchema("tpch")
.build();

this.connectorSession = session.toConnectorSession(connectorId);
TypeManager typeManager = new TestingTypeManager();
this.hdfsContext = new HdfsContext(connectorSession);
HdfsEnvironment hdfsEnvironment = IcebergDistributedTestBase.getHdfsEnvironment();
this.icebergFileWriterFactory = new IcebergFileWriterFactory(hdfsEnvironment, typeManager,
new FileFormatDataSourceStats(), new NodeVersion("test"), new OrcFileWriterConfig(), HiveDwrfEncryptionProvider.NO_ENCRYPTION);
Contributor: Why do we have Orc config settings here when we're using Parquet?

Contributor (Author): The Iceberg file writer factory requires ORC settings to be instantiated.
}

@Test
public void testWriteParquetFileWithLogicalTypes() throws Exception
{
Path path = new Path(createTempDir().getAbsolutePath() + "/test.parquet");
Schema icebergSchema = toIcebergSchema(ImmutableList.of(
new ColumnMetadata("a", VARCHAR),
new ColumnMetadata("b", INTEGER),
new ColumnMetadata("c", TIMESTAMP),
new ColumnMetadata("d", DATE)));
IcebergFileWriter icebergFileWriter = this.icebergFileWriterFactory.createFileWriter(path, icebergSchema, new JobConf(), connectorSession,
hdfsContext, FileFormat.PARQUET, MetricsConfig.getDefault());

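// Three pages of sequential rows covering the varchar, bigint, timestamp, and date columns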
List<Page> input = rowPagesBuilder(VARCHAR, BIGINT, TIMESTAMP, DATE)
.addSequencePage(100, 0, 0, 123, 100)
.addSequencePage(100, 100, 100, 223, 100)
.addSequencePage(100, 200, 200, 323, 100)
.build();
for (Page page : input) {
icebergFileWriter.appendRows(page);
}
icebergFileWriter.commit();

File parquetFile = new File(path.toString());
FileParquetDataSource dataSource = new FileParquetDataSource(parquetFile);
ParquetMetadata parquetMetadata = MetadataReader.readFooter(
dataSource,
parquetFile.length(),
Optional.empty(),
false).getParquetMetadata();
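// The schema written to the file should match the Parquet schema converted from the Iceberg schema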
MessageType writtenSchema = parquetMetadata.getFileMetaData().getSchema();
MessageType originalSchema = convert(icebergSchema, "table");
assertEquals(originalSchema, writtenSchema);
}

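// Minimal TypeManager stub that resolves only the types needed by this test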
private static class TestingTypeManager
implements TypeManager
{
@Override
public Type getType(TypeSignature signature)
{
for (Type type : getTypes()) {
if (signature.getBase().equals(type.getTypeSignature().getBase())) {
return type;
}
}
return null;
}

@Override
public Type getParameterizedType(String baseTypeName, List<TypeSignatureParameter> typeParameters)
{
return getType(new TypeSignature(baseTypeName, typeParameters));
}

@Override
public boolean canCoerce(Type actualType, Type expectedType)
{
throw new UnsupportedOperationException();
}

private List<Type> getTypes()
{
return ImmutableList.of(BooleanType.BOOLEAN, INTEGER, BIGINT, DoubleType.DOUBLE, VARCHAR, VARBINARY, TIMESTAMP, DATE, HYPER_LOG_LOG);
}
}
}