@@ -22,6 +22,7 @@
import com.google.common.collect.ImmutableList;
import io.airlift.units.DataSize;
import io.airlift.units.Duration;
+import org.apache.parquet.column.ParquetProperties;

import javax.inject.Inject;

@@ -85,6 +86,7 @@ public final class HiveSessionProperties
private static final String PARQUET_WRITER_BLOCK_SIZE = "parquet_writer_block_size";
private static final String PARQUET_WRITER_PAGE_SIZE = "parquet_writer_page_size";
private static final String PARQUET_OPTIMIZED_WRITER_ENABLED = "parquet_optimized_writer_enabled";
+    private static final String PARQUET_WRITER_VERSION = "parquet_writer_version";
private static final String MAX_SPLIT_SIZE = "max_split_size";
private static final String MAX_INITIAL_SPLIT_SIZE = "max_initial_split_size";
public static final String RCFILE_OPTIMIZED_WRITER_ENABLED = "rcfile_optimized_writer_enabled";
@@ -539,6 +541,15 @@ public HiveSessionProperties(HiveClientConfig hiveClientConfig, OrcFileWriterConfig
"Experimental: Enable optimized writer",
parquetFileWriterConfig.isParquetOptimizedWriterEnabled(),
false),
+                new PropertyMetadata<>(
+                        PARQUET_WRITER_VERSION,
+                        "Parquet: Writer version",
+                        VARCHAR,
+                        ParquetProperties.WriterVersion.class,
+                        parquetFileWriterConfig.getWriterVersion(),
+                        false,
+                        value -> ParquetProperties.WriterVersion.valueOf(((String) value).toUpperCase()),
+                        ParquetProperties.WriterVersion::name),
booleanProperty(
PARQUET_BATCH_READ_OPTIMIZATION_ENABLED,
"Is Parquet batch read optimization enabled",
@@ -1112,6 +1123,11 @@ public static boolean isParquetOptimizedWriterEnabled(ConnectorSession session)
return session.getProperty(PARQUET_OPTIMIZED_WRITER_ENABLED, Boolean.class);
}

+    public static ParquetProperties.WriterVersion getParquetWriterVersion(ConnectorSession session)
+    {
+        return session.getProperty(PARQUET_WRITER_VERSION, ParquetProperties.WriterVersion.class);
+    }
+
public static BucketFunctionType getBucketFunctionTypeForExchange(ConnectorSession session)
{
return session.getProperty(BUCKET_FUNCTION_TYPE_FOR_EXCHANGE, BucketFunctionType.class);
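The session property above is declared as VARCHAR and mapped to the parquet-mr enum by the decoder/encoder pair. A minimal, self-contained sketch of that string-to-enum round trip (class name and sample values here are illustrative, not part of the change):

import org.apache.parquet.column.ParquetProperties.WriterVersion;

public class WriterVersionRoundTripSketch
{
    public static void main(String[] args)
    {
        // Decode path: session values arrive as strings and are matched
        // against the enum constants case-insensitively via toUpperCase().
        WriterVersion decoded = WriterVersion.valueOf("parquet_1_0".toUpperCase());

        // Encode path: the property value is surfaced back by enum name.
        String encoded = decoded.name();

        System.out.println(decoded + " -> " + encoded); // PARQUET_1_0 -> PARQUET_1_0
        // An unrecognized value such as "PARQUET_3_0" throws IllegalArgumentException.
    }
}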
@@ -14,17 +14,20 @@
package com.facebook.presto.hive;

import com.facebook.airlift.configuration.Config;
+import com.facebook.presto.parquet.writer.ParquetWriterOptions;
import io.airlift.units.DataSize;
import org.apache.parquet.hadoop.ParquetWriter;

import static io.airlift.units.DataSize.Unit.BYTE;
+import static org.apache.parquet.column.ParquetProperties.WriterVersion;

public class ParquetFileWriterConfig
{
private boolean parquetOptimizedWriterEnabled;

private DataSize blockSize = new DataSize(ParquetWriter.DEFAULT_BLOCK_SIZE, BYTE);
private DataSize pageSize = new DataSize(ParquetWriter.DEFAULT_PAGE_SIZE, BYTE);
+    private WriterVersion writerVersion = ParquetWriterOptions.DEFAULT_WRITER_VERSION;

public DataSize getBlockSize()
{
@@ -50,6 +53,18 @@ public ParquetFileWriterConfig setPageSize(DataSize pageSize)
return this;
}

+    public WriterVersion getWriterVersion()
+    {
+        return writerVersion;
+    }
+
+    @Config("hive.parquet.writer.version")
+    public ParquetFileWriterConfig setWriterVersion(WriterVersion writerVersion)
+    {
+        this.writerVersion = writerVersion;
+        return this;
+    }
+
public boolean isParquetOptimizedWriterEnabled()
{
return parquetOptimizedWriterEnabled;
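Given the @Config binding above, hive.parquet.writer.version accepts the enum names, and the default comes from ParquetWriterOptions.DEFAULT_WRITER_VERSION. A hedged usage sketch mirroring the defaults asserted in the tests further down (the printed values follow those tests):

import com.facebook.presto.hive.ParquetFileWriterConfig;
import org.apache.parquet.column.ParquetProperties.WriterVersion;

public class ParquetFileWriterConfigSketch
{
    public static void main(String[] args)
    {
        ParquetFileWriterConfig config = new ParquetFileWriterConfig();

        // Default writer version (PARQUET_2_0, per the testDefaults case below).
        System.out.println(config.getWriterVersion());

        // Fluent override, as bound from hive.parquet.writer.version=PARQUET_1_0.
        config.setWriterVersion(WriterVersion.PARQUET_1_0);
        System.out.println(config.getWriterVersion()); // PARQUET_1_0
    }
}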
@@ -45,6 +45,7 @@
import static com.facebook.presto.hive.HiveErrorCode.HIVE_WRITER_OPEN_ERROR;
import static com.facebook.presto.hive.HiveSessionProperties.getParquetWriterBlockSize;
import static com.facebook.presto.hive.HiveSessionProperties.getParquetWriterPageSize;
+import static com.facebook.presto.hive.HiveSessionProperties.getParquetWriterVersion;
import static com.facebook.presto.hive.HiveSessionProperties.isParquetOptimizedWriterEnabled;
import static com.facebook.presto.hive.HiveType.toHiveTypes;
import static java.util.Objects.requireNonNull;
@@ -103,6 +104,7 @@ public Optional<HiveFileWriter> createFileWriter(
ParquetWriterOptions parquetWriterOptions = ParquetWriterOptions.builder()
.setMaxPageSize(getParquetWriterPageSize(session))
.setMaxBlockSize(getParquetWriterBlockSize(session))
+                .setWriterVersion(getParquetWriterVersion(session))
.build();

CompressionCodecName compressionCodecName = getCompression(conf);
@@ -20,9 +20,12 @@
import org.testng.annotations.Test;

import static com.facebook.presto.hive.HiveSessionProperties.getNodeSelectionStrategy;
+import static com.facebook.presto.hive.HiveSessionProperties.getParquetWriterVersion;
import static com.facebook.presto.hive.HiveSessionProperties.isCacheEnabled;
import static com.facebook.presto.spi.schedule.NodeSelectionStrategy.HARD_AFFINITY;
import static com.facebook.presto.spi.schedule.NodeSelectionStrategy.NO_PREFERENCE;
+import static org.apache.parquet.column.ParquetProperties.WriterVersion.PARQUET_1_0;
+import static org.apache.parquet.column.ParquetProperties.WriterVersion.PARQUET_2_0;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;

@@ -75,4 +78,24 @@ public void testCacheEnabledConfig()
new CacheConfig().setCachingEnabled(true)).getSessionProperties());
assertTrue(isCacheEnabled(connectorSession));
}

+    @Test
+    public void testParquetWriterVersionConfig()
+    {
+        ConnectorSession connectorSession = new TestingConnectorSession(
+                new HiveSessionProperties(
+                        new HiveClientConfig(),
+                        new OrcFileWriterConfig(),
+                        new ParquetFileWriterConfig(),
+                        new CacheConfig().setCachingEnabled(true)).getSessionProperties());
+        assertEquals(getParquetWriterVersion(connectorSession), PARQUET_2_0);
+
+        connectorSession = new TestingConnectorSession(
+                new HiveSessionProperties(
+                        new HiveClientConfig(),
+                        new OrcFileWriterConfig(),
+                        new ParquetFileWriterConfig().setWriterVersion(PARQUET_1_0),
+                        new CacheConfig().setCachingEnabled(true)).getSessionProperties());
+        assertEquals(getParquetWriterVersion(connectorSession), PARQUET_1_0);
+    }
}
@@ -15,6 +15,7 @@

import com.google.common.collect.ImmutableMap;
import io.airlift.units.DataSize;
+import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.hadoop.ParquetWriter;
import org.testng.annotations.Test;

@@ -34,7 +35,8 @@ public void testDefaults()
assertRecordedDefaults(recordDefaults(ParquetFileWriterConfig.class)
.setParquetOptimizedWriterEnabled(false)
.setBlockSize(new DataSize(ParquetWriter.DEFAULT_BLOCK_SIZE, BYTE))
-                .setPageSize(new DataSize(ParquetWriter.DEFAULT_PAGE_SIZE, BYTE)));
+                .setPageSize(new DataSize(ParquetWriter.DEFAULT_PAGE_SIZE, BYTE))
+                .setWriterVersion(ParquetProperties.WriterVersion.PARQUET_2_0));
}

@Test
@@ -44,12 +46,14 @@ public void testExplicitPropertyMappings()
.put("hive.parquet.optimized-writer.enabled", "true")
.put("hive.parquet.writer.block-size", "234MB")
.put("hive.parquet.writer.page-size", "11MB")
.put("hive.parquet.writer.version", "PARQUET_1_0")
.build();

ParquetFileWriterConfig expected = new ParquetFileWriterConfig()
.setParquetOptimizedWriterEnabled(true)
.setBlockSize(new DataSize(234, MEGABYTE))
-                .setPageSize(new DataSize(11, MEGABYTE));
+                .setPageSize(new DataSize(11, MEGABYTE))
+                .setWriterVersion(ParquetProperties.WriterVersion.PARQUET_1_0);

assertFullMapping(properties, expected);
}
@@ -25,6 +25,7 @@
import com.facebook.presto.parquet.cache.MetadataReader;
import com.facebook.presto.parquet.cache.ParquetFileMetadata;
import com.facebook.presto.parquet.cache.ParquetMetadataSource;
+import com.facebook.presto.parquet.writer.ParquetWriterOptions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.AbstractIterator;
@@ -1628,7 +1629,8 @@ public void testCaching()
columnNames,
new Iterable<?>[] {values},
10,
-                CompressionCodecName.GZIP);
+                CompressionCodecName.GZIP,
+                ParquetWriterOptions.DEFAULT_WRITER_VERSION);
long tempFileCreationTime = System.currentTimeMillis();

testSingleRead(new Iterable<?>[] {values},
@@ -34,6 +34,7 @@
import com.facebook.presto.testing.TestingSession;
import com.google.common.collect.ImmutableList;
import io.airlift.units.DataSize;
+import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.apache.parquet.io.ColumnIOConverter;
@@ -259,7 +260,8 @@ public void setup()
columnNames,
values,
ROWS,
-                compressionCodecName);
+                compressionCodecName,
+                ParquetProperties.WriterVersion.PARQUET_2_0);

//Set up PageProcessor
List<RowExpression> projections = getProjections(type);
@@ -389,18 +389,20 @@ void assertRoundTrip(
}

// write presto parquet
-        for (CompressionCodecName compressionCodecName : writerCompressions) {
-            for (ConnectorSession session : sessions) {
-                try (TempFile tempFile = new TempFile("test", "parquet")) {
-                    OptionalInt min = stream(writeValues).mapToInt(Iterables::size).min();
-                    checkState(min.isPresent());
-                    writeParquetFileFromPresto(tempFile.getFile(), columnTypes, columnNames, readValues, min.getAsInt(), compressionCodecName);
-                    assertFileContents(
-                            session,
-                            tempFile.getFile(),
-                            getIterators(readValues),
-                            columnNames,
-                            columnTypes);
+        for (WriterVersion version : versions) {
+            for (CompressionCodecName compressionCodecName : writerCompressions) {
+                for (ConnectorSession session : sessions) {
+                    try (TempFile tempFile = new TempFile("test", "parquet")) {
+                        OptionalInt min = stream(writeValues).mapToInt(Iterables::size).min();
+                        checkState(min.isPresent());
+                        writeParquetFileFromPresto(tempFile.getFile(), columnTypes, columnNames, readValues, min.getAsInt(), compressionCodecName, version);
+                        assertFileContents(
+                                session,
+                                tempFile.getFile(),
+                                getIterators(readValues),
+                                columnNames,
+                                columnTypes);
+                    }
+                }
+            }
+        }
@@ -825,7 +827,7 @@ private static Object decodeObject(Type type, Block block, int position)
return type.getObjectValue(SESSION.getSqlFunctionProperties(), block, position);
}

-    public static void writeParquetFileFromPresto(File outputFile, List<Type> types, List<String> columnNames, Iterable<?>[] values, int size, CompressionCodecName compressionCodecName)
+    public static void writeParquetFileFromPresto(File outputFile, List<Type> types, List<String> columnNames, Iterable<?>[] values, int size, CompressionCodecName compressionCodecName, WriterVersion writerVersion)
throws Exception
{
checkArgument(types.size() == columnNames.size() && types.size() == values.length);
@@ -841,6 +843,7 @@ public static void writeParquetFileFromPresto(File outputFile, List<Type> types,
ParquetWriterOptions.builder()
.setMaxPageSize(DataSize.succinctBytes(100))
.setMaxBlockSize(DataSize.succinctBytes(100000))
+                        .setWriterVersion(writerVersion)
.build(),
compressionCodecName.getHadoopCompressionCodecClassName());

Expand Down
@@ -19,6 +19,7 @@
import com.facebook.presto.parquet.FileParquetDataSource;
import com.facebook.presto.parquet.cache.MetadataReader;
import com.facebook.presto.parquet.reader.ParquetReader;
+import com.facebook.presto.parquet.writer.ParquetWriterOptions;
import com.google.common.collect.ImmutableList;
import io.airlift.units.DataSize;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
@@ -87,7 +88,8 @@ private void prepareDataFile(CompressionCodecName compressionCodecName)
ImmutableList.of("c1"),
new Iterable<?>[] {generateValues()},
ROWS,
-                compressionCodecName);
+                compressionCodecName,
+                ParquetWriterOptions.DEFAULT_WRITER_VERSION);
}

private List<Integer> generateValues()
@@ -61,6 +61,7 @@
import static com.facebook.presto.iceberg.IcebergSessionProperties.getOrcMaxMergeDistance;
import static com.facebook.presto.iceberg.IcebergSessionProperties.getOrcOptimizedWriterValidateMode;
import static com.facebook.presto.iceberg.IcebergSessionProperties.getOrcStreamBufferSize;
+import static com.facebook.presto.iceberg.IcebergSessionProperties.getParquetWriterVersion;
import static com.facebook.presto.iceberg.IcebergSessionProperties.isOrcOptimizedWriterValidate;
import static com.facebook.presto.iceberg.TypeConverter.toOrcType;
import static com.facebook.presto.iceberg.TypeConverter.toPrestoType;
@@ -142,6 +143,7 @@ private IcebergFileWriter createParquetWriter(
ParquetWriterOptions parquetWriterOptions = ParquetWriterOptions.builder()
.setMaxPageSize(getParquetWriterPageSize(session))
.setMaxBlockSize(getParquetWriterBlockSize(session))
+                .setWriterVersion(getParquetWriterVersion(session))
.build();

return new IcebergParquetFileWriter(
@@ -26,6 +26,7 @@
import com.facebook.presto.spi.session.PropertyMetadata;
import com.google.common.collect.ImmutableList;
import io.airlift.units.DataSize;
+import org.apache.parquet.column.ParquetProperties;

import javax.inject.Inject;

@@ -50,6 +51,7 @@ public final class IcebergSessionProperties
private static final String PARQUET_MAX_READ_BLOCK_SIZE = "parquet_max_read_block_size";
private static final String PARQUET_WRITER_BLOCK_SIZE = "parquet_writer_block_size";
private static final String PARQUET_WRITER_PAGE_SIZE = "parquet_writer_page_size";
+    private static final String PARQUET_WRITER_VERSION = "parquet_writer_version";
private static final String PARQUET_USE_COLUMN_NAMES = "parquet_use_column_names";
private static final String PARQUET_BATCH_READ_OPTIMIZATION_ENABLED = "parquet_batch_read_optimization_enabled";
private static final String PARQUET_BATCH_READER_VERIFICATION_ENABLED = "parquet_batch_reader_verification_enabled";
@@ -130,6 +132,15 @@ public IcebergSessionProperties(
"Parquet: Writer page size",
parquetFileWriterConfig.getPageSize(),
false),
+                new PropertyMetadata<>(
+                        PARQUET_WRITER_VERSION,
+                        "Parquet: Writer version",
+                        VARCHAR,
+                        ParquetProperties.WriterVersion.class,
+                        parquetFileWriterConfig.getWriterVersion(),
+                        false,
+                        value -> ParquetProperties.WriterVersion.valueOf(((String) value).toUpperCase()),
+                        ParquetProperties.WriterVersion::name),
booleanProperty(
ORC_BLOOM_FILTERS_ENABLED,
"ORC: Enable bloom filters for predicate pushdown",
@@ -307,6 +318,11 @@ public static DataSize getParquetWriterBlockSize(ConnectorSession session)
return session.getProperty(PARQUET_WRITER_PAGE_SIZE, DataSize.class);
}

+    public static ParquetProperties.WriterVersion getParquetWriterVersion(ConnectorSession session)
+    {
+        return session.getProperty(PARQUET_WRITER_VERSION, ParquetProperties.WriterVersion.class);
+    }
+
public static PropertyMetadata<DataSize> dataSizeSessionProperty(String name, String description, DataSize defaultValue, boolean hidden)
{
return new PropertyMetadata<>(
@@ -48,7 +48,6 @@
import static java.lang.Math.toIntExact;
import static java.nio.charset.StandardCharsets.US_ASCII;
import static java.util.Objects.requireNonNull;
-import static org.apache.parquet.column.ParquetProperties.WriterVersion.PARQUET_2_0;
import static org.apache.parquet.hadoop.metadata.CompressionCodecName.BROTLI;
import static org.apache.parquet.hadoop.metadata.CompressionCodecName.GZIP;
import static org.apache.parquet.hadoop.metadata.CompressionCodecName.LZ4;
@@ -100,7 +99,7 @@ public ParquetWriter(OutputStream outputStream,
this.messageType = requireNonNull(messageType, "messageType is null");

ParquetProperties parquetProperties = ParquetProperties.builder()
-                .withWriterVersion(PARQUET_2_0)
+                .withWriterVersion(writerOption.getWriterVersion())
.withPageSize(writerOption.getMaxPageSize())
.withDictionaryPageSize(writerOption.getMaxDictionaryPageSize())
.build();
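The net effect of this last hunk is that the version configured through ParquetWriterOptions now reaches parquet-mr instead of the previously hard-coded PARQUET_2_0. A minimal sketch of the underlying builder call, using only the parquet-column API already exercised in the diff (page sizes are illustrative):

import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.column.ParquetProperties.WriterVersion;

public class ParquetPropertiesSketch
{
    public static void main(String[] args)
    {
        // The writer version selects the data-page encoding format
        // (v1 vs. v2 pages) that parquet-mr emits.
        ParquetProperties properties = ParquetProperties.builder()
                .withWriterVersion(WriterVersion.PARQUET_1_0)
                .withPageSize(1_048_576)           // illustrative page size
                .withDictionaryPageSize(1_048_576) // illustrative dictionary page size
                .build();

        System.out.println(properties.getWriterVersion()); // PARQUET_1_0
    }
}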