diff --git a/parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java b/parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java
index 7677a53de0c1..d1660d4e935b 100644
--- a/parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java
+++ b/parquet/src/main/java/org/apache/iceberg/parquet/Parquet.java
@@ -108,6 +108,7 @@
 import org.apache.parquet.avro.AvroWriteSupport;
 import org.apache.parquet.column.ParquetProperties;
 import org.apache.parquet.column.ParquetProperties.WriterVersion;
+import org.apache.parquet.conf.PlainParquetConfiguration;
 import org.apache.parquet.crypto.FileDecryptionProperties;
 import org.apache.parquet.crypto.FileEncryptionProperties;
 import org.apache.parquet.hadoop.ParquetFileReader;
@@ -1251,7 +1252,7 @@ public <D> CloseableIterable<D> build() {
         }
         optionsBuilder = HadoopReadOptions.builder(conf);
       } else {
-        optionsBuilder = ParquetReadOptions.builder();
+        optionsBuilder = ParquetReadOptions.builder(new PlainParquetConfiguration());
       }

       for (Map.Entry<String, String> entry : properties.entrySet()) {
@@ -1324,7 +1325,9 @@ public <D> CloseableIterable<D> build() {
       // TODO: should not need to get the schema to push down before opening the file.
       // Parquet should allow setting a filter inside its read support
       ParquetReadOptions decryptOptions =
-          ParquetReadOptions.builder().withDecryption(fileDecryptionProperties).build();
+          ParquetReadOptions.builder(new PlainParquetConfiguration())
+              .withDecryption(fileDecryptionProperties)
+              .build();
       MessageType type;
       try (ParquetFileReader schemaReader =
           ParquetFileReader.open(ParquetIO.file(file), decryptOptions)) {
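
For context on what this change does: the no-arg `ParquetReadOptions.builder()` falls back to a Hadoop-backed configuration internally, while the `builder(ParquetConfiguration)` overload added in parquet-java 1.14 lets the non-Hadoop branch avoid instantiating a Hadoop `Configuration` at all. Below is a minimal sketch of the new code path, assuming parquet-java 1.14+ on the classpath; the class name and property key are hypothetical, chosen only for illustration.

```java
import org.apache.parquet.ParquetReadOptions;
import org.apache.parquet.conf.PlainParquetConfiguration;

public class PlainReadOptionsSketch {
  public static void main(String[] args) {
    // PlainParquetConfiguration is a plain map-backed ParquetConfiguration,
    // so no Hadoop Configuration object is created on this path.
    PlainParquetConfiguration conf = new PlainParquetConfiguration();

    ParquetReadOptions options =
        ParquetReadOptions.builder(conf)
            // Builder.set(...) mirrors the properties loop in the patched code.
            .set("parquet.example.property", "true") // hypothetical key
            .build();

    // Properties set on the builder are readable back from the options.
    System.out.println(options.getProperty("parquet.example.property"));
  }
}
```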