diff --git a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/GroupReadSupport.java b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/GroupReadSupport.java
index c49b681d5c..304a1eb766 100644
--- a/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/GroupReadSupport.java
+++ b/parquet-hadoop/src/main/java/org/apache/parquet/hadoop/example/GroupReadSupport.java
@@ -1,4 +1,4 @@
-/* 
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *   http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -24,6 +24,7 @@
 
 import org.apache.parquet.example.data.Group;
 import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
+import org.apache.parquet.hadoop.api.InitContext;
 import org.apache.parquet.hadoop.api.ReadSupport;
 import org.apache.parquet.io.api.RecordMaterializer;
 import org.apache.parquet.schema.MessageType;
@@ -31,11 +32,9 @@ public class GroupReadSupport extends ReadSupport<Group> {
 
   @Override
-  public org.apache.parquet.hadoop.api.ReadSupport.ReadContext init(
-      Configuration configuration, Map<String, String> keyValueMetaData,
-      MessageType fileSchema) {
-    String partialSchemaString = configuration.get(ReadSupport.PARQUET_READ_SCHEMA);
-    MessageType requestedProjection = getSchemaForRead(fileSchema, partialSchemaString);
+  public ReadContext init(InitContext context) {
+    String partialSchemaString = context.getConfiguration().get(ReadSupport.PARQUET_READ_SCHEMA);
+    MessageType requestedProjection = getSchemaForRead(context.getFileSchema(), partialSchemaString);
     return new ReadContext(requestedProjection);
   }
 
   @Override
diff --git a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/GroupReadSupportTest.java b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/GroupReadSupportTest.java
index 2a99a1b1fa..9d97c571a2 100644
--- a/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/GroupReadSupportTest.java
+++ b/parquet-hadoop/src/test/java/org/apache/parquet/hadoop/example/GroupReadSupportTest.java
@@ -1,4 +1,4 @@
-/* 
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *   http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -18,7 +18,9 @@
  */
 package org.apache.parquet.hadoop.example;
 
+import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.parquet.hadoop.api.InitContext;
 import org.junit.Test;
 import org.apache.parquet.hadoop.api.ReadSupport;
 import org.apache.parquet.schema.MessageType;
@@ -43,10 +45,10 @@ public class GroupReadSupportTest {
   public void testInitWithoutSpecifyingRequestSchema() throws Exception {
     GroupReadSupport s = new GroupReadSupport();
     Configuration configuration = new Configuration();
-    Map<String, String> keyValueMetaData = new HashMap<String, String>();
+    Map<String, Set<String>> keyValueMetaData = new HashMap<>();
     MessageType fileSchema = MessageTypeParser.parseMessageType(fullSchemaStr);
 
-    ReadSupport.ReadContext context = s.init(configuration, keyValueMetaData, fileSchema);
+    ReadSupport.ReadContext context = s.init(new InitContext(configuration, keyValueMetaData, fileSchema));
     assertEquals(context.getRequestedSchema(), fileSchema);
   }
 
@@ -54,12 +56,12 @@ public void testInitWithoutSpecifyingRequestSchema() throws Exception {
   public void testInitWithPartialSchema() {
     GroupReadSupport s = new GroupReadSupport();
     Configuration configuration = new Configuration();
-    Map<String, String> keyValueMetaData = new HashMap<String, String>();
+    Map<String, Set<String>> keyValueMetaData = new HashMap<>();
     MessageType fileSchema = MessageTypeParser.parseMessageType(fullSchemaStr);
     MessageType partialSchema = MessageTypeParser.parseMessageType(partialSchemaStr);
     configuration.set(ReadSupport.PARQUET_READ_SCHEMA, partialSchemaStr);
 
-    ReadSupport.ReadContext context = s.init(configuration, keyValueMetaData, fileSchema);
+    ReadSupport.ReadContext context = s.init(new InitContext(configuration, keyValueMetaData, fileSchema));
     assertEquals(context.getRequestedSchema(), partialSchema);
   }
 }
diff --git a/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java b/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java
index e96b226e4e..7de44769de 100644
--- a/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java
+++ b/parquet-thrift/src/test/java/org/apache/parquet/hadoop/thrift/TestThriftToParquetFileWriter.java
@@ -28,6 +28,7 @@
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.parquet.column.statistics.*;
+import org.apache.parquet.hadoop.api.InitContext;
 import org.apache.parquet.hadoop.metadata.BlockMetaData;
 import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
 import org.apache.parquet.hadoop.util.ContextUtil;
@@ -328,7 +329,7 @@ private ParquetReader<Group> createRecordReader(Path parquetFilePath) throws IOException {
 
     ParquetMetadata readFooter = ParquetFileReader.readFooter(configuration, parquetFilePath);
     MessageType schema = readFooter.getFileMetaData().getSchema();
 
-    readSupport.init(configuration, null, schema);
+    readSupport.init(new InitContext(configuration, null, schema));
     return new ParquetReader<Group>(parquetFilePath, readSupport);
   }