Allow custom hadoop properties to be loaded in the Spark data source #7
Changes from 7 commits
```diff
@@ -39,6 +39,7 @@
 import org.apache.spark.sql.types.StructType;
 import java.util.List;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Optional;
 
 import static com.netflix.iceberg.TableProperties.DEFAULT_FILE_FORMAT;
@@ -56,16 +57,18 @@ public String shortName() {
   @Override
   public DataSourceReader createReader(DataSourceOptions options) {
-    Table table = findTable(options);
-    return new Reader(table, lazyConf());
+    Configuration conf = new Configuration(lazyBaseConf());
+    Table table = getTableAndResolveHadoopConfiguration(options, conf);
+
+    return new Reader(table, conf);
   }
 
   @Override
   public Optional<DataSourceWriter> createWriter(String jobId, StructType dfStruct, SaveMode mode,
                                                  DataSourceOptions options) {
     Preconditions.checkArgument(mode == SaveMode.Append, "Save mode %s is not supported", mode);
 
-    Table table = findTable(options);
+    Configuration conf = new Configuration(lazyBaseConf());
+    Table table = getTableAndResolveHadoopConfiguration(options, conf);
 
     Schema dfSchema = SparkSchemaUtil.convert(table.schema(), dfStruct);
     List<String> errors = CheckCompatibility.writeCompatibilityErrors(table.schema(), dfSchema);
@@ -89,30 +92,51 @@ public Optional<DataSourceWriter> createWriter(String jobId, StructType dfStruct
         .toUpperCase(Locale.ENGLISH));
     }
 
-    return Optional.of(new Writer(table, lazyConf(), format));
+    return Optional.of(new Writer(table, conf, format));
   }
 
-  protected Table findTable(DataSourceOptions options) {
+  protected Table findTable(DataSourceOptions options, Configuration conf) {
     Optional<String> location = options.get("path");
     Preconditions.checkArgument(location.isPresent(),
         "Cannot open table without a location: path is not set");
 
-    HadoopTables tables = new HadoopTables(lazyConf());
+    HadoopTables tables = new HadoopTables(conf);
 
     return tables.load(location.get());
   }
 
-  protected SparkSession lazySparkSession() {
+  private SparkSession lazySparkSession() {
     if (lazySpark == null) {
       this.lazySpark = SparkSession.builder().getOrCreate();
     }
     return lazySpark;
   }
 
-  protected Configuration lazyConf() {
+  private Configuration lazyBaseConf() {
     if (lazyConf == null) {
       this.lazyConf = lazySparkSession().sparkContext().hadoopConfiguration();
     }
     return lazyConf;
   }
 
+  private Table getTableAndResolveHadoopConfiguration(
+      DataSourceOptions options, Configuration conf) {
+    // Overwrite configurations from the Spark Context with configurations from the options.
+    mergeIcebergHadoopConfs(conf, options.asMap(), true);
+    Table table = findTable(options, conf);
+    // Set confs from table properties, but do not overwrite options from the Spark Context
+    // with configurations from the table.
+    mergeIcebergHadoopConfs(conf, table.properties(), false);
+    // Re-overwrite values that were set in the options or table properties but were not in
+    // the environment.
+    mergeIcebergHadoopConfs(conf, options.asMap(), true);
+    return table;
+  }
+
+  private static void mergeIcebergHadoopConfs(
+      Configuration baseConf, Map<String, String> options, boolean overwrite) {
+    options.keySet().stream()
+        .filter(key -> key.startsWith("iceberg.hadoop"))
+        .filter(key -> overwrite || baseConf.get(key.replaceFirst("iceberg.hadoop", "")) == null)
+        .forEach(key -> baseConf.set(key.replaceFirst("iceberg.hadoop", ""), options.get(key)));
+  }
 }
```

Contributor comment on the `lazySparkSession` visibility change:
This is nice to have in subclasses, which is why it is protected. We use it in …
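For context, a minimal sketch of how a caller would supply these options from the Spark side. This assumes the source is registered under the `iceberg` short name (per the `shortName()` context in the hunk header above); the property value and table path are illustrative only, and the final Hadoop key follows the `replaceFirst` prefix stripping in `mergeIcebergHadoopConfs`.

```java
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class CustomHadoopPropsExample {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
        .appName("iceberg-custom-hadoop-props")
        .getOrCreate();

    // Read options prefixed with "iceberg.hadoop" are merged into the Hadoop
    // Configuration handed to the Reader, taking precedence over the
    // Spark Context's ambient Hadoop configuration.
    Dataset<Row> df = spark.read()
        .format("iceberg")
        .option("iceberg.hadoop.fs.s3a.endpoint", "http://localhost:9000") // illustrative
        .load("s3a://bucket/warehouse/db/table"); // illustrative path

    df.show();
  }
}
```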
Review comment on passing `conf` into `findTable`:
After #47, we may not need to pass in conf. (Not something we need to change here.)
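To make the resolution order in `getTableAndResolveHadoopConfiguration` concrete: read options win over table properties, and table properties only fill in keys that are still unset. Below is a small self-contained sketch of that precedence. The class name is hypothetical, and it uses a simplified trailing-dot prefix with `substring` instead of reproducing the exact `replaceFirst` call from the diff.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

// Hypothetical, simplified model of mergeIcebergHadoopConfs above:
// overwrite=true lets the incoming map replace existing conf values,
// overwrite=false only fills in keys that are still unset.
public class MergePrecedenceSketch {
  private static final String PREFIX = "iceberg.hadoop.";

  static void merge(Configuration conf, Map<String, String> props, boolean overwrite) {
    props.keySet().stream()
        .filter(key -> key.startsWith(PREFIX))
        .filter(key -> overwrite || conf.get(key.substring(PREFIX.length())) == null)
        .forEach(key -> conf.set(key.substring(PREFIX.length()), props.get(key)));
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // stands in for the Spark Context conf

    Map<String, String> options = new HashMap<>();
    options.put("iceberg.hadoop.some.key", "from-options");

    Map<String, String> tableProps = new HashMap<>();
    tableProps.put("iceberg.hadoop.some.key", "from-table");
    tableProps.put("iceberg.hadoop.other.key", "from-table");

    merge(conf, options, true);     // options overwrite the base conf
    merge(conf, tableProps, false); // table properties only fill in gaps
    merge(conf, options, true);     // options re-applied, as in the PR

    // Prints: from-options / from-table
    System.out.println(conf.get("some.key") + " / " + conf.get("other.key"));
  }
}
```

Run standalone, this prints `from-options / from-table`: the option value survives the table-property merge, matching the comments in `getTableAndResolveHadoopConfiguration` that options overwrite the Spark Context while table properties do not overwrite options.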