@@ -39,6 +39,7 @@
import org.apache.spark.sql.types.StructType;
import java.util.List;
import java.util.Locale;
+import java.util.Map;
import java.util.Optional;

import static com.netflix.iceberg.TableProperties.DEFAULT_FILE_FORMAT;
@@ -56,16 +57,18 @@ public String shortName() {

@Override
public DataSourceReader createReader(DataSourceOptions options) {
-Table table = findTable(options);
-return new Reader(table, lazyConf());
+Configuration conf = new Configuration(lazyBaseConf());
+Table table = getTableAndResolveHadoopConfiguration(options, conf);
+
+return new Reader(table, conf);
}

@Override
public Optional<DataSourceWriter> createWriter(String jobId, StructType dfStruct, SaveMode mode,
DataSourceOptions options) {
Preconditions.checkArgument(mode == SaveMode.Append, "Save mode %s is not supported", mode);

-Table table = findTable(options);
+Configuration conf = new Configuration(lazyBaseConf());
+Table table = getTableAndResolveHadoopConfiguration(options, conf);

Schema dfSchema = SparkSchemaUtil.convert(table.schema(), dfStruct);
List<String> errors = CheckCompatibility.writeCompatibilityErrors(table.schema(), dfSchema);
@@ -89,30 +92,51 @@ public Optional<DataSourceWriter> createWriter(String jobId, StructType dfStruct
.toUpperCase(Locale.ENGLISH));
}

-return Optional.of(new Writer(table, lazyConf(), format));
+return Optional.of(new Writer(table, conf, format));
@rdblue (Contributor), Dec 11, 2018:

After #47, we may not need to pass in conf. (Not something we need to change here.)

}
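Editor's note: both entry points above now wrap lazyBaseConf() in a fresh Configuration before mutating it. Presumably this is so per-job overrides never leak into the shared session conf; Hadoop's Configuration copy constructor snapshots the base properties. A minimal sketch under that assumption (class and key names are illustrative only):

import org.apache.hadoop.conf.Configuration;

public class ConfCopySketch {
  public static void main(String[] args) {
    // Stand-in for the session-wide conf returned by lazyBaseConf().
    Configuration base = new Configuration(false);
    base.set("parquet.block.size", "134217728");

    // Defensive copy, as in createReader/createWriter above.
    Configuration perJob = new Configuration(base);
    perJob.set("parquet.block.size", "67108864"); // per-job override

    System.out.println(base.get("parquet.block.size"));   // 134217728, unchanged
    System.out.println(perJob.get("parquet.block.size")); // 67108864
  }
}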

-protected Table findTable(DataSourceOptions options) {
+protected Table findTable(DataSourceOptions options, Configuration conf) {
Optional<String> location = options.get("path");
Preconditions.checkArgument(location.isPresent(),
"Cannot open table without a location: path is not set");

-HadoopTables tables = new HadoopTables(lazyConf());
+HadoopTables tables = new HadoopTables(conf);

return tables.load(location.get());
}
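Editor's note: for orientation, here is roughly how a caller would exercise this path-based lookup from Spark. The fully qualified class name is used as the format; the warehouse path and the option value are placeholders, not taken from this PR:

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class ReadByPathSketch {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder().getOrCreate();

    // Spark sets the "path" option from load(...); without it, findTable
    // fails with "Cannot open table without a location: path is not set".
    // Per-read Hadoop settings can be passed with the "iceberg.hadoop" prefix.
    Dataset<Row> df = spark.read()
        .format("com.netflix.iceberg.spark.source.IcebergSource")
        .option("iceberg.hadoop.parquet.block.size", "67108864") // hypothetical
        .load("hdfs://namenode:8020/warehouse/db/my_table");     // placeholder

    df.show();
  }
}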

-protected SparkSession lazySparkSession() {
+private SparkSession lazySparkSession() {
Contributor:

This is nice to have in subclasses, which is why it is protected. We use it in findTable to get information about the catalog to use. Not a big deal if it becomes private, since we can make a quick change in our add-on library and keep track of it there.

if (lazySpark == null) {
this.lazySpark = SparkSession.builder().getOrCreate();
}
return lazySpark;
}

-protected Configuration lazyConf() {
+private Configuration lazyBaseConf() {
if (lazyConf == null) {
this.lazyConf = lazySparkSession().sparkContext().hadoopConfiguration();
}
return lazyConf;
}

+private Table getTableAndResolveHadoopConfiguration(
+DataSourceOptions options, Configuration conf) {
+// Overwrite configurations from the Spark Context with configurations from the options.
+mergeIcebergHadoopConfs(conf, options.asMap(), true);
+Table table = findTable(options, conf);
+// Set confs from table properties, but do not overwrite options from the Spark Context with
+// configurations from the table
+mergeIcebergHadoopConfs(conf, table.properties(), false);
Contributor:

I think this still needs to be true, in which case we can remove the option. Table properties still need to override those set in the Hadoop Configuration. Then we re-apply the ones from options to fix up precedence.

Contributor (Author):

Hm, I would think that properties set in the JVM, particularly those set on the Spark Context via spark.hadoop.*, should take precedence over the table properties.

Contributor:

Values set in the Configuration are session specific, and what we want is to move to table settings instead of Spark settings for configuration that is tied to the data, like Parquet row group size. Write-specific settings from the write config can override.

Table settings should take priority over session-wide settings because session-wide config would apply to all tables, and that's not usually appropriate, as the row group size example shows.

Contributor (Author):

That's fair enough. As long as the behavior is well documented, it should be clear to users how to get the final configuration they want.

+// Re-overwrite values that were set in the options and table properties but were not in the environment.
+mergeIcebergHadoopConfs(conf, options.asMap(), true);
+return table;
+}

+private static void mergeIcebergHadoopConfs(
+Configuration baseConf, Map<String, String> options, boolean overwrite) {
+options.keySet().stream()
+.filter(key -> key.startsWith("iceberg.hadoop"))
+.filter(key -> overwrite || baseConf.get(key.replaceFirst("iceberg.hadoop", "")) == null)
Contributor:

Doesn't overwrite discard all keys? I don't think it matters now because it isn't needed anymore.

+.forEach(key -> baseConf.set(key.replaceFirst("iceberg.hadoop", ""), options.get(key)));
+}
}
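Editor's note: taken together, the two helpers above produce a layered precedence: read/write options are applied first, table properties only fill in keys the session conf left unset, and the options are then re-applied to fix up precedence. A self-contained sketch of that merge order (the class, key names, and values are hypothetical; the merge body mirrors mergeIcebergHadoopConfs above):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class ConfPrecedenceSketch {
  // Mirrors mergeIcebergHadoopConfs above; "iceberg.hadoop" is the option prefix.
  static void merge(Configuration conf, Map<String, String> props, boolean overwrite) {
    props.keySet().stream()
        .filter(key -> key.startsWith("iceberg.hadoop"))
        .filter(key -> overwrite || conf.get(key.replaceFirst("iceberg.hadoop", "")) == null)
        .forEach(key -> conf.set(key.replaceFirst("iceberg.hadoop", ""), props.get(key)));
  }

  public static void main(String[] args) {
    // Session-wide value. Note that replaceFirst("iceberg.hadoop", "") leaves
    // a leading dot on the stripped key, so that is the key we must use here.
    Configuration sessionConf = new Configuration(false);
    sessionConf.set(".parquet.block.size", "134217728");

    Map<String, String> options = new HashMap<>();
    options.put("iceberg.hadoop.parquet.block.size", "67108864");

    Map<String, String> tableProps = new HashMap<>();
    tableProps.put("iceberg.hadoop.parquet.block.size", "268435456");

    Configuration conf = new Configuration(sessionConf);
    merge(conf, options, true);      // options overwrite the session value
    merge(conf, tableProps, false);  // table props fill only unset keys: skipped here
    merge(conf, options, true);      // re-apply options to fix up precedence

    System.out.println(conf.get(".parquet.block.size")); // prints 67108864
  }
}

Under this ordering, a key set session-wide but absent from the options keeps its session value over a table property, which is exactly the precedence question debated in the thread above.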
@@ -20,6 +20,7 @@
package com.netflix.iceberg.spark.source;

import com.netflix.iceberg.Table;
+import org.apache.hadoop.conf.Configuration;
import org.apache.spark.sql.sources.v2.DataSourceOptions;

public class TestIcebergSource extends IcebergSource {
@@ -29,7 +30,7 @@ public String shortName() {
}

@Override
-protected Table findTable(DataSourceOptions options) {
+protected Table findTable(DataSourceOptions options, Configuration conf) {
return TestTables.load(options.get("iceberg.table.name").get());
}
}
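Editor's note: a sketch of how this override might be exercised in a test. The table name, the local master, and the use of the fully qualified class name as the format are assumptions for illustration:

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class TestSourceUsageSketch {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder().master("local[2]").getOrCreate();

    // No "path" option is needed: the override resolves the table through
    // TestTables using "iceberg.table.name" instead. "t1" is hypothetical.
    Dataset<Row> df = spark.read()
        .format("com.netflix.iceberg.spark.source.TestIcebergSource")
        .option("iceberg.table.name", "t1")
        .load();

    df.show();
  }
}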