Merged

Commits (50)
b287072
Tidying up
Jan 27, 2023
3130aa8
Revisited `rewriteRecord` utility to decouple transformer generation …
Jan 27, 2023
5be80db
Rebased `HoodieSparkSqlWriter` and `HoodieSparkRecord` onto using gen…
Jan 27, 2023
122e9c8
Revisited common caches to avoid multiple lookups
Jan 27, 2023
51dff0b
Cleaned up unnecessary utils
Jan 27, 2023
ceacbc6
Rebased rewriting Spark's Rows with renaming to decouple transformer …
Jan 27, 2023
0556d1d
Tidying up
Jan 27, 2023
50a6307
Fixed position map not being updated properly
Jan 27, 2023
150bd85
Fixed array/map handling
Jan 27, 2023
1e062f2
Fixing support of Hudi's custom conversion to String
Jan 27, 2023
757997f
Fixing support of Hudi's custom conversion to Decimals
Jan 27, 2023
b7b706f
Fixing support of Hudi's custom conversion to Date
Jan 27, 2023
65f545d
Fixing unsafe row-writer application
Jan 28, 2023
c79f2df
Adjusted core writing flows merging/inserting to avoid record's copyi…
Jan 28, 2023
37225a0
Fixed usage of the caches
Jan 28, 2023
c367473
Fixing compilation
Jan 28, 2023
8dc8f30
Fixing conversion into String
Jan 28, 2023
f06701b
XXX
Jan 28, 2023
ac1819d
Revisited `MetadataValues` to avoid using `HashMap`
Jan 28, 2023
da4ea35
Rebased `HoodieRecord` impls to throw unchecked exceptions
Jan 28, 2023
dfdbe9c
Avoid rewriting records twice when schema evolution is enabled;
Jan 28, 2023
6d13330
Deduplicated record transformation and meta-fields prepending into 2 …
Jan 28, 2023
6f627a6
Rebased `WriteHandle`s onto new `HoodieRecord` APIs
Jan 28, 2023
7c18557
Fixed `BootstrapMetadataHandler`
Jan 28, 2023
ebad3b3
Fixing compilation
Jan 28, 2023
3509b7e
Fixing FP into Decimal conversion (might involve rounding)
Jan 28, 2023
9c5c9fc
Fixed `AppendHandle` to properly copy record's instance (to avoid bei…
Jan 29, 2023
a3cdd3e
Fixed `HoodieSparkRecord` to properly update meta-fields
Jan 29, 2023
21c1a56
Fixed `composeNestedFieldPath` util to properly handle missing fields
Jan 29, 2023
06ca3c1
Cleaning up
Jan 29, 2023
845b3b1
Tidying up `HoodieInternalRowUtils`;
Jan 29, 2023
b1670bf
Combined tests
Jan 29, 2023
ca13108
De-duplicated `RowWriter` utils
Jan 29, 2023
ab05ada
Rebased `HoodieSparkSqlWriter` onto a new utils
Jan 29, 2023
622be32
Fixing compilation
Jan 29, 2023
e498a67
Reverting accidental changes
Jan 29, 2023
5cb4e7a
Fixing typo
Jan 29, 2023
5446734
Tidying up
Jan 29, 2023
f0ffe7c
Fixed `rewriteRecordWithNewSchema` missing to pass target columns to b…
Jan 29, 2023
b86cca7
Fixed map row-writer to not assume particular impl of `MapData`
Jan 29, 2023
60c6afc
Tidying up
Jan 29, 2023
7832e4f
Disable DS multi-writer test
Jan 30, 2023
4c3b1bc
Fixed `ParquetBootstrapMetadataHandler` to properly instantiate boots…
Jan 30, 2023
d9ab8da
Cleaned up `updateMetadataFields`
Jan 30, 2023
5e9f80f
Fixing `String` to `UTF8String` conversion
Jan 30, 2023
bd0216a
Fixed `HoodieSparkRecord` to avoid truncating list of meta-fields whe…
Jan 30, 2023
a0d21f3
Cleaned up `HoodiesSparkParquetWriter` to avoid unnecessary conversio…
Jan 30, 2023
d379195
Fixed `ParquetBootstrapMetadataHandler` to specify complete list of m…
Jan 30, 2023
3252447
Fixed `HoodieBootstrapHandle` to rely on `METADATA_BOOTSTRAP_RECORD_S…
Jan 30, 2023
24020a9
Strip all meta-fields in `HoodieIncrSource` to make sure these are no…
Jan 31, 2023
@@ -23,11 +23,11 @@
import org.apache.hudi.client.utils.LazyIterableIterator;
import org.apache.hudi.common.engine.TaskContextSupplier;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.util.queue.ExecutorType;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.io.CreateHandleFactory;
import org.apache.hudi.io.WriteHandleFactory;
import org.apache.hudi.table.HoodieTable;
import org.apache.hudi.util.ExecutorFactory;

import java.util.Iterator;
import java.util.List;
@@ -104,7 +104,7 @@ private static <T> Function<HoodieRecord<T>, HoodieInsertValueGenResult<HoodieRe
// it since these records will be subsequently buffered (w/in the in-memory queue);
// Only case when we don't need to make a copy is when using [[SimpleExecutor]] which
// is guaranteed to not hold on to references to any records
boolean shouldClone = writeConfig.getExecutorType() != ExecutorType.SIMPLE;
boolean shouldClone = ExecutorFactory.isBufferingRecords(writeConfig);

return record -> {
HoodieRecord<T> clonedRecord = shouldClone ? record.copy() : record;

@@ -236,29 +236,33 @@ private Option<HoodieRecord> prepareRecord(HoodieRecord<T> hoodieRecord) {
// If the format can not record the operation field, nullify the DELETE payload manually.
boolean nullifyPayload = HoodieOperation.isDelete(hoodieRecord.getOperation()) && !config.allowOperationMetadataField();
recordProperties.put(HoodiePayloadProps.PAYLOAD_IS_UPDATE_RECORD_FOR_MOR, String.valueOf(isUpdateRecord));
Option<HoodieRecord> finalRecord = nullifyPayload ? Option.empty() : Option.of(hoodieRecord);

Option<HoodieRecord> finalRecordOpt = nullifyPayload ? Option.empty() : Option.of(hoodieRecord);
// Check for delete
if (finalRecord.isPresent() && !finalRecord.get().isDelete(schema, recordProperties)) {
// Check for ignore ExpressionPayload
if (finalRecord.get().shouldIgnore(schema, recordProperties)) {
return finalRecord;
if (finalRecordOpt.isPresent() && !finalRecordOpt.get().isDelete(schema, recordProperties)) {
HoodieRecord finalRecord = finalRecordOpt.get();
// Check if the record should be ignored (special case for [[ExpressionPayload]])
if (finalRecord.shouldIgnore(schema, recordProperties)) {
return finalRecordOpt;
}
// Convert GenericRecord to GenericRecord with hoodie commit metadata in schema
HoodieRecord rewrittenRecord = schemaOnReadEnabled ? finalRecord.get().rewriteRecordWithNewSchema(schema, recordProperties, writeSchemaWithMetaFields)
: finalRecord.get().rewriteRecord(schema, recordProperties, writeSchemaWithMetaFields);

// Prepend meta-fields into the record
Contributor Author:

This is the primary change in this file: instead of the sequence

  • rewriteRecord/rewriteRecordWithNewSchema (rewriting the record into a schema bearing the meta-fields)
  • updateMetadataValues

we now call the prependMetaFields API directly, expanding the record's schema with the meta-fields and setting their values at the same time (see the sketch below).
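
A minimal before/after sketch of that call sequence, assuming the HoodieRecord method signatures visible in this diff (variable names are illustrative):

// Before: two passes over the record. First rewrite it into the meta-field-bearing schema,
// then set the meta-field values.
HoodieRecord rewritten = schemaOnReadEnabled
    ? record.rewriteRecordWithNewSchema(schema, props, writeSchemaWithMetaFields)
    : record.rewriteRecord(schema, props, writeSchemaWithMetaFields);
HoodieRecord populated = rewritten.updateMetadataValues(writeSchemaWithMetaFields, props, metadataValues);

// After: a single pass. Expand the record's schema with the meta-fields and set their
// values in the same call.
HoodieRecord prepended = record.prependMetaFields(schema, writeSchemaWithMetaFields, metadataValues, props);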

MetadataValues metadataValues = populateMetadataFields(finalRecord);
HoodieRecord populatedRecord =
finalRecord.prependMetaFields(schema, writeSchemaWithMetaFields, metadataValues, recordProperties);

// NOTE: Record have to be cloned here to make sure if it holds low-level engine-specific
// payload pointing into a shared, mutable (underlying) buffer we get a clean copy of
// it since these records will be put into the recordList(List).
HoodieRecord populatedRecord = populateMetadataFields(rewrittenRecord.copy(), writeSchemaWithMetaFields, recordProperties);
finalRecord = Option.of(populatedRecord);
finalRecordOpt = Option.of(populatedRecord.copy());
if (isUpdateRecord || isLogCompaction) {
updatedRecordsWritten++;
} else {
insertRecordsWritten++;
}
recordsWritten++;
} else {
finalRecord = Option.empty();
finalRecordOpt = Option.empty();
recordsDeleted++;
}

@@ -267,15 +271,15 @@ private Option<HoodieRecord> prepareRecord(HoodieRecord<T> hoodieRecord) {
// part of marking
// record successful.
hoodieRecord.deflate();
return finalRecord;
return finalRecordOpt;
} catch (Exception e) {
LOG.error("Error writing record " + hoodieRecord, e);
writeStatus.markFailure(hoodieRecord, e, recordMetadata);
}
return Option.empty();
}

private HoodieRecord populateMetadataFields(HoodieRecord<T> hoodieRecord, Schema schema, Properties prop) throws IOException {
private MetadataValues populateMetadataFields(HoodieRecord<T> hoodieRecord) {
MetadataValues metadataValues = new MetadataValues();
if (config.populateMetaFields()) {
String seqId =
@@ -292,7 +296,7 @@ private HoodieRecord populateMetadataFields(HoodieRecord<T> hoodieRecord, Schema
metadataValues.setOperation(hoodieRecord.getOperation().getName());
}

return hoodieRecord.updateMetadataValues(schema, prop, metadataValues);
return metadataValues;
}

private void initNewStatus() {

@@ -18,13 +18,19 @@

package org.apache.hudi.io;

import org.apache.hudi.avro.HoodieAvroUtils;
import org.apache.avro.JsonProperties;
import org.apache.avro.Schema;
import org.apache.hudi.common.engine.TaskContextSupplier;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.table.HoodieTable;

import java.util.List;
import java.util.stream.Collectors;

import static org.apache.hudi.avro.AvroSchemaUtils.createNullableSchema;

/**
* This class is essentially same as Create Handle but overrides two things
* 1) Schema : Metadata bootstrap writes only metadata fields as part of write. So, setup the writer schema accordingly.
@@ -34,14 +40,28 @@
*/
public class HoodieBootstrapHandle<T, I, K, O> extends HoodieCreateHandle<T, I, K, O> {

// NOTE: We have to use schema containing all the meta-fields in here b/c unlike for [[HoodieAvroRecord]],
// [[HoodieSparkRecord]] requires records to always bear either all or no meta-fields in the
// record schema (ie partial inclusion of the meta-fields in the schema is not allowed)
public static final Schema METADATA_BOOTSTRAP_RECORD_SCHEMA = createMetadataBootstrapRecordSchema();

public HoodieBootstrapHandle(HoodieWriteConfig config, String commitTime, HoodieTable<T, I, K, O> hoodieTable,
String partitionPath, String fileId, TaskContextSupplier taskContextSupplier) {
super(config, commitTime, hoodieTable, partitionPath, fileId,
Option.of(HoodieAvroUtils.RECORD_KEY_SCHEMA), taskContextSupplier);
Option.of(METADATA_BOOTSTRAP_RECORD_SCHEMA), taskContextSupplier);
}

@Override
public boolean canWrite(HoodieRecord record) {
return true;
}

private static Schema createMetadataBootstrapRecordSchema() {
List<Schema.Field> fields =
HoodieRecord.HOODIE_META_COLUMNS.stream()
.map(metaField ->
new Schema.Field(metaField, createNullableSchema(Schema.Type.STRING), "", JsonProperties.NULL_VALUE))
.collect(Collectors.toList());
return Schema.createRecord("HoodieRecordKey", "", "", false, fields);
}
}
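
For reference, a rough sketch of the schema this produces, assuming the five standard Hudi meta-columns (the handle builds it from HoodieRecord.HOODIE_META_COLUMNS rather than hard-coding the names), expressed with Avro's SchemaBuilder for brevity:

// Sketch only: approximate equivalent of METADATA_BOOTSTRAP_RECORD_SCHEMA,
// i.e. a record whose fields are all nullable strings defaulting to null.
Schema sketch = SchemaBuilder.record("HoodieRecordKey").fields()
    .optionalString("_hoodie_commit_time")
    .optionalString("_hoodie_commit_seqno")
    .optionalString("_hoodie_record_key")
    .optionalString("_hoodie_partition_path")
    .optionalString("_hoodie_file_name")
    .endRecord();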

@@ -136,24 +136,22 @@ protected void doWrite(HoodieRecord record, Schema schema, TypedProperties props
if (record.shouldIgnore(schema, config.getProps())) {
return;
}
// Convert GenericRecord to GenericRecord with hoodie commit metadata in schema
HoodieRecord rewriteRecord;
if (schemaOnReadEnabled) {
rewriteRecord = record.rewriteRecordWithNewSchema(schema, config.getProps(), writeSchemaWithMetaFields);
} else {
rewriteRecord = record.rewriteRecord(schema, config.getProps(), writeSchemaWithMetaFields);
}

MetadataValues metadataValues = new MetadataValues().setFileName(path.getName());
rewriteRecord = rewriteRecord.updateMetadataValues(writeSchemaWithMetaFields, config.getProps(), metadataValues);
HoodieRecord populatedRecord =
Contributor Author:

Change similar to #7769 (comment)

record.prependMetaFields(schema, writeSchemaWithMetaFields, metadataValues, config.getProps());

if (preserveMetadata) {
fileWriter.write(record.getRecordKey(), rewriteRecord, writeSchemaWithMetaFields);
fileWriter.write(record.getRecordKey(), populatedRecord, writeSchemaWithMetaFields);
} else {
fileWriter.writeWithMetadata(record.getKey(), rewriteRecord, writeSchemaWithMetaFields);
fileWriter.writeWithMetadata(record.getKey(), populatedRecord, writeSchemaWithMetaFields);
}
// update the new location of record, so we know where to find it next

// Update the new location of record, so we know where to find it next
record.unseal();
record.setNewLocation(new HoodieRecordLocation(instantTime, writeStatus.getFileId()));
record.seal();

recordsWritten++;
insertRecordsWritten++;
} else {

@@ -374,20 +374,16 @@ public void write(HoodieRecord<T> oldRecord) {
}

protected void writeToFile(HoodieKey key, HoodieRecord<T> record, Schema schema, Properties prop, boolean shouldPreserveRecordMetadata) throws IOException {
HoodieRecord rewriteRecord;
if (schemaOnReadEnabled) {
rewriteRecord = record.rewriteRecordWithNewSchema(schema, prop, writeSchemaWithMetaFields);
} else {
rewriteRecord = record.rewriteRecord(schema, prop, writeSchemaWithMetaFields);
}
// NOTE: `FILENAME_METADATA_FIELD` has to be rewritten to correctly point to the
// file holding this record even in cases when overall metadata is preserved
MetadataValues metadataValues = new MetadataValues().setFileName(newFilePath.getName());
rewriteRecord = rewriteRecord.updateMetadataValues(writeSchemaWithMetaFields, prop, metadataValues);
HoodieRecord populatedRecord =
Contributor Author:

Change similar to #7769 (comment)

record.prependMetaFields(schema, writeSchemaWithMetaFields, metadataValues, prop);

if (shouldPreserveRecordMetadata) {
fileWriter.write(key.getRecordKey(), rewriteRecord, writeSchemaWithMetaFields);
fileWriter.write(key.getRecordKey(), populatedRecord, writeSchemaWithMetaFields);
} else {
fileWriter.writeWithMetadata(key, rewriteRecord, writeSchemaWithMetaFields);
fileWriter.writeWithMetadata(key, populatedRecord, writeSchemaWithMetaFields);
}
}


@@ -95,7 +95,7 @@ protected HoodieWriteHandle(HoodieWriteConfig config, String instantTime, String
!hoodieTable.getIndex().isImplicitWithStorage(), config.getWriteStatusFailureFraction());
this.taskContextSupplier = taskContextSupplier;
this.writeToken = makeWriteToken();
schemaOnReadEnabled = !isNullOrEmpty(hoodieTable.getConfig().getInternalSchema());
this.schemaOnReadEnabled = !isNullOrEmpty(hoodieTable.getConfig().getInternalSchema());
this.recordMerger = config.getRecordMerger();
}


@@ -27,8 +27,6 @@
import org.apache.hudi.common.util.ClosableIterator;
import org.apache.hudi.common.util.InternalSchemaCache;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.collection.MappingIterator;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.common.util.queue.HoodieExecutor;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.exception.HoodieException;
@@ -94,8 +92,8 @@ public void runMerge(HoodieTable<?, ?, ?, ?> table,

// In case Advanced Schema Evolution is enabled we might need to rewrite currently
// persisted records to adhere to an evolved schema
Option<Pair<Function<Schema, Function<HoodieRecord, HoodieRecord>>, Schema>> schemaEvolutionTransformerOpt =
composeSchemaEvolutionTransformer(writerSchema, baseFile, writeConfig, table.getMetaClient());
Option<Function<HoodieRecord, HoodieRecord>> schemaEvolutionTransformerOpt =
Contributor Author:

Simplifying the implementation by passing the reader schema into the method

composeSchemaEvolutionTransformer(readerSchema, writerSchema, baseFile, writeConfig, table.getMetaClient());
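
For contrast, a call-site sketch of the old vs. new shape of this API (grounded in the signatures shown in this diff; variable names are illustrative):

// Old shape: unpack the Pair, apply the record schema to obtain the transformer,
// and track the evolved schema separately:
//   Option<Pair<Function<Schema, Function<HoodieRecord, HoodieRecord>>, Schema>> oldOpt = ...;
//   HoodieRecord transformed = oldOpt.get().getLeft().apply(readerSchema).apply(record);
//   Schema evolvedSchema = oldOpt.get().getRight();

// New shape: the reader schema is passed in up front, so the transformer is applied directly.
Option<Function<HoodieRecord, HoodieRecord>> transformerOpt =
    composeSchemaEvolutionTransformer(readerSchema, writerSchema, baseFile, writeConfig, table.getMetaClient());
HoodieRecord transformed = transformerOpt.isPresent() ? transformerOpt.get().apply(record) : record;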

// Check whether the writer schema is simply a projection of the file's one, ie
// - Its field-set is a proper subset (of the reader schema)
@@ -130,29 +128,27 @@ public void runMerge(HoodieTable<?, ?, ?, ?> table,
(left, right) ->
left.joinWith(right, mergeHandle.getWriterSchemaWithMetaFields()));
recordSchema = mergeHandle.getWriterSchemaWithMetaFields();
} else if (schemaEvolutionTransformerOpt.isPresent()) {
recordIterator = new MappingIterator<>(baseFileRecordIterator,
schemaEvolutionTransformerOpt.get().getLeft().apply(isPureProjection ? writerSchema : readerSchema));
recordSchema = schemaEvolutionTransformerOpt.get().getRight();
} else {
recordIterator = baseFileRecordIterator;
recordSchema = isPureProjection ? writerSchema : readerSchema;
}

boolean isBufferingRecords = ExecutorFactory.isBufferingRecords(writeConfig);

wrapper = ExecutorFactory.create(writeConfig, recordIterator, new UpdateHandler(mergeHandle), record -> {
HoodieRecord newRecord;
if (schemaEvolutionTransformerOpt.isPresent()) {
Contributor Author:

The schema evolution transformer is now applied inside the executor's transform function, as opposed to via a MappingIterator as before

newRecord = schemaEvolutionTransformerOpt.get().apply(record);
} else if (shouldRewriteInWriterSchema) {
newRecord = record.rewriteRecordWithNewSchema(recordSchema, writeConfig.getProps(), writerSchema);
} else {
newRecord = record;
}

// NOTE: Record have to be cloned here to make sure if it holds low-level engine-specific
// payload pointing into a shared, mutable (underlying) buffer we get a clean copy of
// it since these records will be put into queue of QueueBasedExecutorFactory.
if (shouldRewriteInWriterSchema) {
try {
return record.rewriteRecordWithNewSchema(recordSchema, writeConfig.getProps(), writerSchema).copy();
} catch (IOException e) {
LOG.error("Error rewrite record with new schema", e);
throw new HoodieException(e);
}
} else {
return record.copy();
}
return isBufferingRecords ? newRecord.copy() : newRecord;
Contributor:

👍

}, table.getPreExecuteRunnable());

wrapper.execute();
@@ -173,10 +169,11 @@
}
}

private Option<Pair<Function<Schema, Function<HoodieRecord, HoodieRecord>>, Schema>> composeSchemaEvolutionTransformer(Schema writerSchema,
HoodieBaseFile baseFile,
HoodieWriteConfig writeConfig,
HoodieTableMetaClient metaClient) {
private Option<Function<HoodieRecord, HoodieRecord>> composeSchemaEvolutionTransformer(Schema recordSchema,
Schema writerSchema,
HoodieBaseFile baseFile,
HoodieWriteConfig writeConfig,
HoodieTableMetaClient metaClient) {
Option<InternalSchema> querySchemaOpt = SerDeHelper.fromJson(writeConfig.getInternalSchema());
// TODO support bootstrap
if (querySchemaOpt.isPresent() && !baseFile.getBootstrapBaseFile().isPresent()) {
@@ -214,18 +211,12 @@ private Option<Pair<Function<Schema, Function<HoodieRecord, HoodieRecord>>, Sche
|| SchemaCompatibility.checkReaderWriterCompatibility(newWriterSchema, writeSchemaFromFile).getType() == org.apache.avro.SchemaCompatibility.SchemaCompatibilityType.COMPATIBLE;
if (needToReWriteRecord) {
Map<String, String> renameCols = InternalSchemaUtils.collectRenameCols(writeInternalSchema, querySchema);
return Option.of(Pair.of(
(schema) -> (record) -> {
try {
return record.rewriteRecordWithNewSchema(
schema,
writeConfig.getProps(),
newWriterSchema, renameCols);
} catch (IOException e) {
LOG.error("Error rewrite record with new schema", e);
throw new HoodieException(e);
}
}, newWriterSchema));
return Option.of(record -> {
return record.rewriteRecordWithNewSchema(
recordSchema,
writeConfig.getProps(),
newWriterSchema, renameCols);
});
} else {
return Option.empty();
}

@@ -33,31 +33,47 @@

public class ExecutorFactory {

public static <I, O, E> HoodieExecutor<E> create(HoodieWriteConfig hoodieConfig,
public static <I, O, E> HoodieExecutor<E> create(HoodieWriteConfig config,
Iterator<I> inputItr,
HoodieConsumer<O, E> consumer,
Function<I, O> transformFunction) {
return create(hoodieConfig, inputItr, consumer, transformFunction, Functions.noop());
return create(config, inputItr, consumer, transformFunction, Functions.noop());
}

public static <I, O, E> HoodieExecutor<E> create(HoodieWriteConfig hoodieConfig,
public static <I, O, E> HoodieExecutor<E> create(HoodieWriteConfig config,
Iterator<I> inputItr,
HoodieConsumer<O, E> consumer,
Function<I, O> transformFunction,
Runnable preExecuteRunnable) {
ExecutorType executorType = hoodieConfig.getExecutorType();

ExecutorType executorType = config.getExecutorType();
switch (executorType) {
case BOUNDED_IN_MEMORY:
return new BoundedInMemoryExecutor<>(hoodieConfig.getWriteBufferLimitBytes(), inputItr, consumer,
return new BoundedInMemoryExecutor<>(config.getWriteBufferLimitBytes(), inputItr, consumer,
transformFunction, preExecuteRunnable);
case DISRUPTOR:
return new DisruptorExecutor<>(hoodieConfig.getWriteExecutorDisruptorWriteBufferLimitBytes(), inputItr, consumer,
transformFunction, hoodieConfig.getWriteExecutorDisruptorWaitStrategy(), preExecuteRunnable);
return new DisruptorExecutor<>(config.getWriteExecutorDisruptorWriteBufferLimitBytes(), inputItr, consumer,
transformFunction, config.getWriteExecutorDisruptorWaitStrategy(), preExecuteRunnable);
case SIMPLE:
return new SimpleExecutor<>(inputItr, consumer, transformFunction);
default:
throw new HoodieException("Unsupported Executor Type " + executorType);
}
}

/**
* Checks whether configured {@link HoodieExecutor} buffer records (for ex, by holding them
* in the queue)
*/
public static boolean isBufferingRecords(HoodieWriteConfig config) {
Member:

Why not make this a property of ExecutorType, so we don't need this extra helper?

Contributor Author:

Good call. Will address in a follow-up (to avoid re-running CI)

Contributor Author:

I actually realized that this is unfortunately not possible: the copying happens inside the transformers that we pass as constructor args to the respective Executor, so we can't just call a method on the executor instance.

ExecutorType executorType = config.getExecutorType();
switch (executorType) {
case BOUNDED_IN_MEMORY:
case DISRUPTOR:
return true;
case SIMPLE:
return false;
default:
throw new HoodieException("Unsupported Executor Type " + executorType);
}
}
}
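
A short usage sketch, mirroring the call sites earlier in this diff: a record is cloned only when the configured executor buffers records (for example in an in-memory queue), since a buffered record may hold an engine-specific payload pointing into a shared, mutable buffer.

// Usage sketch (variable names are illustrative):
boolean shouldClone = ExecutorFactory.isBufferingRecords(writeConfig);
HoodieRecord safeRecord = shouldClone ? record.copy() : record;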