@@ -301,6 +301,7 @@ private String getDefaultIndexType(EngineType engineType) {
case SPARK:
return HoodieIndex.IndexType.BLOOM.name();
case FLINK:
case JAVA:
return HoodieIndex.IndexType.INMEMORY.name();
default:
throw new HoodieNotSupportedException("Unsupported engine " + engineType);
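For reference, a minimal sketch of what the new JAVA branch means for a writer built with the Java engine. The base path is illustrative, and it is assumed that the builder's withEngineType hook and the hoodie.index.type default resolution behave as in the existing Spark/Flink paths:

    // Sketch only: with no explicit hoodie.index.type, a Java-engine write config
    // is expected to fall back to the in-memory index after this change.
    HoodieWriteConfig writeConfig = HoodieWriteConfig.newBuilder()
        .withPath("/tmp/hoodie/sample_table")   // illustrative base path
        .withEngineType(EngineType.JAVA)        // assumes the existing engine-type builder hook
        .build();
    // Expected default (assumption): HoodieIndex.IndexType.INMEMORY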
13 changes: 13 additions & 0 deletions hudi-client/hudi-java-client/pom.xml
@@ -66,6 +66,19 @@
<version>${project.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>${hive.groupid}</groupId>
<artifactId>hive-exec</artifactId>
<version>${hive.version}</version>
<scope>test</scope>
<classifier>${hive.exec.classifier}</classifier>
</dependency>
Comment on lines +69 to +75 (Contributor):

do not need this dependency?

<dependency>
      <groupId>${hive.groupid}</groupId>
      <artifactId>hive-metastore</artifactId>
      <version>${hive.version}</version>
      <scope>test</scope>
    </dependency>

<dependency>
<groupId>${hive.groupid}</groupId>
<artifactId>hive-metastore</artifactId>
<version>${hive.version}</version>
<scope>test</scope>
</dependency>

<!-- Test -->
<dependency>
@@ -0,0 +1,41 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hudi.execution.bulkinsert;

import org.apache.hudi.exception.HoodieException;
import org.apache.hudi.table.BulkInsertPartitioner;

/**
* A factory to generate the built-in partitioner used to arrange input records
* for the bulk insert operation in the Java client.
*/
public abstract class JavaBulkInsertInternalPartitionerFactory {

public static BulkInsertPartitioner get(BulkInsertSortMode sortMode) {
switch (sortMode) {
case NONE:
return new JavaNonSortPartitioner();
case GLOBAL_SORT:
return new JavaGlobalSortPartitioner();
default:
throw new HoodieException("The bulk insert sort mode \"" + sortMode.name()
+ "\" is not supported in java client.");
}
}
}
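A minimal usage sketch of the factory above; MyPayload and records are illustrative stand-ins, and the raw return type of get(...) is cast to the list-based partitioner the Java client works with:

    // Sketch: pick a partitioner for the Java client and arrange a record batch with it.
    // Only NONE and GLOBAL_SORT are supported here; other modes throw HoodieException.
    @SuppressWarnings("unchecked")
    BulkInsertPartitioner<List<HoodieRecord<MyPayload>>> partitioner =
        (BulkInsertPartitioner<List<HoodieRecord<MyPayload>>>)
            JavaBulkInsertInternalPartitionerFactory.get(BulkInsertSortMode.GLOBAL_SORT);
    // The parallelism hint is ignored by these single-node partitioners.
    List<HoodieRecord<MyPayload>> arranged = partitioner.repartitionRecords(records, 1);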
@@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hudi.execution.bulkinsert;

import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieRecordPayload;
import org.apache.hudi.table.BulkInsertPartitioner;

import java.util.Comparator;
import java.util.List;

/**
* A built-in partitioner that globally sorts the input records by partition path and
* record key for the bulk insert operation, corresponding to the
* {@code BulkInsertSortMode.GLOBAL_SORT} mode.
*
* @param <T> HoodieRecordPayload type
*/
public class JavaGlobalSortPartitioner<T extends HoodieRecordPayload>
implements BulkInsertPartitioner<List<HoodieRecord<T>>> {

@Override
public List<HoodieRecord<T>> repartitionRecords(List<HoodieRecord<T>> records,
int outputSparkPartitions) {
// Now, sort the records and line them up nicely for loading.
records.sort(new Comparator() {
@Override
public int compare(Object o1, Object o2) {
HoodieRecord o11 = (HoodieRecord) o1;
HoodieRecord o22 = (HoodieRecord) o2;
String left = new StringBuilder()
.append(o11.getPartitionPath())
.append("+")
.append(o11.getRecordKey())
.toString();
String right = new StringBuilder()
.append(o22.getPartitionPath())
.append("+")
.append(o22.getRecordKey())
.toString();
return left.compareTo(right);
}
});
return records;
}

@Override
public boolean arePartitionRecordsSorted() {
return true;
}
}
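The anonymous comparator above orders records by the concatenation partitionPath + "+" + recordKey, so records land grouped by partition path and key-ordered within it. A behavior-preserving sketch of the same ordering with a typed comparator, for comparison:

    // Equivalent ordering (sketch): sort by partition path, then record key.
    records.sort(Comparator.comparing(
        (HoodieRecord<T> r) -> r.getPartitionPath() + "+" + r.getRecordKey()));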
@@ -0,0 +1,46 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hudi.execution.bulkinsert;

import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieRecordPayload;
import org.apache.hudi.table.BulkInsertPartitioner;

import java.util.List;

/**
* A built-in partitioner that returns the input records unchanged (no sorting) for the
* bulk insert operation, corresponding to the {@code BulkInsertSortMode.NONE} mode.
*
* @param <T> HoodieRecordPayload type
*/
public class JavaNonSortPartitioner<T extends HoodieRecordPayload>
implements BulkInsertPartitioner<List<HoodieRecord<T>>> {

@Override
public List<HoodieRecord<T>> repartitionRecords(List<HoodieRecord<T>> records,
int outputPartitions) {
return records;
}

@Override
public boolean arePartitionRecordsSorted() {
return false;
}
}
@@ -39,10 +39,17 @@
import org.apache.hudi.table.action.bootstrap.HoodieBootstrapWriteMetadata;
import org.apache.hudi.table.action.clean.JavaCleanActionExecutor;
import org.apache.hudi.table.action.commit.JavaDeleteCommitActionExecutor;
import org.apache.hudi.table.action.commit.JavaBulkInsertCommitActionExecutor;
import org.apache.hudi.table.action.commit.JavaBulkInsertPreppedCommitActionExecutor;
import org.apache.hudi.table.action.commit.JavaInsertCommitActionExecutor;
import org.apache.hudi.table.action.commit.JavaInsertOverwriteCommitActionExecutor;
import org.apache.hudi.table.action.commit.JavaInsertOverwriteTableCommitActionExecutor;
import org.apache.hudi.table.action.commit.JavaInsertPreppedCommitActionExecutor;
import org.apache.hudi.table.action.commit.JavaUpsertCommitActionExecutor;
import org.apache.hudi.table.action.commit.JavaUpsertPreppedCommitActionExecutor;
import org.apache.hudi.table.action.restore.JavaCopyOnWriteRestoreActionExecutor;
import org.apache.hudi.table.action.rollback.JavaCopyOnWriteRollbackActionExecutor;
import org.apache.hudi.table.action.savepoint.SavepointActionExecutor;

import java.util.List;
import java.util.Map;
@@ -75,7 +82,8 @@ public HoodieWriteMetadata<List<WriteStatus>> bulkInsert(HoodieEngineContext con
String instantTime,
List<HoodieRecord<T>> records,
Option<BulkInsertPartitioner<List<HoodieRecord<T>>>> bulkInsertPartitioner) {
throw new HoodieNotSupportedException("BulkInsert is not supported yet");
return new JavaBulkInsertCommitActionExecutor((HoodieJavaEngineContext) context, config,
this, instantTime, records, bulkInsertPartitioner).execute();
}

@Override
@@ -112,21 +120,24 @@ public HoodieWriteMetadata<List<WriteStatus>> bulkInsertPrepped(HoodieEngineCont
String instantTime,
List<HoodieRecord<T>> preppedRecords,
Option<BulkInsertPartitioner<List<HoodieRecord<T>>>> bulkInsertPartitioner) {
throw new HoodieNotSupportedException("BulkInsertPrepped is not supported yet");
return new JavaBulkInsertPreppedCommitActionExecutor((HoodieJavaEngineContext) context, config,
this, instantTime, preppedRecords, bulkInsertPartitioner).execute();
}

@Override
public HoodieWriteMetadata<List<WriteStatus>> insertOverwrite(HoodieEngineContext context,
String instantTime,
List<HoodieRecord<T>> records) {
throw new HoodieNotSupportedException("InsertOverwrite is not supported yet");
return new JavaInsertOverwriteCommitActionExecutor(
context, config, this, instantTime, records).execute();
}

@Override
public HoodieWriteMetadata<List<WriteStatus>> insertOverwriteTable(HoodieEngineContext context,
String instantTime,
List<HoodieRecord<T>> records) {
throw new HoodieNotSupportedException("InsertOverwrite is not supported yet");
return new JavaInsertOverwriteTableCommitActionExecutor(
context, config, this, instantTime, records).execute();
}

@Override
@@ -175,21 +186,24 @@ public HoodieRollbackMetadata rollback(HoodieEngineContext context,
String rollbackInstantTime,
HoodieInstant commitInstant,
boolean deleteInstants) {
throw new HoodieNotSupportedException("Rollback is not supported yet");
return new JavaCopyOnWriteRollbackActionExecutor(
context, config, this, rollbackInstantTime, commitInstant, deleteInstants).execute();
}

@Override
public HoodieSavepointMetadata savepoint(HoodieEngineContext context,
String instantToSavepoint,
String user,
String comment) {
throw new HoodieNotSupportedException("Savepoint is not supported yet");
return new SavepointActionExecutor(
context, config, this, instantToSavepoint, user, comment).execute();
}

@Override
public HoodieRestoreMetadata restore(HoodieEngineContext context,
String restoreInstantTime,
String instantToRestore) {
throw new HoodieNotSupportedException("Restore is not supported yet");
return new JavaCopyOnWriteRestoreActionExecutor((HoodieJavaEngineContext) context,
config, this, restoreInstantTime, instantToRestore).execute();
}
}
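With these executors wired in, the corresponding entry points of the Java write client should stop throwing HoodieNotSupportedException. A minimal end-to-end sketch, assuming the existing HoodieJavaWriteClient API (startCommit/bulkInsert) and illustrative engineContext, writeConfig and records:

    // Sketch: a bulk insert issued through the Java write client routes to
    // JavaBulkInsertCommitActionExecutor via HoodieJavaCopyOnWriteTable.bulkInsert.
    HoodieJavaWriteClient<MyPayload> client =
        new HoodieJavaWriteClient<>(engineContext, writeConfig);
    String instantTime = client.startCommit();
    List<WriteStatus> statuses = client.bulkInsert(records, instantTime);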
@@ -121,6 +121,7 @@ public HoodieWriteMetadata<List<WriteStatus>> execute(List<HoodieRecord<T>> inpu
}
});
updateIndex(writeStatuses, result);
updateIndexAndCommitIfNeeded(writeStatuses, result);
return result;
}

@@ -297,8 +298,7 @@ protected HoodieMergeHandle getUpdateHandle(String partitionPath, String fileId,
}

@Override
public Iterator<List<WriteStatus>> handleInsert(String idPfx, Iterator<HoodieRecord<T>> recordItr)
throws Exception {
public Iterator<List<WriteStatus>> handleInsert(String idPfx, Iterator<HoodieRecord<T>> recordItr) {
// This is needed since sometimes some buckets are never picked in getPartition() and end up with 0 records
if (!recordItr.hasNext()) {
LOG.info("Empty partition");
@@ -325,4 +325,13 @@ public Partitioner getInsertPartitioner(WorkloadProfile profile) {
return getUpsertPartitioner(profile);
}

public void updateIndexAndCommitIfNeeded(List<WriteStatus> writeStatuses, HoodieWriteMetadata result) {
Instant indexStartTime = Instant.now();
// Update the index back
List<WriteStatus> statuses = table.getIndex().updateLocation(writeStatuses, context, table);
result.setIndexUpdateDuration(Duration.between(indexStartTime, Instant.now()));
result.setWriteStatuses(statuses);
result.setPartitionToReplaceFileIds(getPartitionToReplacedFileIds(statuses));
commitOnAutoCommit(result);
}
}
@@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hudi.table.action.commit;

import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.client.common.HoodieJavaEngineContext;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieRecordPayload;
import org.apache.hudi.common.model.WriteOperationType;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.exception.HoodieInsertException;
import org.apache.hudi.table.BulkInsertPartitioner;
import org.apache.hudi.table.HoodieTable;
import org.apache.hudi.table.action.HoodieWriteMetadata;

import java.util.List;
import java.util.Map;

public class JavaBulkInsertCommitActionExecutor<T extends HoodieRecordPayload<T>> extends BaseJavaCommitActionExecutor<T> {

private final List<HoodieRecord<T>> inputRecords;
private final Option<BulkInsertPartitioner<List<HoodieRecord<T>>>> bulkInsertPartitioner;

public JavaBulkInsertCommitActionExecutor(HoodieJavaEngineContext context, HoodieWriteConfig config, HoodieTable table,
String instantTime, List<HoodieRecord<T>> inputRecords,
Option<BulkInsertPartitioner<List<HoodieRecord<T>>>> bulkInsertPartitioner) {
this(context, config, table, instantTime, inputRecords, bulkInsertPartitioner, Option.empty());
}

public JavaBulkInsertCommitActionExecutor(HoodieJavaEngineContext context, HoodieWriteConfig config, HoodieTable table,
String instantTime, List<HoodieRecord<T>> inputRecords,
Option<BulkInsertPartitioner<List<HoodieRecord<T>>>> bulkInsertPartitioner,
Option<Map<String, String>> extraMetadata) {
super(context, config, table, instantTime, WriteOperationType.BULK_INSERT, extraMetadata);
this.inputRecords = inputRecords;
this.bulkInsertPartitioner = bulkInsertPartitioner;
}

@Override
public HoodieWriteMetadata<List<WriteStatus>> execute() {
try {
return JavaBulkInsertHelper.newInstance().bulkInsert(inputRecords, instantTime, table, config,
this, true, bulkInsertPartitioner);
} catch (HoodieInsertException ie) {
throw ie;
} catch (Throwable e) {
throw new HoodieInsertException("Failed to bulk insert for commit time " + instantTime, e);
}
}
}