Merged
MetadataCommand.java (new file)
@@ -0,0 +1,226 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hudi.cli.commands;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.cli.HoodieCLI;
import org.apache.hudi.cli.utils.SparkUtil;
import org.apache.hudi.client.common.HoodieSparkEngineContext;
import org.apache.hudi.common.config.HoodieMetadataConfig;
import org.apache.hudi.common.util.HoodieTimer;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.metadata.HoodieBackedTableMetadata;
import org.apache.hudi.metadata.HoodieTableMetadata;
import org.apache.hudi.metadata.SparkHoodieBackedTableMetadataWriter;

import org.apache.spark.api.java.JavaSparkContext;
import org.springframework.shell.core.CommandMarker;
import org.springframework.shell.core.annotation.CliCommand;
import org.springframework.shell.core.annotation.CliOption;
import org.springframework.stereotype.Component;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

/**
* CLI commands to operate on the Metadata Table.
*/
@Component
public class MetadataCommand implements CommandMarker {

private JavaSparkContext jsc;
private static String metadataBaseDirectory;

/**
* Sets the directory to store/read Metadata Table.
*
* This can be used to store the metadata table away from the dataset directory.
* - Useful for testing, and for use via the HUDI CLI, so that the actual dataset is not written to.
* - Useful for testing Metadata Table performance and operations on existing datasets before enabling.
*/
public static void setMetadataBaseDirectory(String metadataDir) {
ValidationUtils.checkState(metadataBaseDirectory == null,
"metadataBaseDirectory is already set to " + metadataBaseDirectory);
metadataBaseDirectory = metadataDir;
}

public static String getMetadataTableBasePath(String tableBasePath) {
if (metadataBaseDirectory != null) {
return metadataBaseDirectory;
}
return HoodieTableMetadata.getMetadataTableBasePath(tableBasePath);
}

@CliCommand(value = "metadata set", help = "Set options for Metadata Table")
public String set(@CliOption(key = {"metadataDir"},
help = "Directory to read/write metadata table (can be different from dataset)", unspecifiedDefaultValue = "")
final String metadataDir) {
if (!metadataDir.isEmpty()) {
setMetadataBaseDirectory(metadataDir);
}

return "Ok";
}
Comment on lines +75 to +84
[Contributor] Does it make sense to have this configurability in the CLI? During write operations this field is not configurable, so metadata will always be updated at tableBasePath + '.hoodie/metadata/'.
[Member, Author] cc @prashantwason to follow on, with this thinking

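To make the path resolution above concrete, here is a hedged example (the dataset path is hypothetical; the '.hoodie/metadata' suffix follows the reviewer's note and HoodieTableMetadata.getMetadataTableBasePath):

    String basePath = "/data/warehouse/my_table";              // hypothetical dataset path
    String metadataPath = getMetadataTableBasePath(basePath);
    // With no "metadata set" override, metadataBaseDirectory is null, so this
    // falls through to HoodieTableMetadata.getMetadataTableBasePath(basePath),
    // i.e. "/data/warehouse/my_table/.hoodie/metadata"
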
@CliCommand(value = "metadata create", help = "Create the Metadata Table if it does not exist")
public String create() throws IOException {
HoodieCLI.getTableMetaClient();
Path metadataPath = new Path(getMetadataTableBasePath(HoodieCLI.basePath));
try {
FileStatus[] statuses = HoodieCLI.fs.listStatus(metadataPath);
if (statuses.length > 0) {
throw new RuntimeException("Metadata directory (" + metadataPath.toString() + ") not empty.");
}
} catch (FileNotFoundException e) {
// Metadata directory does not exist yet
HoodieCLI.fs.mkdirs(metadataPath);
}

HoodieTimer timer = new HoodieTimer().startTimer();
HoodieWriteConfig writeConfig = getWriteConfig();
initJavaSparkContext();
SparkHoodieBackedTableMetadataWriter.create(HoodieCLI.conf, writeConfig, new HoodieSparkEngineContext(jsc));
return String.format("Created Metadata Table in %s (duration=%.2f secs)", metadataPath, timer.endTimer() / 1000.0);
}
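For orientation, a hypothetical hudi-cli exchange using the two commands above (path and timing are illustrative; the output lines follow the String.format strings in this file):

    metadata set --metadataDir /tmp/hudi-metadata
    Ok
    metadata create
    Created Metadata Table in /tmp/hudi-metadata (duration=2.31 secs)
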

@CliCommand(value = "metadata delete", help = "Remove the Metadata Table")
public String delete() throws Exception {
HoodieCLI.getTableMetaClient();
Path metadataPath = new Path(getMetadataTableBasePath(HoodieCLI.basePath));
try {
FileStatus[] statuses = HoodieCLI.fs.listStatus(metadataPath);
[Contributor] [Minor] Rather than listing, we should be able to just call delete on the path if it exists.
[Member, Author] cc @prashantwason, might be good to collect these in a JIRA and fix in a follow-on?
if (statuses.length > 0) {
HoodieCLI.fs.delete(metadataPath, true);
}
} catch (FileNotFoundException e) {
// Metadata directory does not exist
}

return String.format("Removed Metadata Table from %s", metadataPath);
}
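A hedged sketch of the reviewer's suggested simplification, using the standard Hadoop FileSystem exists/delete calls instead of listStatus (a possible follow-on, not part of this PR):

    // Avoids the listing and the FileNotFoundException handling above.
    Path metadataPath = new Path(getMetadataTableBasePath(HoodieCLI.basePath));
    if (HoodieCLI.fs.exists(metadataPath)) {
      HoodieCLI.fs.delete(metadataPath, true);  // recursive delete
    }
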

@CliCommand(value = "metadata init", help = "Update the metadata table from commits since the creation")
[Contributor] [Minor] Maybe rename to "metadata sync", based on the information provided in help. This command does not initialize the metadata.
public String init(@CliOption(key = {"readonly"}, unspecifiedDefaultValue = "false",
[Contributor @rmpifer, Jan 3, 2021] [Minor] Not sure what the purpose of the readonly config is here. This command doesn't return any values; its only purpose seems to be to perform a write operation.
[Member, Author] cc @prashantwason again
help = "Open in read-only mode") final boolean readOnly) throws Exception {
HoodieCLI.getTableMetaClient();
Path metadataPath = new Path(getMetadataTableBasePath(HoodieCLI.basePath));
try {
HoodieCLI.fs.listStatus(metadataPath);
} catch (FileNotFoundException e) {
// Metadata directory does not exist
throw new RuntimeException("Metadata directory (" + metadataPath.toString() + ") does not exist.");
}

HoodieTimer timer = new HoodieTimer().startTimer();
if (!readOnly) {
HoodieWriteConfig writeConfig = getWriteConfig();
initJavaSparkContext();
SparkHoodieBackedTableMetadataWriter.create(HoodieCLI.conf, writeConfig, new HoodieSparkEngineContext(jsc));
}

String action = readOnly ? "Opened" : "Initialized";
return String.format(action + " Metadata Table in %s (duration=%.2fsec)", metadataPath, (timer.endTimer()) / 1000.0);
}

@CliCommand(value = "metadata stats", help = "Print stats about the metadata")
public String stats() throws IOException {
HoodieCLI.getTableMetaClient();
HoodieBackedTableMetadata metadata = new HoodieBackedTableMetadata(HoodieCLI.conf, HoodieCLI.basePath, "/tmp", true, false, false);
Map<String, String> stats = metadata.stats();

StringBuffer out = new StringBuffer("\n");
out.append(String.format("Base path: %s\n", getMetadataTableBasePath(HoodieCLI.basePath)));
for (Map.Entry<String, String> entry : stats.entrySet()) {
out.append(String.format("%s: %s\n", entry.getKey(), entry.getValue()));
}

return out.toString();
}

@CliCommand(value = "metadata list-partitions", help = "Print a list of all partitions from the metadata")
public String listPartitions() throws IOException {
HoodieCLI.getTableMetaClient();
HoodieBackedTableMetadata metadata = new HoodieBackedTableMetadata(HoodieCLI.conf, HoodieCLI.basePath, "/tmp", true, false, false);

StringBuffer out = new StringBuffer("\n");
if (!metadata.enabled()) {
out.append("=== Metadata Table not initilized. Using file listing to get list of partitions. ===\n\n");
}

long t1 = System.currentTimeMillis();
List<String> partitions = metadata.getAllPartitionPaths();
long t2 = System.currentTimeMillis();

int[] count = {0};
partitions.stream().sorted((p1, p2) -> p2.compareTo(p1)).forEach(p -> {
out.append(p);
if (++count[0] % 15 == 0) {
out.append("\n");
} else {
out.append(", ");
}
});

out.append(String.format("\n\n=== List of partitions retrieved in %.2fsec ===", (t2 - t1) / 1000.0));

return out.toString();
}

@CliCommand(value = "metadata list-files", help = "Print a list of all files in a partition from the metadata")
public String listFiles(
@CliOption(key = {"partition"}, help = "Name of the partition to list files", mandatory = true)
final String partition) throws IOException {
HoodieCLI.getTableMetaClient();
HoodieBackedTableMetadata metaReader = new HoodieBackedTableMetadata(HoodieCLI.conf, HoodieCLI.basePath, "/tmp", true, false, false);

StringBuffer out = new StringBuffer("\n");
if (!metaReader.enabled()) {
out.append("=== Metadata Table not initialized. Using file listing to get list of files in partition. ===\n\n");
}

long t1 = System.currentTimeMillis();
FileStatus[] statuses = metaReader.getAllFilesInPartition(new Path(HoodieCLI.basePath, partition));
long t2 = System.currentTimeMillis();

Arrays.stream(statuses).sorted((p1, p2) -> p2.getPath().getName().compareTo(p1.getPath().getName())).forEach(p -> {
out.append("\t" + p.getPath().getName());
out.append("\n");
});

out.append(String.format("\n=== Files in partition retrieved in %.2fsec ===", (t2 - t1) / 1000.0));

return out.toString();
}

private HoodieWriteConfig getWriteConfig() {
return HoodieWriteConfig.newBuilder().withPath(HoodieCLI.basePath)
.withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).build()).build();
}

private void initJavaSparkContext() {
if (jsc == null) {
jsc = SparkUtil.initJavaSparkConf("HoodieCLI");
}
}
}
AbstractHoodieClient.java
@@ -71,6 +71,7 @@ protected AbstractHoodieClient(HoodieEngineContext context, HoodieWriteConfig cl
this.timelineServer = timelineServer;
shouldStopTimelineServer = !timelineServer.isPresent();
startEmbeddedServerView();
+ initWrapperFSMetrics();
}

/**
@@ -118,6 +119,10 @@ public HoodieWriteConfig getConfig() {
return config;
}

+ protected void initWrapperFSMetrics() {
+   // no-op.
+ }
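The no-op above is a template-method hook; a minimal sketch of how an engine-specific subclass might override it (the body is hypothetical, not this PR's actual Spark implementation):

    @Override
    protected void initWrapperFSMetrics() {
      if (config.isMetricsOn()) {
        // e.g. register counters here so wrapper-filesystem I/O gets measured
      }
    }
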

protected HoodieTableMetaClient createMetaClient(boolean loadActiveTimelineOnLoad) {
return new HoodieTableMetaClient(hadoopConf, config.getBasePath(), loadActiveTimelineOnLoad,
config.getConsistencyGuardConfig(),
AbstractHoodieWriteClient.java
@@ -134,6 +134,7 @@ public AbstractHoodieWriteClient(HoodieEngineContext context, HoodieWriteConfig
this.metrics = new HoodieMetrics(config, config.getTableName());
this.rollbackPending = rollbackPending;
this.index = createIndex(writeConfig);
+ syncTableMetadata();
}

protected abstract HoodieIndex<T, I, K, O> createIndex(HoodieWriteConfig writeConfig);
@@ -220,6 +221,10 @@ void emitCommitMetrics(String instantTime, HoodieCommitMetadata metadata, String
}
}

+ protected void syncTableMetadata() {
+   // no-op
+ }
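As with initWrapperFSMetrics, this hook is meant to be overridden per engine; a hedged sketch of a Spark-side override (field names hadoopConf/config/context assumed from AbstractHoodieClient; the writer factory is the one used in MetadataCommand above):

    @Override
    protected void syncTableMetadata() {
      // Creating the metadata writer syncs the metadata table with the data timeline.
      SparkHoodieBackedTableMetadataWriter.create(hadoopConf, config, context);
    }
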

/**
* Filter out HoodieRecords that already exists in the output folder. This is useful in deduplication.
*
@@ -407,7 +412,9 @@ protected void postCommit(HoodieTable<T, I, K, O> table, HoodieCommitMetadata me
// We cannot have unbounded commit files. Archive commits if we have to archive
HoodieTimelineArchiveLog archiveLog = new HoodieTimelineArchiveLog(config, table);
archiveLog.archiveIfRequired(context);
- autoCleanOnCommit(instantTime);
+ autoCleanOnCommit();
+
+ syncTableMetadata();
} catch (IOException ioe) {
throw new HoodieIOException(ioe.getMessage(), ioe);
}
@@ -434,18 +441,18 @@ protected void runAnyPendingClustering(HoodieTable<T, I, K, O> table) {
/**
* Handle auto clean during commit.
*
- * @param instantTime
*/
- protected void autoCleanOnCommit(String instantTime) {
+ protected void autoCleanOnCommit() {
if (config.isAutoClean()) {
// Call clean to cleanup if there is anything to cleanup after the commit,
if (config.isAsyncClean()) {
LOG.info("Cleaner has been spawned already. Waiting for it to finish");
AsyncCleanerService.waitForCompletion(asyncCleanerService);
LOG.info("Cleaner has finished");
} else {
+ // Do not reuse instantTime for clean as metadata table requires all changes to have unique instant timestamps.
LOG.info("Auto cleaning is enabled. Running cleaner now");
- clean(instantTime);
+ clean();
}
}
}
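The unique-instant-timestamps comment above motivates this signature change; a hedged sketch of what the no-arg clean() (see the @@ -599 hunk below) presumably does internally:

    public HoodieCleanMetadata clean() {
      // Allocate a fresh instant rather than reusing the commit's instantTime.
      return clean(HoodieActiveTimeline.createNewInstantTime());
    }
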
@@ -599,8 +606,14 @@ public HoodieCleanMetadata clean() {
* Provides a new commit time for a write operation (insert/update/delete).
*/
public String startCommit() {
+ // NOTE : Need to ensure that rollback is done before a new commit is started
+ if (rollbackPending) {
+   // Only rollback pending commit/delta-commits. Do not touch compaction commits
+   rollbackPendingCommits();
+ }
String instantTime = HoodieActiveTimeline.createNewInstantTime();
- startCommitWithTime(instantTime);
+ HoodieTableMetaClient metaClient = createMetaClient(true);
[Contributor] After https://github.com/apache/hudi/pull/2136/files is merged, we will reuse the metaClient in AbstractHoodieWriteClient.java.
[Member, Author] Ack.
+ startCommit(instantTime, metaClient.getCommitActionType(), metaClient);
return instantTime;
}

AsyncCleanerService.java
@@ -19,6 +19,7 @@
package org.apache.hudi.client;

import org.apache.hudi.async.HoodieAsyncService;
+ import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.exception.HoodieException;
import org.apache.log4j.LogManager;
@@ -52,11 +53,11 @@ protected Pair<CompletableFuture, ExecutorService> startService() {
}), executor);
}

- public static AsyncCleanerService startAsyncCleaningIfEnabled(AbstractHoodieWriteClient writeClient,
-                                                               String instantTime) {
+ public static AsyncCleanerService startAsyncCleaningIfEnabled(AbstractHoodieWriteClient writeClient) {
AsyncCleanerService asyncCleanerService = null;
if (writeClient.getConfig().isAutoClean() && writeClient.getConfig().isAsyncClean()) {
LOG.info("Auto cleaning is enabled. Running cleaner async to write operation");
String instantTime = HoodieActiveTimeline.createNewInstantTime();
LOG.info("Auto cleaning is enabled. Running cleaner async to write operation at instant time " + instantTime);
asyncCleanerService = new AsyncCleanerService(writeClient, instantTime);
asyncCleanerService.start(null);
} else {
HoodieMetricsConfig.java
@@ -62,6 +62,9 @@ public class HoodieMetricsConfig extends DefaultHoodieConfig {
public static final String METRICS_REPORTER_CLASS = METRIC_PREFIX + ".reporter.class";
public static final String DEFAULT_METRICS_REPORTER_CLASS = "";

+ // Enable metrics collection from executors
+ public static final String ENABLE_EXECUTOR_METRICS = METRIC_PREFIX + ".executor.enable";
+
private HoodieMetricsConfig(Properties props) {
super(props);
}
@@ -126,6 +129,11 @@ public Builder withReporterClass(String className) {
return this;
}

+ public Builder withExecutorMetrics(boolean enable) {
+   props.setProperty(ENABLE_EXECUTOR_METRICS, String.valueOf(enable));
+   return this;
+ }
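A hedged usage sketch for the new builder option (assuming METRIC_PREFIX is "hoodie.metrics", so the key resolves to "hoodie.metrics.executor.enable"; the on(...) toggle is assumed from the existing builder):

    HoodieMetricsConfig metricsConfig = HoodieMetricsConfig.newBuilder()
        .on(true)                    // assumed existing toggle for METRICS_ON
        .withExecutorMetrics(true)   // sets "hoodie.metrics.executor.enable" = "true"
        .build();
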

public HoodieMetricsConfig build() {
HoodieMetricsConfig config = new HoodieMetricsConfig(props);
setDefaultOnCondition(props, !props.containsKey(METRICS_ON), METRICS_ON, String.valueOf(DEFAULT_METRICS_ON));