Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
119 changes: 119 additions & 0 deletions bolt/connectors/hive/HiveConnectorSplit.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ inline const std::string KPaimonDeletionFilePath = "paimon.deletion.file.path";
inline const std::string KPaimonDeletionBinOffset =
"paimon.deletion.bin.offset";
inline const std::string KPaimonDeletionBinSize = "paimon.deletion.bin.size";
constexpr std::string_view kDefaultHiveConnectorId = "hive-default";

struct HiveConnectorSplitCacheLimit : public ISerializable {
// Records whether the table is cached, and the table's partition range limit.
Expand Down Expand Up @@ -205,4 +206,122 @@ struct HiveConnectorSplit : public connector::ConnectorSplit {
static void registerSerDe();
};

/// Builder for HiveConnectorSplit. Allows constructing splits incrementally
/// without specifying every constructor argument.
class HiveConnectorSplitBuilder {
 public:
  /// @param filePath Path of the file backing the split. A path that starts
  /// with '/' is given a "file:" scheme prefix when the split is built.
  explicit HiveConnectorSplitBuilder(std::string filePath)
      : filePath_{std::move(filePath)} {}

  /// Sets the connector id. Defaults to kDefaultHiveConnectorId.
  HiveConnectorSplitBuilder& connectorId(const std::string& connectorId) {
    connectorId_ = connectorId;
    return *this;
  }

  /// Sets the byte offset at which the split begins. Defaults to 0.
  HiveConnectorSplitBuilder& start(uint64_t start) {
    start_ = start;
    return *this;
  }

  /// Sets the split length in bytes. Defaults to the maximum uint64_t,
  /// i.e. "read to end of file".
  HiveConnectorSplitBuilder& length(uint64_t length) {
    length_ = length;
    return *this;
  }

  /// Sets the file format. Defaults to PARQUET.
  HiveConnectorSplitBuilder& fileFormat(dwio::common::FileFormat format) {
    fileFormat_ = format;
    return *this;
  }

  /// Adds a single partition key. A std::nullopt value represents a NULL
  /// partition value. A key that was already added is kept unchanged
  /// (emplace semantics: first insertion wins).
  HiveConnectorSplitBuilder& partitionKey(
      std::string name,
      std::optional<std::string> value) {
    partitionKeys_.emplace(std::move(name), std::move(value));
    return *this;
  }

  /// Sets the table bucket number; unset by default.
  HiveConnectorSplitBuilder& tableBucketNumber(int32_t bucket) {
    tableBucketNumber_ = bucket;
    return *this;
  }

  /// Stores a copy of the given cache limit; unset by default.
  HiveConnectorSplitBuilder& cacheLimit(HiveConnectorSplitCacheLimit cl) {
    cacheLimit_ = std::make_shared<HiveConnectorSplitCacheLimit>(std::move(cl));
    return *this;
  }

  /// Replaces the whole custom split info map.
  HiveConnectorSplitBuilder& customSplitInfo(
      std::unordered_map<std::string, std::string> info) {
    customSplitInfo_ = std::move(info);
    return *this;
  }

  /// Sets opaque extra file info shared with the split; may be null.
  HiveConnectorSplitBuilder& extraFileInfo(
      std::shared_ptr<std::string> extraFileInfo) {
    extraFileInfo_ = std::move(extraFileInfo);
    return *this;
  }

  /// Replaces the whole serde parameter map.
  HiveConnectorSplitBuilder& serdeParameters(
      std::unordered_map<std::string, std::string> params) {
    serdeParameters_ = std::move(params);
    return *this;
  }

  /// Sets the total file size in bytes. Defaults to 0.
  HiveConnectorSplitBuilder& fileSize(uint64_t size) {
    fileSize_ = size;
    return *this;
  }

  /// Sets the row-id properties; unset by default.
  HiveConnectorSplitBuilder& rowIdProperties(RowIdProperties props) {
    rowIdProperties_ = std::move(props);
    return *this;
  }

  /// Adds a single info column entry. An already-present name is kept
  /// unchanged (emplace semantics).
  HiveConnectorSplitBuilder& infoColumn(
      const std::string& name,
      const std::string& value) {
    infoColumns_.emplace(name, value);
    return *this;
  }

  /// Constructs a HiveConnectorSplit from the accumulated state. The builder
  /// is left intact, so build() may be called repeatedly; each call yields an
  /// independent split (the cache limit, if set, is deep-copied per call).
  std::shared_ptr<HiveConnectorSplit> build() const {
    // Absolute local paths get a "file:" scheme so downstream file-system
    // resolution treats them uniformly.
    std::string path = filePath_;
    if (!path.empty() && path.front() == '/') {
      path = "file:" + path;
    }
    std::unique_ptr<HiveConnectorSplitCacheLimit> cacheLimitCopy;
    if (cacheLimit_) {
      cacheLimitCopy =
          std::make_unique<HiveConnectorSplitCacheLimit>(*cacheLimit_);
    }
    return std::make_shared<HiveConnectorSplit>(
        connectorId_,
        path,
        fileFormat_,
        start_,
        length_,
        partitionKeys_,
        tableBucketNumber_,
        std::move(cacheLimitCopy),
        customSplitInfo_,
        extraFileInfo_,
        serdeParameters_,
        fileSize_,
        rowIdProperties_,
        infoColumns_);
  }

 private:
  std::string filePath_;
  std::string connectorId_{kDefaultHiveConnectorId};
  dwio::common::FileFormat fileFormat_{dwio::common::FileFormat::PARQUET};
  uint64_t start_{0};
  uint64_t length_{std::numeric_limits<uint64_t>::max()};
  std::unordered_map<std::string, std::optional<std::string>> partitionKeys_;
  std::optional<int32_t> tableBucketNumber_;
  std::shared_ptr<HiveConnectorSplitCacheLimit> cacheLimit_;
  std::unordered_map<std::string, std::string> customSplitInfo_;
  std::shared_ptr<std::string> extraFileInfo_;
  std::unordered_map<std::string, std::string> serdeParameters_;
  uint64_t fileSize_{0};
  std::optional<RowIdProperties> rowIdProperties_;
  std::unordered_map<std::string, std::string> infoColumns_;
};

} // namespace bytedance::bolt::connector::hive
69 changes: 28 additions & 41 deletions bolt/connectors/hive/HiveObjectFactory.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,93 +39,80 @@ std::shared_ptr<ConnectorSplit> HiveObjectFactory::makeConnectorSplit(
auto* dyn = dynamic_cast<const DynamicConnectorOptions*>(&optsBase);
BOLT_CHECK(dyn != nullptr, "Expected DynamicConnectorOptions");
const auto& options = dyn->options;
dwio::common::FileFormat fileFormat =
HiveConnectorSplitBuilder builder(filePath);
builder.connectorId(connectorId()).start(start).length(length);

builder.fileFormat(
(!options.isNull() && options.count("fileFormat"))
? static_cast<dwio::common::FileFormat>(options["fileFormat"].asInt())
: defaultFileFormat_;
? static_cast<dwio::common::FileFormat>(options["fileFormat"].asInt())
: defaultFileFormat_);

std::unordered_map<std::string, std::optional<std::string>> partitionKeys;
if (!options.isNull() && options.count("partitionKeys")) {
for (auto& kv : options["partitionKeys"].items()) {
partitionKeys[kv.first.asString()] = kv.second.isNull()
? std::nullopt
: std::optional<std::string>(kv.second.asString());
builder.partitionKey(
kv.first.asString(),
kv.second.isNull()
? std::nullopt
: std::optional<std::string>(kv.second.asString()));
}
}

std::optional<int32_t> tableBucketNumber;
if (!options.isNull() && options.count("tableBucketNumber")) {
tableBucketNumber = options["tableBucketNumber"].asInt();
builder.tableBucketNumber(options["tableBucketNumber"].asInt());
}

std::unordered_map<std::string, std::string> customSplitInfo;
if (!options.isNull() && options.count("customSplitInfo")) {
std::unordered_map<std::string, std::string> info;
for (auto& kv : options["customSplitInfo"].items()) {
customSplitInfo[kv.first.asString()] = kv.second.asString();
info[kv.first.asString()] = kv.second.asString();
}
builder.customSplitInfo(std::move(info));
}

std::shared_ptr<std::string> extraFileInfo;
if (!options.isNull() && options.count("extraFileInfo")) {
extraFileInfo = options["extraFileInfo"].isNull()
? std::shared_ptr<std::string>()
: std::make_shared<std::string>(options["extraFileInfo"].asString());
if (!options["extraFileInfo"].isNull()) {
builder.extraFileInfo(
std::make_shared<std::string>(options["extraFileInfo"].asString()));
}
}

std::unordered_map<std::string, std::string> serdeParameters;
if (!options.isNull() && options.count("serdeParameters")) {
std::unordered_map<std::string, std::string> params;
for (auto& kv : options["serdeParameters"].items()) {
serdeParameters[kv.first.asString()] = kv.second.asString();
params[kv.first.asString()] = kv.second.asString();
}
builder.serdeParameters(std::move(params));
}

std::unique_ptr<HiveConnectorSplitCacheLimit> hiveConnectorSplitCacheLimit;
if (!options.isNull() && options.count("hiveConnectorSplitCacheLimit")) {
hiveConnectorSplitCacheLimit = HiveConnectorSplitCacheLimit::create(
options["hiveConnectorSplitCacheLimit"]);
builder.cacheLimit(*HiveConnectorSplitCacheLimit::create(
options["hiveConnectorSplitCacheLimit"]));
}

uint64_t fileSize = 0;
if (!options.isNull() && options.count("fileProperties")) {
const auto& propertiesOption = options["fileProperties"];
if (propertiesOption.count("fileSize") &&
!propertiesOption["fileSize"].isNull()) {
fileSize = propertiesOption["fileSize"].asInt();
builder.fileSize(propertiesOption["fileSize"].asInt());
}
}

std::optional<RowIdProperties> rowIdProperties;
if (!options.isNull() && options.count("rowIdProperties")) {
RowIdProperties props;
const auto& rowIdPropertiesOption = options["rowIdProperties"];
props.metadataVersion = rowIdPropertiesOption["metadataVersion"].asInt();
props.partitionId = rowIdPropertiesOption["partitionId"].asInt();
props.tableGuid = rowIdPropertiesOption["tableGuid"].asString();
rowIdProperties = props;
builder.rowIdProperties(std::move(props));
}

std::unordered_map<std::string, std::string> infoColumns;
if (!options.isNull() && options.count("infoColumns")) {
for (auto& kv : options["infoColumns"].items()) {
infoColumns[kv.first.asString()] = kv.second.asString();
builder.infoColumn(kv.first.asString(), kv.second.asString());
}
}

return std::make_shared<HiveConnectorSplit>(
connectorId(),
filePath,
fileFormat,
start,
length,
partitionKeys,
tableBucketNumber,
std::move(hiveConnectorSplitCacheLimit),
customSplitInfo,
extraFileInfo,
serdeParameters,
fileSize,
rowIdProperties,
infoColumns);
return builder.build();
}

std::shared_ptr<connector::ColumnHandle> HiveObjectFactory::makeColumnHandle(
Expand Down
3 changes: 3 additions & 0 deletions bolt/exec/tests/AssertQueryBuilderTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
#include "bolt/exec/tests/utils/PlanBuilder.h"
#include "bolt/vector/fuzzer/VectorFuzzer.h"
namespace bytedance::bolt::exec::test {
using connector::hive::HiveConnectorSplitBuilder;

class AssertQueryBuilderTest : public HiveConnectorTestBase {};

Expand Down Expand Up @@ -121,6 +122,8 @@ TEST_F(AssertQueryBuilderTest, hiveSplits) {
.planNode(),
duckDbQueryRunner_)
.split(HiveConnectorSplitBuilder(file->path)
.connectorId(kHiveConnectorId)
.fileFormat(dwio::common::FileFormat::DWRF)
.partitionKey("ds", "2022-05-10")
.build())
.assertResults(
Expand Down
14 changes: 10 additions & 4 deletions bolt/exec/tests/HashJoinTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ using namespace bytedance::bolt::exec;
using namespace bytedance::bolt::exec::test;
using namespace bytedance::bolt::common::testutil;

using bytedance::bolt::connector::hive::HiveConnectorSplitBuilder;
using bytedance::bolt::test::BatchMaker;

namespace {
Expand Down Expand Up @@ -5366,6 +5367,8 @@ TEST_F(HashJoinTest, dynamicFiltersWithSkippedSplits) {
// We add splits that have no rows.
auto makeEmpty = [&]() {
return exec::Split(HiveConnectorSplitBuilder(tempFiles.back()->path)
.connectorId(kHiveConnectorId)
.fileFormat(dwio::common::FileFormat::DWRF)
.start(10000000)
.length(1)
.build());
Expand Down Expand Up @@ -5570,6 +5573,8 @@ TEST_F(HashJoinTest, dynamicFiltersAppliedToPreloadedSplits) {
tempFiles.push_back(TempFilePath::create());
writeToFile(tempFiles.back()->path, rowVector);
auto split = HiveConnectorSplitBuilder(tempFiles.back()->path)
.connectorId(kHiveConnectorId)
.fileFormat(dwio::common::FileFormat::DWRF)
.partitionKey("p1", std::to_string(i))
.build();
probeSplits.push_back(exec::Split(split));
Expand Down Expand Up @@ -6179,10 +6184,11 @@ TEST_F(HashJoinTest, dynamicFilterOnPartitionKey) {
std::vector<RowVectorPtr> buildVectors{
makeRowVector({"c0"}, {makeFlatVector<int64_t>({0, 1, 2})})};
createDuckDbTable("t", buildVectors);
auto split =
bytedance::bolt::exec::test::HiveConnectorSplitBuilder(filePaths[0]->path)
.partitionKey("k", "0")
.build();
auto split = HiveConnectorSplitBuilder(filePaths[0]->path)
.connectorId(kHiveConnectorId)
.fileFormat(dwio::common::FileFormat::DWRF)
.partitionKey("k", "0")
.build();
auto outputType = ROW({"n1_0", "n1_1"}, {BIGINT(), BIGINT()});
ColumnHandleMap assignments = {
{"n1_0", regularColumn("c0", BIGINT())},
Expand Down
Loading