FIFO Compaction with TTL #2480

Closed. Wants to merge 11 commits.
1 change: 1 addition & 0 deletions HISTORY.md
@@ -6,6 +6,7 @@
 ### New Features
 * Measure estimated number of reads per file. The information can be accessed through DB::GetColumnFamilyMetaData or "rocksdb.sstables" DB property.
 * RateLimiter support for throttling background reads, or throttling the sum of background reads and writes. This can give more predictable I/O usage when compaction reads more data than it writes, e.g., due to lots of deletions.
+* [Experimental] FIFO compaction with TTL support. It can be enabled by setting CompactionOptionsFIFO.ttl > 0.
 
 ## 5.6.0 (06/06/2017)
 ### Public API Change
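For reference, a minimal sketch of enabling the new option from application code. It assumes the standard RocksDB C++ API; the database path, sizes, and the one-day TTL are arbitrary example values, and only CompactionOptionsFIFO.ttl is specific to this change.

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.compaction_style = rocksdb::kCompactionStyleFIFO;
  options.compaction_options_fifo.max_table_files_size = 150 << 20;  // 150 MB cap
  options.compaction_options_fifo.ttl = 24 * 60 * 60;  // example: drop files older than one day
  options.compaction_options_fifo.allow_compaction = false;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/fifo_ttl_demo", &db);
  if (!s.ok()) {
    return 1;
  }
  // Writes go here; files whose creation time falls outside the TTL are
  // dropped by the next automatic FIFO compaction.
  delete db;
  return 0;
}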
9 changes: 5 additions & 4 deletions db/compaction.cc
@@ -465,10 +465,11 @@ uint64_t Compaction::MaxInputFileCreationTime() const {
   uint64_t max_creation_time = 0;
   if (cfd_->ioptions()->compaction_style == kCompactionStyleFIFO) {
     for (const auto& file : inputs_[0].files) {
-      uint64_t creation_time =
-          file->fd.table_reader->GetTableProperties()->creation_time;
-      max_creation_time =
-          creation_time > max_creation_time ? creation_time : max_creation_time;
+      if (file->fd.table_reader != nullptr) {
+        uint64_t creation_time =
+            file->fd.table_reader->GetTableProperties()->creation_time;
+        max_creation_time = std::max(max_creation_time, creation_time);
+      }
     }
   }
   return max_creation_time;
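To make the intent of the guarded creation time clearer, here is a hypothetical helper that is not part of the patch: it mirrors the TTL comparison used elsewhere in this PR and only assumes Compaction::MaxInputFileCreationTime() above and Env::GetCurrentTime().

// Hypothetical illustration: true when even the newest input file of a FIFO
// compaction is older than the configured TTL (a creation time of 0 means
// "unknown" and never counts as expired).
bool AllInputsExpired(const rocksdb::Compaction& c, rocksdb::Env* env,
                      uint64_t ttl_seconds) {
  int64_t now = 0;
  if (!env->GetCurrentTime(&now).ok()) {
    return false;  // clock unavailable, so do not claim expiry
  }
  const uint64_t max_creation_time = c.MaxInputFileCreationTime();
  return max_creation_time > 0 &&
         max_creation_time < static_cast<uint64_t>(now) - ttl_seconds;
}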
25 changes: 13 additions & 12 deletions db/compaction_picker.cc
@@ -1440,18 +1440,19 @@ Compaction* FIFOCompactionPicker::PickTTLCompaction(
 
   for (auto ritr = level_files.rbegin(); ritr != level_files.rend(); ++ritr) {
     auto f = *ritr;
-    auto props = f->fd.table_reader->GetTableProperties();
-    auto creation_time = props->creation_time;
-    if (creation_time == 0) {
-      continue;
-    } else if (creation_time <
-               (current_time - ioptions_.compaction_options_fifo.ttl)) {
-      total_size -= f->compensated_file_size;
-      inputs[0].files.push_back(f);
-      ROCKS_LOG_BUFFER(log_buffer,
-                       "[%s] FIFO compaction: picking file %" PRIu64
-                       " with creation time %" PRIu64 " for deletion",
-                       cf_name.c_str(), f->fd.GetNumber(), creation_time);
+    if (f->fd.table_reader != nullptr) {
+      auto creation_time =
+          f->fd.table_reader->GetTableProperties()->creation_time;
+      if (creation_time > 0 &&
+          creation_time <
+              (current_time - ioptions_.compaction_options_fifo.ttl)) {
+        total_size -= f->compensated_file_size;
+        inputs[0].files.push_back(f);
+        ROCKS_LOG_BUFFER(log_buffer,
+                         "[%s] FIFO compaction: picking file %" PRIu64
+                         " with creation time %" PRIu64 " for deletion",
+                         cf_name.c_str(), f->fd.GetNumber(), creation_time);
+      }
     }
   }
 
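Read in isolation, the picker's expiry test reduces to the predicate below; this restatement is illustrative only, and the helper name is not part of RocksDB.

// A file is a TTL-deletion candidate only if its creation time is known
// (non-zero) and strictly older than current_time - ttl.
inline bool IsTtlExpired(uint64_t creation_time, uint64_t current_time,
                         uint64_t ttl_seconds) {
  return creation_time > 0 && creation_time < (current_time - ttl_seconds);
}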
11 changes: 6 additions & 5 deletions db/db_test.cc
@@ -2849,8 +2849,8 @@ TEST_F(DBTest, FIFOCompactionTestWithTTL) {
     ASSERT_EQ(NumTableFilesAtLevel(0), 0);
   }
 
-  // Test to make sure that all files with expired ttl are deleted on compaction
-  // that is triggerred by size going beyond max_table_files_size threshold.
+  // Test to make sure that all files with expired ttl are deleted on next
+  // automatic compaction.
   {
     options.compaction_options_fifo.max_table_files_size = 150 << 10;  // 150KB
     options.compaction_options_fifo.allow_compaction = false;
@@ -2873,16 +2873,17 @@
     ASSERT_OK(dbfull()->TEST_WaitForCompact());
     ASSERT_EQ(NumTableFilesAtLevel(0), 10);
 
-    // Create 10 more files. The old 10 files are dropped.
-    for (int i = 0; i < 10; i++) {
+    // Create 1 more file to trigger TTL compaction. The old files are dropped.
+    for (int i = 0; i < 1; i++) {
       for (int j = 0; j < 10; j++) {
         ASSERT_OK(Put(ToString(i * 20 + j), RandomString(&rnd, 980)));
       }
       Flush();
     }
 
     ASSERT_OK(dbfull()->TEST_WaitForCompact());
-    // Only the new 10 files remain.
-    ASSERT_EQ(NumTableFilesAtLevel(0), 10);
+    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
     ASSERT_LE(SizeAtLevel(0),
               options.compaction_options_fifo.max_table_files_size);
   }
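Outside the test harness, an application can observe the same effect through a public DB property; the sketch below uses the standard "rocksdb.num-files-at-level0" property and is otherwise illustrative.

#include <string>
#include "rocksdb/db.h"

// Returns the current number of level-0 files, or 0 if the property is
// unavailable.
uint64_t NumLevel0Files(rocksdb::DB* db) {
  std::string value;
  if (db->GetProperty("rocksdb.num-files-at-level0", &value)) {
    return std::stoull(value);
  }
  return 0;
}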
30 changes: 30 additions & 0 deletions db/version_set.cc
@@ -1318,6 +1318,31 @@ void VersionStorageInfo::EstimateCompactionBytesNeeded(
   }
 }
 
+namespace {
+uint32_t GetExpiredTtlFilesCount(const ImmutableCFOptions& ioptions,
+                                 const std::vector<FileMetaData*>& files) {
+  uint32_t ttl_expired_files_count = 0;
+
+  int64_t _current_time;
+  auto status = ioptions.env->GetCurrentTime(&_current_time);
+  if (status.ok()) {
+    const uint64_t current_time = static_cast<uint64_t>(_current_time);
+    for (auto f : files) {
+      if (!f->being_compacted && f->fd.table_reader != nullptr) {
+        auto creation_time =
+            f->fd.table_reader->GetTableProperties()->creation_time;
+        if (creation_time > 0 &&
+            creation_time <
+                (current_time - ioptions.compaction_options_fifo.ttl)) {
+          ttl_expired_files_count++;
+        }
+      }
+    }
+  }
+  return ttl_expired_files_count;
+}
+}  // anonymous namespace
+
 void VersionStorageInfo::ComputeCompactionScore(
     const ImmutableCFOptions& immutable_cf_options,
     const MutableCFOptions& mutable_cf_options) {
@@ -1364,6 +1389,11 @@ void VersionStorageInfo::ComputeCompactionScore(
                   mutable_cf_options.level0_file_num_compaction_trigger,
               score);
         }
+        if (immutable_cf_options.compaction_options_fifo.ttl > 0) {
+          score = std::max(static_cast<double>(GetExpiredTtlFilesCount(
+                               immutable_cf_options, files_[level])),
+                           score);
+        }
 
       } else {
         score = static_cast<double>(num_sorted_runs) /
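A brief illustration of the score arithmetic above, with made-up numbers and a hypothetical helper name: it shows why a single expired file is enough to schedule a FIFO compaction even when the size threshold is far from being hit.

#include <algorithm>
#include <cstdint>

// Illustration only: how the FIFO score combines the size ratio with the
// expired-file count produced by GetExpiredTtlFilesCount().
double FifoScoreSketch(uint64_t total_size, uint64_t max_table_files_size,
                       uint32_t expired_ttl_files) {
  double score = static_cast<double>(total_size) /
                 static_cast<double>(max_table_files_size);
  score = std::max(static_cast<double>(expired_ttl_files), score);
  // Example: total_size = 90 MB, cap = 150 MB, 3 expired files gives
  // max(0.6, 3.0) = 3.0, which is >= 1, so a TTL compaction is scheduled even
  // though the size ratio alone would not trigger one.
  return score;
}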
Expand Down