WIP: Reduce cache coherency traffic under contention for the spinlock
Use the code from [1] to implement the spinlock based on
std::atomic<bool> instead of std::atomic_flag. While the former
is not necessarily lock-free, it is on the majority of platforms.
A static assert is added to catch platforms where std::atomic<bool>
is not lock-free; on those we could potentially fall back to the
older std::atomic_flag-based implementation.
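
For illustration, such a check could be written with C++17's
is_always_lock_free; this is only a sketch of the idea, not necessarily
the exact assert in this commit:

    #include <atomic>

    // Sketch only (assumes C++17): fail the build if std::atomic<bool>
    // is not guaranteed lock-free on this platform. Pre-C++17, the macro
    // ATOMIC_BOOL_LOCK_FREE == 2 could be checked instead.
    static_assert(std::atomic<bool>::is_always_lock_free,
                  "std::atomic<bool> is not lock-free here; consider keeping "
                  "the std::atomic_flag-based spinlock on this platform");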

WIP because I don't have a good scientific benchmark for this yet.
I tested it in our real-world application, and it seems to slightly
reduce the load on the spinlock, but not dramatically.
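
For reference, a rough contention microbenchmark could look like the
sketch below (purely hypothetical, not part of this commit; the Spinlock
stand-in and all constants are made up for illustration):

    // N threads repeatedly take the same spinlock and bump a counter.
    #include <atomic>
    #include <chrono>
    #include <cstdio>
    #include <thread>
    #include <vector>

    namespace {

    // Minimal stand-in for the patched spinlock (same test-and-test-and-set
    // idea); the real patch additionally issues a pause hint while waiting.
    struct Spinlock {
      std::atomic<bool> locked{false};
      void lock() noexcept {
        for (;;) {
          if (!locked.exchange(true, std::memory_order_acquire)) return;
          // Spin on a plain load so contended waiters do not keep invalidating
          // the cache line with read-modify-write operations.
          while (locked.load(std::memory_order_relaxed)) {}
        }
      }
      void unlock() noexcept { locked.store(false, std::memory_order_release); }
    };

    } // namespace

    int main() {
      constexpr int kThreads = 8;
      constexpr int kItersPerThread = 1'000'000;
      Spinlock lock;
      long counter = 0;

      const auto start = std::chrono::steady_clock::now();
      std::vector<std::thread> threads;
      for (int t = 0; t < kThreads; ++t) {
        threads.emplace_back([&] {
          for (int i = 0; i < kItersPerThread; ++i) {
            lock.lock();
            ++counter;
            lock.unlock();
          }
        });
      }
      for (auto &th : threads) th.join();
      const auto elapsed = std::chrono::steady_clock::now() - start;

      std::printf("counter=%ld elapsed=%lld ms\n", counter,
                  static_cast<long long>(
                      std::chrono::duration_cast<std::chrono::milliseconds>(
                          elapsed).count()));
      return 0;
    }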

See also: efficient#146

[1]: https://rigtorp.se/spinlock/
milianw committed Jun 28, 2022
1 parent 784d0f5 commit f9673ad
Showing 1 changed file with 20 additions and 9 deletions.
29 changes: 20 additions & 9 deletions libcuckoo/cuckoohash_map.hh
@@ -803,13 +803,11 @@ private:
   LIBCUCKOO_SQUELCH_PADDING_WARNING
   class LIBCUCKOO_ALIGNAS(64) spinlock {
   public:
-    spinlock() : elem_counter_(0), is_migrated_(true) { lock_.clear(); }
+    spinlock() : elem_counter_(0), is_migrated_(true) {}

     spinlock(const spinlock &other) noexcept
         : elem_counter_(other.elem_counter()),
-          is_migrated_(other.is_migrated()) {
-      lock_.clear();
-    }
+          is_migrated_(other.is_migrated()) {}

     spinlock &operator=(const spinlock &other) noexcept {
       elem_counter() = other.elem_counter();
@@ -818,14 +816,27 @@ private:
     }

     void lock() noexcept {
-      while (lock_.test_and_set(std::memory_order_acq_rel))
-        ;
+      for (;;) {
+        // Optimistically assume the lock is free on the first try
+        if (!lock_.exchange(true, std::memory_order_acquire)) {
+          return;
+        }
+        // Wait for lock to be released without generating cache misses
+        while (lock_.load(std::memory_order_relaxed)) {
+          // Issue X86 PAUSE or ARM YIELD instruction to reduce contention
+          // between hyper-threads
+          __builtin_ia32_pause();
+        }
+      }
     }

-    void unlock() noexcept { lock_.clear(std::memory_order_release); }
+    void unlock() noexcept { lock_.store(false, std::memory_order_release); }

     bool try_lock() noexcept {
-      return !lock_.test_and_set(std::memory_order_acq_rel);
+      // First do a relaxed load to check if lock is free in order to prevent
+      // unnecessary cache misses if someone does while(!try_lock())
+      return !lock_.load(std::memory_order_relaxed) &&
+             !lock_.exchange(true, std::memory_order_acquire);
     }

     counter_type &elem_counter() noexcept { return elem_counter_; }
@@ -835,7 +846,7 @@ private:
     bool is_migrated() const noexcept { return is_migrated_; }

   private:
-    std::atomic_flag lock_;
+    std::atomic<bool> lock_ = {0};
     counter_type elem_counter_;
     bool is_migrated_;
   };
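
The pause hint in the new lock() uses the x86-only __builtin_ia32_pause();
for illustration, a portable wrapper could be sketched roughly as follows
(hypothetical, assumes GCC/Clang-style builtins and inline asm, not part
of this commit):

    // Hypothetical portable "spin pause" helper (sketch only).
    inline void spin_pause() noexcept {
    #if defined(__i386__) || defined(__x86_64__)
      __builtin_ia32_pause();               // x86 PAUSE
    #elif defined(__aarch64__) || defined(__arm__)
      asm volatile("yield" ::: "memory");   // ARM/AArch64 YIELD
    #else
      asm volatile("" ::: "memory");        // fallback: compiler barrier only
    #endif
    }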
