Skip to content

Commit

Permalink
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Move memory layout and common methods of AlignedHeapSegment to Aligne…
Browse files Browse the repository at this point in the history
…dHeapSegmentBase (#1510)

Summary:

The large heap segment type should have the same storage layout as
current AlignedHeapSegment, and share a few common methods. Abstract
these to a base class, and make both AlignedHeapSegment and
JumboHeapSegment inherit from the base type.

Differential Revision: D61675022
lavenzg authored and facebook-github-bot committed Oct 21, 2024

Verified

This commit was signed with the committer’s verified signature.
booc0mtaco Holloway
1 parent b74bd2c commit c4cc598
Showing 3 changed files with 228 additions and 213 deletions.
372 changes: 190 additions & 182 deletions include/hermes/VM/AlignedHeapSegment.h
Original file line number Diff line number Diff line change
@@ -36,9 +36,9 @@ class StorageProvider;
// TODO (T25527350): Debug Dump
// TODO (T25527350): Heap Moving

/// An \c AlignedHeapSegment is a contiguous chunk of memory aligned to its own
/// storage size (which is a fixed power of two number of bytes). The storage
/// is further split up according to the diagram below:
/// An \c AlignedHeapSegmentBase manages a contiguous chunk of memory aligned to
/// kSegmentUnitSize. The storage is further split up according to the diagram
/// below:
///
/// +----------------------------------------+
/// | (1) Card Table |
@@ -52,83 +52,23 @@ class StorageProvider;
/// | (End) |
/// +----------------------------------------+
///
/// The tables in (1), and (2) cover the contiguous allocation space (3)
/// into which GCCells are bump allocated.
class AlignedHeapSegment {
/// The tables in (1), and (2) cover the contiguous allocation space (3) into
/// which GCCells are bump allocated. They have fixed size computed from
/// kSegmentUnitSize. For segments with larger size (which must be multiples of
/// kSegmentUnitSize), card table allocates its internal arrays separately
/// instead. Any segment size smaller than kSegmentUnitSize is not supported.
class AlignedHeapSegmentBase {
public:
/// @name Constants and utility functions for the aligned storage of \c
/// AlignedHeapSegment.
///
/// @{
/// The size and the alignment of the storage, in bytes.
static constexpr unsigned kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE;
static constexpr size_t kSize{1 << kLogSize};
/// Mask for isolating the offset into a storage for a pointer.
static constexpr size_t kLowMask{kSize - 1};
/// Mask for isolating the storage being pointed into by a pointer.
static constexpr size_t kHighMask{~kLowMask};

/// Returns the storage size, in bytes, of an \c AlignedHeapSegment.
static constexpr size_t storageSize() {
return kSize;
}

/// Returns the pointer to the beginning of the storage containing \p ptr
/// (inclusive). Assuming such a storage exists. Note that
///
/// storageStart(seg.hiLim()) != seg.lowLim()
///
/// as \c seg.hiLim() is not contained in the bounds of \c seg -- it
/// is the first address not in the bounds.
static void *storageStart(const void *ptr) {
return reinterpret_cast<char *>(
reinterpret_cast<uintptr_t>(ptr) & kHighMask);
}

/// Returns the pointer to the end of the storage containing \p ptr
/// (exclusive). Assuming such a storage exists. Note that
///
/// storageEnd(seg.hiLim()) != seg.hiLim()
///
/// as \c seg.hiLim() is not contained in the bounds of \c seg -- it
/// is the first address not in the bounds.
static void *storageEnd(const void *ptr) {
return reinterpret_cast<char *>(storageStart(ptr)) + kSize;
}

/// Returns the offset in bytes to \p ptr from the start of its containing
/// storage. Assuming such a storage exists. Note that
///
/// offset(seg.hiLim()) != seg.size()
///
/// as \c seg.hiLim() is not contained in the bounds of \c seg -- it
/// is the first address not in the bounds.
static size_t offset(const char *ptr) {
return reinterpret_cast<size_t>(ptr) & kLowMask;
}
/// @}

/// Construct a null AlignedHeapSegment (one that does not own memory).
AlignedHeapSegment() = default;
/// \c AlignedHeapSegment is movable and assignable, but not copyable.
AlignedHeapSegment(AlignedHeapSegment &&);
AlignedHeapSegment &operator=(AlignedHeapSegment &&);
AlignedHeapSegment(const AlignedHeapSegment &) = delete;

~AlignedHeapSegment();

/// Create a AlignedHeapSegment by allocating memory with \p provider.
static llvh::ErrorOr<AlignedHeapSegment> create(StorageProvider *provider);
static llvh::ErrorOr<AlignedHeapSegment> create(
StorageProvider *provider,
const char *name);
static constexpr size_t kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE;
static constexpr size_t kSegmentUnitSize = (1 << kLogSize);

/// Contents of the memory region managed by this segment.
class Contents {
public:
/// The number of bits representing the total number of heap-aligned
/// addresses in the segment storage.
static constexpr size_t kMarkBitArraySize = kSize >> LogHeapAlign;
static constexpr size_t kMarkBitArraySize =
kSegmentUnitSize >> LogHeapAlign;
/// BitArray for marking allocation region of a segment.
using MarkBitArray = BitArray<kMarkBitArraySize>;

@@ -138,6 +78,7 @@ class AlignedHeapSegment {

private:
friend class AlignedHeapSegment;
friend class AlignedHeapSegmentBase;

/// Note that because of the Contents object, the first few bytes of the
/// card table are unused, we instead use them to store a small
@@ -179,10 +120,11 @@ class AlignedHeapSegment {
"SHSegmentInfo does not fit in available unused CardTable space.");

/// The offset from the beginning of a segment of the allocatable region.
static constexpr size_t offsetOfAllocRegion{offsetof(Contents, allocRegion_)};
static constexpr size_t kOffsetOfAllocRegion{
offsetof(Contents, allocRegion_)};

static_assert(
isSizeHeapAligned(offsetOfAllocRegion),
isSizeHeapAligned(kOffsetOfAllocRegion),
"Allocation region must start at a heap aligned offset");

static_assert(
@@ -215,6 +157,178 @@ class AlignedHeapSegment {
GCCell *cell_{nullptr};
};

/// Returns the address that is the lower bound of the segment.
/// \post The returned pointer is guaranteed to be aligned to
/// kSegmentUnitSize.
char *lowLim() const {
return lowLim_;
}

/// Returns the address at which the first allocation in this segment would
/// occur.
/// Disable UB sanitization because 'this' may be null during the tests.
char *start() const LLVM_NO_SANITIZE("undefined") {
return contents()->allocRegion_;
}

/// Return a reference to the card table covering the memory region managed by
/// this segment.
CardTable &cardTable() const {
return contents()->cardTable_;
}

/// Return a reference to the mark bit array covering the memory region
/// managed by this segment.
Contents::MarkBitArray &markBitArray() const {
return contents()->markBitArray_;
}

/// Mark the given \p cell. Assumes the given address is a valid heap object.
static void setCellMarkBit(const GCCell *cell) {
auto *markBits = markBitArrayCovering(cell);
size_t ind = addressToMarkBitArrayIndex(cell);
markBits->set(ind, true);
}

/// Return whether the given \p cell is marked. Assumes the given address is
/// a valid heap object.
static bool getCellMarkBit(const GCCell *cell) {
auto *markBits = markBitArrayCovering(cell);
size_t ind = addressToMarkBitArrayIndex(cell);
return markBits->at(ind);
}

protected:
AlignedHeapSegmentBase() = default;

/// Construct a Contents object in place at the address of \p lowLim.
/// \param lowLim start of the memory region managed by this segment; stored
///   as lowLim_. Callers are expected to pass storage aligned to
///   kSegmentUnitSize.
/// The guard page is set to ProtectMode::None here; presumably real
/// protection is applied later by the owning segment type — confirm.
AlignedHeapSegmentBase(void *lowLim)
: lowLim_(reinterpret_cast<char *>(lowLim)) {
new (contents()) Contents();
contents()->protectGuardPage(oscompat::ProtectMode::None);
}

/// Return a pointer to the contents of the memory region managed by this
/// segment.
Contents *contents() const {
return reinterpret_cast<Contents *>(lowLim_);
}

/// Given the \p lowLim of some valid segment's memory region, returns a
/// pointer to the Contents laid out in the storage, assuming it exists.
static Contents *contents(void *lowLim) {
return reinterpret_cast<Contents *>(lowLim);
}

/// The start of the aligned segment.
char *lowLim_{nullptr};

private:
/// Return the starting address of the aligned region of size kSegmentUnitSize
/// that \p cell resides in. If \c cell resides in a JumboSegment, it is the
/// only cell there, so this essentially returns its segment's starting
/// address.
static char *alignedStorageStart(const GCCell *cell) {
return reinterpret_cast<char *>(
reinterpret_cast<uintptr_t>(cell) & ~(kSegmentUnitSize - 1));
}

/// Given a \p cell, returns a pointer to the MarkBitArray covering the
/// segment that \p cell resides in.
///
/// \pre There exists a currently alive heap that claims to contain \c ptr.
static Contents::MarkBitArray *markBitArrayCovering(const GCCell *cell) {
auto *segStart = alignedStorageStart(cell);
return &contents(segStart)->markBitArray_;
}

/// Translate the given address to a 0-based index in the MarkBitArray of its
/// segment. The base address is the start of the storage of this segment. For
/// JumboSegment, this should always return a constant index
/// kOffsetOfAllocRegion >> LogHeapAlign.
static size_t addressToMarkBitArrayIndex(const GCCell *cell) {
auto *cellAddr = reinterpret_cast<const char *>(cell);
// alignedStorageStart already yields a char *, so no further cast is needed.
const char *segBase = alignedStorageStart(cell);
return static_cast<size_t>(cellAddr - segBase) >> LogHeapAlign;
}
};

/// JumboHeapSegment has custom storage size that must be a multiple of
/// kSegmentUnitSize. Each such segment can only allocate a single object that
/// occupies the entire allocation space. Therefore, the inline MarkBitArray is
/// large enough, while the CardTable is stored separately.
class JumboHeapSegment : public AlignedHeapSegmentBase {};

/// AlignedHeapSegment has fixed storage size kSegmentUnitSize. Its CardTable
/// and MarkBitArray are stored inline right before the allocation space. This
/// is used for all normal object allocations in YoungGen and OldGen.
class AlignedHeapSegment : public AlignedHeapSegmentBase {
public:
/// @name Constants and utility functions for the aligned storage of \c
/// AlignedHeapSegment.
///
/// @{
/// The size and the alignment of the storage, in bytes.
static constexpr size_t kSize = kSegmentUnitSize;
/// Mask for isolating the offset into a storage for a pointer.
static constexpr size_t kLowMask{kSize - 1};
/// Mask for isolating the storage being pointed into by a pointer.
static constexpr size_t kHighMask{~kLowMask};

/// Returns the storage size, in bytes, of an \c AlignedHeapSegment.
static constexpr size_t storageSize() {
return kSize;
}

/// Returns the pointer to the beginning of the storage containing \p ptr
/// (inclusive). Assuming such a storage exists. Note that
///
/// storageStart(seg.hiLim()) != seg.lowLim()
///
/// as \c seg.hiLim() is not contained in the bounds of \c seg -- it
/// is the first address not in the bounds.
static void *storageStart(const void *ptr) {
return reinterpret_cast<char *>(
reinterpret_cast<uintptr_t>(ptr) & kHighMask);
}

/// Returns the pointer to the end of the storage containing \p ptr
/// (exclusive). Assuming such a storage exists. Note that
///
/// storageEnd(seg.hiLim()) != seg.hiLim()
///
/// as \c seg.hiLim() is not contained in the bounds of \c seg -- it
/// is the first address not in the bounds.
static void *storageEnd(const void *ptr) {
return reinterpret_cast<char *>(storageStart(ptr)) + kSize;
}

/// Returns the offset in bytes to \p ptr from the start of its containing
/// storage. Assuming such a storage exists. Note that
///
/// offset(seg.hiLim()) != seg.size()
///
/// as \c seg.hiLim() is not contained in the bounds of \c seg -- it
/// is the first address not in the bounds.
static size_t offset(const char *ptr) {
return reinterpret_cast<size_t>(ptr) & kLowMask;
}
/// @}

/// Construct a null AlignedHeapSegment (one that does not own memory).
AlignedHeapSegment() = default;
/// \c AlignedHeapSegment is movable and assignable, but not copyable.
AlignedHeapSegment(AlignedHeapSegment &&);
AlignedHeapSegment &operator=(AlignedHeapSegment &&);
AlignedHeapSegment(const AlignedHeapSegment &) = delete;

~AlignedHeapSegment();

/// Create a AlignedHeapSegment by allocating memory with \p provider.
static llvh::ErrorOr<AlignedHeapSegment> create(StorageProvider *provider);
static llvh::ErrorOr<AlignedHeapSegment> create(
StorageProvider *provider,
const char *name);

/// Returns the index of the segment containing \p lowLim, which is required
/// to be the start of its containing segment. (This can allow extra
/// efficiency, in cases where the segment start has already been computed.)
@@ -238,40 +352,12 @@ class AlignedHeapSegment {
/// space, returns {nullptr, false}.
inline AllocResult alloc(uint32_t size);

/// Given the \p lowLim of some valid segment's memory region, returns a
/// pointer to the AlignedHeapSegment::Contents laid out in that storage,
/// assuming it exists.
inline static Contents *contents(void *lowLim);
inline static const Contents *contents(const void *lowLim);

/// Given a \p ptr into the memory region of some valid segment \c s, returns
/// a pointer to the CardTable covering the segment containing the pointer.
///
/// \pre There exists a currently alive heap that claims to contain \c ptr.
inline static CardTable *cardTableCovering(const void *ptr);

/// Given a \p ptr into the memory region of some valid segment \c s, returns
/// a pointer to the MarkBitArray covering the segment containing the
/// pointer.
///
/// \pre There exists a currently alive heap that claims to contain \c ptr.
inline static Contents::MarkBitArray *markBitArrayCovering(const void *ptr);

/// Translate the given address to a 0-based index in the MarkBitArray of its
/// segment. The base address is the start of the storage of this segment.
static size_t addressToMarkBitArrayIndex(const void *ptr) {
auto *cp = reinterpret_cast<const char *>(ptr);
auto *base = reinterpret_cast<const char *>(storageStart(cp));
return (cp - base) >> LogHeapAlign;
}

/// Mark the given \p cell. Assumes the given address is a valid heap object.
inline static void setCellMarkBit(const GCCell *cell);

/// Return whether the given \p cell is marked. Assumes the given address is
/// a valid heap object.
inline static bool getCellMarkBit(const GCCell *cell);

/// Find the head of the first cell that extends into the card at index
/// \p cardIdx.
/// \return A cell such that
@@ -294,23 +380,11 @@ class AlignedHeapSegment {
/// The number of bytes in the segment that are available for allocation.
inline size_t available() const;

/// Returns the address that is the lower bound of the segment.
/// \post The returned pointer is guaranteed to be aligned to a segment
/// boundary.
char *lowLim() const {
return lowLim_;
}

/// Returns the address that is the upper bound of the segment.
char *hiLim() const {
return lowLim() + storageSize();
}

/// Returns the address at which the first allocation in this segment would
/// occur.
/// Disable UB sanitization because 'this' may be null during the tests.
inline char *start() const LLVM_NO_SANITIZE("undefined");

/// Returns the first address after the region in which allocations can occur,
/// taking external memory credits into account (they decrease the effective
/// end).
@@ -340,15 +414,6 @@ class AlignedHeapSegment {
/// AlignedHeapSegment.
inline static bool containedInSame(const void *a, const void *b);

/// Return a reference to the card table covering the memory region managed by
/// this segment.
/// Disable sanitization because 'this' may be null in the tests.
inline CardTable &cardTable() const LLVM_NO_SANITIZE("null");

/// Return a reference to the mark bit array covering the memory region
/// managed by this segment.
inline Contents::MarkBitArray &markBitArray() const;

explicit operator bool() const {
return lowLim();
}
@@ -390,20 +455,11 @@ class AlignedHeapSegment {

/// Set the contents of the segment to a dead value.
void clear();
/// Set the given range [start, end) to a dead value.
static void clear(char *start, char *end);
/// Checks that dead values are present in the [start, end) range.
static void checkUnwritten(char *start, char *end);
#endif

protected:
/// Return a pointer to the contents of the memory region managed by this
/// segment.
inline Contents *contents() const;

/// The start of the aligned segment.
char *lowLim_{nullptr};

private:
/// The provider that created this segment. It will be used to properly
/// destroy this.
StorageProvider *provider_{nullptr};
@@ -419,7 +475,6 @@ class AlignedHeapSegment {
/// and swap idiom.
friend void swap(AlignedHeapSegment &a, AlignedHeapSegment &b);

private:
AlignedHeapSegment(StorageProvider *provider, void *lowLim);
};

@@ -459,26 +514,6 @@ AllocResult AlignedHeapSegment::alloc(uint32_t size) {
return {cell, true};
}

/*static*/
AlignedHeapSegment::Contents::MarkBitArray *
AlignedHeapSegment::markBitArrayCovering(const void *ptr) {
return &contents(storageStart(ptr))->markBitArray_;
}

/*static*/
void AlignedHeapSegment::setCellMarkBit(const GCCell *cell) {
auto *markBits = markBitArrayCovering(cell);
size_t ind = addressToMarkBitArrayIndex(cell);
markBits->set(ind, true);
}

/*static*/
bool AlignedHeapSegment::getCellMarkBit(const GCCell *cell) {
auto *markBits = markBitArrayCovering(cell);
size_t ind = addressToMarkBitArrayIndex(cell);
return markBits->at(ind);
}

GCCell *AlignedHeapSegment::getFirstCellHead(size_t cardIdx) {
CardTable &cards = cardTable();
GCCell *cell = cards.firstObjForCard(cardIdx);
@@ -499,16 +534,6 @@ void AlignedHeapSegment::setCellHead(const GCCell *cellStart, const size_t sz) {
}
}

/* static */ AlignedHeapSegment::Contents *AlignedHeapSegment::contents(
void *lowLim) {
return reinterpret_cast<Contents *>(lowLim);
}

/* static */ const AlignedHeapSegment::Contents *AlignedHeapSegment::contents(
const void *lowLim) {
return reinterpret_cast<const Contents *>(lowLim);
}

/* static */ CardTable *AlignedHeapSegment::cardTableCovering(const void *ptr) {
return &AlignedHeapSegment::contents(storageStart(ptr))->cardTable_;
}
@@ -529,10 +554,6 @@ size_t AlignedHeapSegment::available() const {
return effectiveEnd() - level();
}

char *AlignedHeapSegment::start() const {
return contents()->allocRegion_;
}

char *AlignedHeapSegment::effectiveEnd() const {
return effectiveEnd_;
}
@@ -558,19 +579,6 @@ bool AlignedHeapSegment::containedInSame(const void *a, const void *b) {
storageSize();
}

CardTable &AlignedHeapSegment::cardTable() const {
return contents()->cardTable_;
}

AlignedHeapSegment::Contents::MarkBitArray &AlignedHeapSegment::markBitArray()
const {
return contents()->markBitArray_;
}

AlignedHeapSegment::Contents *AlignedHeapSegment::contents() const {
return contents(lowLim());
}

} // namespace vm
} // namespace hermes

32 changes: 16 additions & 16 deletions lib/VM/gcs/AlignedHeapSegment.cpp
Original file line number Diff line number Diff line change
@@ -22,6 +22,17 @@
namespace hermes {
namespace vm {

#ifndef NDEBUG
/// Set the given range [start, end) to a dead value, so debug builds can
/// detect reads of freed or never-initialized heap memory.
/// \param start inclusive lower bound of the range.
/// \param end exclusive upper bound of the range.
/// Under AddressSanitizer the range is poisoned (any later access is
/// reported); otherwise it is filled with kInvalidHeapValue.
static void clearRange(char *start, char *end) {
#if LLVM_ADDRESS_SANITIZER_BUILD
__asan_poison_memory_region(start, end - start);
#else
std::memset(start, kInvalidHeapValue, end - start);
#endif
}
#endif

void AlignedHeapSegment::Contents::protectGuardPage(
oscompat::ProtectMode mode) {
char *begin = &paddedGuardPage_[kGuardPagePadding];
@@ -45,11 +56,12 @@ llvh::ErrorOr<AlignedHeapSegment> AlignedHeapSegment::create(
if (!result) {
return result.getError();
}
assert(*result && "Heap segment storage allocation failure");
return AlignedHeapSegment{provider, *result};
}

AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim)
: lowLim_(static_cast<char *>(lowLim)), provider_(provider) {
: AlignedHeapSegmentBase(lowLim), provider_(provider) {
assert(
storageStart(lowLim_) == lowLim_ &&
"The lower limit of this storage must be aligned");
@@ -58,13 +70,9 @@ AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim)
assert(
reinterpret_cast<uintptr_t>(hiLim()) % oscompat::page_size() == 0 &&
"The higher limit must be page aligned");
if (*this) {
new (contents()) Contents();
contents()->protectGuardPage(oscompat::ProtectMode::None);
#ifndef NDEBUG
clear();
clear();
#endif
}
}

void swap(AlignedHeapSegment &a, AlignedHeapSegment &b) {
@@ -120,7 +128,7 @@ void AlignedHeapSegment::setLevel(char *lvl) {
assert(dbgContainsLevel(lvl));
if (lvl < level_) {
#ifndef NDEBUG
clear(lvl, level_);
clearRange(lvl, level_);
#else
if (MU == AdviseUnused::Yes) {
const size_t PS = oscompat::page_size();
@@ -172,15 +180,7 @@ bool AlignedHeapSegment::validPointer(const void *p) const {
}

void AlignedHeapSegment::clear() {
clear(start(), end());
}

/* static */ void AlignedHeapSegment::clear(char *start, char *end) {
#if LLVM_ADDRESS_SANITIZER_BUILD
__asan_poison_memory_region(start, end - start);
#else
std::memset(start, kInvalidHeapValue, end - start);
#endif
clearRange(start(), end());
}

/* static */ void AlignedHeapSegment::checkUnwritten(char *start, char *end) {
37 changes: 22 additions & 15 deletions unittests/VMRuntime/MarkBitArrayNCTest.cpp
Original file line number Diff line number Diff line change
@@ -27,6 +27,13 @@ namespace {
struct MarkBitArrayTest : public ::testing::Test {
MarkBitArrayTest();

/// Test-local replacement for the removed public
/// AlignedHeapSegment::addressToMarkBitArrayIndex: translates \p addr to a
/// 0-based index into the MarkBitArray of its containing segment, measured
/// from the segment's storage start in heap-aligned units.
static size_t addressToMarkBitArrayIndex(const void *addr) {
auto *cp = reinterpret_cast<const char *>(addr);
auto *base =
reinterpret_cast<const char *>(AlignedHeapSegment::storageStart(addr));
return (cp - base) >> LogHeapAlign;
}

protected:
std::unique_ptr<StorageProvider> provider;
AlignedHeapSegment seg;
@@ -66,7 +73,7 @@ TEST_F(MarkBitArrayTest, AddressToIndex) {
char *addr = addrs.at(i);
size_t ind = indices.at(i);

EXPECT_EQ(ind, AlignedHeapSegment::addressToMarkBitArrayIndex(addr))
EXPECT_EQ(ind, addressToMarkBitArrayIndex(addr))
<< "0x" << std::hex << (void *)addr << " -> " << ind;
char *toAddr = seg.lowLim() + (ind << LogHeapAlign);
EXPECT_EQ(toAddr, addr)
@@ -78,7 +85,7 @@ TEST_F(MarkBitArrayTest, MarkGet) {
const size_t lastIx = mba.size() - 1;

for (char *addr : addrs) {
size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
size_t ind = addressToMarkBitArrayIndex(addr);

EXPECT_FALSE(ind > 0 && mba.at(ind - 1)) << "initial " << ind << " - 1";
EXPECT_FALSE(mba.at(ind)) << "initial " << ind;
@@ -97,37 +104,37 @@ TEST_F(MarkBitArrayTest, MarkGet) {

TEST_F(MarkBitArrayTest, Initial) {
for (char *addr : addrs) {
size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
size_t ind = addressToMarkBitArrayIndex(addr);
EXPECT_FALSE(mba.at(ind));
}
}

TEST_F(MarkBitArrayTest, Clear) {
for (char *addr : addrs) {
size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
size_t ind = addressToMarkBitArrayIndex(addr);
ASSERT_FALSE(mba.at(ind));
}

for (char *addr : addrs) {
size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
size_t ind = addressToMarkBitArrayIndex(addr);
mba.set(ind, true);
}

for (char *addr : addrs) {
size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
size_t ind = addressToMarkBitArrayIndex(addr);
ASSERT_TRUE(mba.at(ind));
}

mba.reset();
for (char *addr : addrs) {
size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
size_t ind = addressToMarkBitArrayIndex(addr);
EXPECT_FALSE(mba.at(ind));
}
}

TEST_F(MarkBitArrayTest, NextMarkedBitImmediate) {
char *addr = addrs.at(addrs.size() / 2);
size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
size_t ind = addressToMarkBitArrayIndex(addr);

mba.set(ind, true);
EXPECT_EQ(ind, mba.findNextSetBitFrom(ind));
@@ -140,7 +147,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) {
EXPECT_EQ(FOUND_NONE, mba.findNextSetBitFrom(0));
std::queue<size_t> indices;
for (char *addr : addrs) {
auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
auto ind = addressToMarkBitArrayIndex(addr);
mba.set(ind, true);
indices.push(ind);
}
@@ -154,7 +161,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) {

TEST_F(MarkBitArrayTest, NextUnmarkedBitImmediate) {
char *addr = addrs.at(addrs.size() / 2);
size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
size_t ind = addressToMarkBitArrayIndex(addr);
mba.set();
mba.set(ind, false);
EXPECT_EQ(ind, mba.findNextZeroBitFrom(ind));
@@ -167,7 +174,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) {
EXPECT_EQ(FOUND_NONE, mba.findNextZeroBitFrom(0));
std::queue<size_t> indices;
for (char *addr : addrs) {
auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
auto ind = addressToMarkBitArrayIndex(addr);
mba.set(ind, false);
indices.push(ind);
}
@@ -182,7 +189,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) {

TEST_F(MarkBitArrayTest, PrevMarkedBitImmediate) {
char *addr = addrs.at(addrs.size() / 2);
size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
size_t ind = addressToMarkBitArrayIndex(addr);
mba.set(ind, true);
EXPECT_EQ(ind, mba.findPrevSetBitFrom(ind + 1));
}
@@ -196,7 +203,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) {
std::queue<size_t> indices;
size_t addrIdx = addrs.size();
while (addrIdx-- > 0) {
auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]);
auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]);
mba.set(ind, true);
indices.push(ind);
}
@@ -209,7 +216,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) {

TEST_F(MarkBitArrayTest, PrevUnmarkedBitImmediate) {
char *addr = addrs.at(addrs.size() / 2);
size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr);
size_t ind = addressToMarkBitArrayIndex(addr);
mba.set();
mba.set(ind, false);
EXPECT_EQ(ind, mba.findPrevZeroBitFrom(ind + 1));
@@ -225,7 +232,7 @@ TEST_F(MarkBitArrayTest, PrevUnmarkedBit) {
std::queue<size_t> indices;
size_t addrIdx = addrs.size();
while (addrIdx-- > 0) {
auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]);
auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]);
mba.set(ind, false);
indices.push(ind);
}

0 comments on commit c4cc598

Please sign in to comment.