From acd95c167b7383ab83619436478aba423fdc64c0 Mon Sep 17 00:00:00 2001 From: "Gang Zhao (Hermes)" Date: Fri, 6 Dec 2024 14:41:30 -0800 Subject: [PATCH 1/2] Remove HeapCellIterator and AlignedHeapSegment::cells() Summary: With this change, we can remove the dependency on GCCell.h in the header. Reviewed By: neildhar Differential Revision: D66727534 --- include/hermes/VM/AlignedHeapSegment.h | 40 +------------------------- lib/VM/gcs/HadesGC.cpp | 11 +++++-- 2 files changed, 10 insertions(+), 41 deletions(-) diff --git a/include/hermes/VM/AlignedHeapSegment.h b/include/hermes/VM/AlignedHeapSegment.h index 4a7d96b197e..727e3b7a41a 100644 --- a/include/hermes/VM/AlignedHeapSegment.h +++ b/include/hermes/VM/AlignedHeapSegment.h @@ -12,20 +12,17 @@ #include "hermes/Support/OSCompat.h" #include "hermes/VM/AdviseUnused.h" #include "hermes/VM/AllocResult.h" -#include "hermes/VM/AllocSource.h" #include "hermes/VM/CardTableNC.h" -#include "hermes/VM/GCBase.h" -#include "hermes/VM/GCCell.h" #include "hermes/VM/HeapAlign.h" #include "llvh/Support/MathExtras.h" #include -#include namespace hermes { namespace vm { +class GCCell; class StorageProvider; #ifndef HERMESVM_LOG_HEAP_SEGMENT_SIZE @@ -191,30 +188,6 @@ class AlignedHeapSegment { 0, "Guard page must be aligned to likely page size"); - class HeapCellIterator : public llvh::iterator_facade_base< - HeapCellIterator, - std::forward_iterator_tag, - GCCell *> { - public: - HeapCellIterator(GCCell *cell) : cell_(cell) {} - - bool operator==(const HeapCellIterator &R) const { - return cell_ == R.cell_; - } - - GCCell *const &operator*() const { - return cell_; - } - - HeapCellIterator &operator++() { - cell_ = cell_->nextCell(); - return *this; - } - - private: - GCCell *cell_{nullptr}; - }; - /// Returns the index of the segment containing \p lowLim, which is required /// to be the start of its containing segment. (This can allow extra /// efficiency, in cases where the segment start has already been computed.) @@ -333,9 +306,6 @@ class AlignedHeapSegment { /// Returns the address at which the next allocation, if any, will occur. inline char *level() const; - /// Returns an iterator range corresponding to the cells in this segment. - inline llvh::iterator_range cells(); - /// Returns whether \p a and \p b are contained in the same /// AlignedHeapSegment. inline static bool containedInSame(const void *a, const void *b); @@ -425,7 +395,6 @@ class AlignedHeapSegment { AllocResult AlignedHeapSegment::alloc(uint32_t size) { assert(lowLim() != nullptr && "Cannot allocate in a null segment"); - assert(size >= sizeof(GCCell) && "cell must be larger than GCCell"); assert(isSizeHeapAligned(size) && "size must be heap aligned"); char *cellPtr; // Initialized in the if below. 
@@ -545,13 +514,6 @@ char *AlignedHeapSegment::level() const { return level_; } -llvh::iterator_range -AlignedHeapSegment::cells() { - return { - HeapCellIterator(reinterpret_cast(start())), - HeapCellIterator(reinterpret_cast(level()))}; -} - /* static */ bool AlignedHeapSegment::containedInSame(const void *a, const void *b) { return (reinterpret_cast(a) ^ reinterpret_cast(b)) < diff --git a/lib/VM/gcs/HadesGC.cpp b/lib/VM/gcs/HadesGC.cpp index e9bf33f4b28..9397b13856b 100644 --- a/lib/VM/gcs/HadesGC.cpp +++ b/lib/VM/gcs/HadesGC.cpp @@ -174,7 +174,10 @@ template void HadesGC::forAllObjsInSegment( hermes::vm::AlignedHeapSegment &seg, CallbackFunction callback) { - for (GCCell *cell : seg.cells()) { + for (GCCell *cell = reinterpret_cast(seg.start()), + *end = reinterpret_cast(seg.level()); + cell < end; + cell = cell->nextCell()) { // Skip free-list entries. if (!vmisa(cell)) { callback(cell); @@ -1052,7 +1055,11 @@ bool HadesGC::OldGen::sweepNext(bool backgroundThread) { char *freeRangeStart = nullptr, *freeRangeEnd = nullptr; size_t mergedCells = 0; int32_t segmentSweptBytes = 0; - for (GCCell *cell : segments_[sweepIterator_.segNumber].cells()) { + auto &seg = segments_[sweepIterator_.segNumber]; + for (GCCell *cell = reinterpret_cast(seg.start()), + *end = reinterpret_cast(seg.level()); + cell < end; + cell = cell->nextCell()) { assert(cell->isValid() && "Invalid cell in sweeping"); if (AlignedHeapSegment::getCellMarkBit(cell)) { // Cannot concurrently trim storage. Technically just checking From d27151b66245993521f556a03474425c97cbe714 Mon Sep 17 00:00:00 2001 From: "Gang Zhao (Hermes)" Date: Fri, 6 Dec 2024 14:41:30 -0800 Subject: [PATCH 2/2] Move memory layout and common methods of AlignedHeapSegment to a base class (#1510) Summary: The large heap segment type should have the same storage layout as current AlignedHeapSegment, and share a few common methods. Abstract these to a base class, and make both FixedSizeHeapSegment and JumboHeapSegment inherit from the base type. 
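For orientation, a minimal sketch of the hierarchy this summary describes (class and member names are taken from the patch below; the constant value and everything elided are placeholders, not the actual definitions):

#include <cstddef>

class StorageProvider; // opaque here; owns and recycles segment storage

// Base class: the common storage layout and methods shared by both segment types.
class AlignedHeapSegment {
 public:
  // Illustrative value only; the real constant is 1 << HERMESVM_LOG_HEAP_SEGMENT_SIZE.
  static constexpr size_t kSegmentUnitSize = size_t{1} << 22;
  char *lowLim() const { return lowLim_; }
  char *hiLim() const { return lowLim_ + kSegmentUnitSize; }
 protected:
  StorageProvider *provider_{nullptr}; // created this segment; used to destroy it
  char *lowLim_{nullptr};              // segment start, aligned to kSegmentUnitSize
  char *level_{nullptr};               // next allocation position
};

// Exactly kSegmentUnitSize bytes; CardTable and MarkBitArray are stored inline
// before the allocation region. Used for YG and normal OG allocations.
class FixedSizeHeapSegment : public AlignedHeapSegment { /* ... */ };

// A larger multiple of kSegmentUnitSize holding a single GCCell; the card
// table allocates its cards/boundaries arrays separately.
class JumboHeapSegment : public AlignedHeapSegment { /* ... */ };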
Reviewed By: neildhar Differential Revision: D61675022 --- include/hermes/VM/AlignedHeapSegment.h | 531 ++++++++++-------- include/hermes/VM/CardTableNC.h | 2 +- include/hermes/VM/GCBase.h | 2 +- include/hermes/VM/HadesGC.h | 50 +- include/hermes/VM/HeapRuntime.h | 3 +- include/hermes/VM/StorageProvider.h | 7 +- lib/VM/LimitedStorageProvider.cpp | 6 +- lib/VM/Runtime.cpp | 9 +- lib/VM/StorageProvider.cpp | 35 +- lib/VM/gcs/AlignedHeapSegment.cpp | 95 ++-- lib/VM/gcs/CardTableNC.cpp | 2 +- lib/VM/gcs/HadesGC.cpp | 119 ++-- .../VMRuntime/AlignedHeapSegmentTest.cpp | 66 +-- .../VMRuntime/CardObjectBoundaryNCTest.cpp | 4 +- unittests/VMRuntime/CardTableNCTest.cpp | 4 +- unittests/VMRuntime/CrashManagerTest.cpp | 6 +- unittests/VMRuntime/GCBasicsTest.cpp | 4 +- unittests/VMRuntime/GCFragmentationTest.cpp | 8 +- unittests/VMRuntime/GCLazySegmentNCTest.cpp | 8 +- unittests/VMRuntime/GCOOMTest.cpp | 4 +- .../VMRuntime/GCReturnUnusedMemoryTest.cpp | 2 +- unittests/VMRuntime/MarkBitArrayNCTest.cpp | 44 +- unittests/VMRuntime/StorageProviderTest.cpp | 22 +- 23 files changed, 546 insertions(+), 487 deletions(-) diff --git a/include/hermes/VM/AlignedHeapSegment.h b/include/hermes/VM/AlignedHeapSegment.h index 727e3b7a41a..045a3257981 100644 --- a/include/hermes/VM/AlignedHeapSegment.h +++ b/include/hermes/VM/AlignedHeapSegment.h @@ -33,9 +33,9 @@ class StorageProvider; // TODO (T25527350): Debug Dump // TODO (T25527350): Heap Moving -/// An \c AlignedHeapSegment is a contiguous chunk of memory aligned to its own -/// storage size (which is a fixed power of two number of bytes). The storage -/// is further split up according to the diagram below: +/// An \c AlignedHeapSegment manages a contiguous chunk of memory aligned to +/// kSegmentUnitSize. The storage is further split up according to the diagram +/// below: /// /// +----------------------------------------+ /// | (1) Card Table | @@ -49,83 +49,43 @@ class StorageProvider; /// | (End) | /// +----------------------------------------+ /// -/// The tables in (1), and (2) cover the contiguous allocation space (3) -/// into which GCCells are bump allocated. +/// The tables in (1), and (2) cover the contiguous allocation space (3) into +/// which GCCells are bump allocated. They have fixed size computed from +/// kSegmentUnitSize. For segments whose size is some non-unit multiple of +/// kSegmentUnitSize, card table allocates its internal arrays separately +/// instead. Only one GCCell is allowed in each such segment, so the inline +/// Mark Bit Array is large enough. Any segment size smaller than +/// kSegmentUnitSize is not supported. The headers of all GCCells, in any +/// segment type, must reside in the first region of kSegmentUnitSize. This +/// invariant ensures that we can always get the card table from a valid GCCell +/// pointer. class AlignedHeapSegment { - public: - /// @name Constants and utility functions for the aligned storage of \c - /// AlignedHeapSegment. - /// - /// @{ - /// The size and the alignment of the storage, in bytes. - static constexpr unsigned kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE; - static constexpr size_t kSize{1 << kLogSize}; - /// Mask for isolating the offset into a storage for a pointer. - static constexpr size_t kLowMask{kSize - 1}; - /// Mask for isolating the storage being pointed into by a pointer. - static constexpr size_t kHighMask{~kLowMask}; - - /// Returns the storage size, in bytes, of an \c AlignedHeapSegment. 
- static constexpr size_t storageSize() { - return kSize; - } - - /// Returns the pointer to the beginning of the storage containing \p ptr - /// (inclusive). Assuming such a storage exists. Note that - /// - /// storageStart(seg.hiLim()) != seg.lowLim() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static void *storageStart(const void *ptr) { - return reinterpret_cast( - reinterpret_cast(ptr) & kHighMask); - } - - /// Returns the pointer to the end of the storage containing \p ptr - /// (exclusive). Assuming such a storage exists. Note that - /// - /// storageEnd(seg.hiLim()) != seg.hiLim() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static void *storageEnd(const void *ptr) { - return reinterpret_cast(storageStart(ptr)) + kSize; - } - - /// Returns the offset in bytes to \p ptr from the start of its containing - /// storage. Assuming such a storage exists. Note that - /// - /// offset(seg.hiLim()) != seg.size() - /// - /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it - /// is the first address not in the bounds. - static size_t offset(const char *ptr) { - return reinterpret_cast(ptr) & kLowMask; - } - /// @} + protected: + /// The provider that created this segment. It will be used to properly + /// destroy this. + StorageProvider *provider_{nullptr}; - /// Construct a null AlignedHeapSegment (one that does not own memory). - AlignedHeapSegment() = default; - /// \c AlignedHeapSegment is movable and assignable, but not copyable. - AlignedHeapSegment(AlignedHeapSegment &&); - AlignedHeapSegment &operator=(AlignedHeapSegment &&); - AlignedHeapSegment(const AlignedHeapSegment &) = delete; + /// The start of the aligned segment. + char *lowLim_{nullptr}; - ~AlignedHeapSegment(); + /// The current address in this segment to allocate new object. This must be + /// positioned after lowLim_ to be correctly initialized. + char *level_{start()}; - /// Create a AlignedHeapSegment by allocating memory with \p provider. - static llvh::ErrorOr create(StorageProvider *provider); - static llvh::ErrorOr create( - StorageProvider *provider, - const char *name); + public: + /// Base 2 log of the heap segment size. + static constexpr size_t kLogSize = HERMESVM_LOG_HEAP_SEGMENT_SIZE; + /// The unit segment size, in bytes. Any valid heap segment's size must be a + /// multiple of this. + static constexpr size_t kSegmentUnitSize = (1 << kLogSize); /// Contents of the memory region managed by this segment. class Contents { public: /// The number of bits representing the total number of heap-aligned /// addresses in the segment storage. - static constexpr size_t kMarkBitArraySize = kSize >> LogHeapAlign; + static constexpr size_t kMarkBitArraySize = + kSegmentUnitSize >> LogHeapAlign; /// BitArray for marking allocation region of a segment. using MarkBitArray = BitArray; @@ -134,6 +94,7 @@ class AlignedHeapSegment { void protectGuardPage(oscompat::ProtectMode mode); private: + friend class FixedSizeHeapSegment; friend class AlignedHeapSegment; /// Note that because of the Contents object, the first few bytes of the @@ -176,10 +137,11 @@ class AlignedHeapSegment { "SHSegmentInfo does not fit in available unused CardTable space."); /// The offset from the beginning of a segment of the allocatable region. 
- static constexpr size_t offsetOfAllocRegion{offsetof(Contents, allocRegion_)}; + static constexpr size_t kOffsetOfAllocRegion{ + offsetof(Contents, allocRegion_)}; static_assert( - isSizeHeapAligned(offsetOfAllocRegion), + isSizeHeapAligned(kOffsetOfAllocRegion), "Allocation region must start at a heap aligned offset"); static_assert( @@ -188,11 +150,108 @@ class AlignedHeapSegment { 0, "Guard page must be aligned to likely page size"); + ~AlignedHeapSegment(); + + /// Returns the address that is the lower bound of the segment. + /// \post The returned pointer is guaranteed to be aligned to + /// kSegmentUnitSize. + char *lowLim() const { + return lowLim_; + } + + /// Returns the address that is the upper bound of the segment. + char *hiLim() const { + return lowLim_ + kSegmentUnitSize; + } + + /// Returns the address at which the first allocation in this segment would + /// occur. + /// Disable UB sanitization because 'this' may be null during the tests. + char *start() const LLVM_NO_SANITIZE("undefined") { + return contents()->allocRegion_; + } + + /// Returns the address at which the next allocation, if any, will occur. + char *level() const { + return level_; + } + + /// Return a reference to the card table covering the memory region managed by + /// this segment. + CardTable &cardTable() const { + return contents()->cardTable_; + } + + /// Given a \p cell lives in the memory region of some valid segment \c s, + /// returns a pointer to the CardTable covering the segment containing the + /// cell. Note that this takes a GCCell pointer in order to correctly get + /// the segment starting address for JumboHeapSegment. + /// + /// \pre There exists a currently alive heap in which \p cell is allocated. + static CardTable *cardTableCovering(const GCCell *cell) { + return &contents(alignedStorageStart(cell))->cardTable_; + } + + /// Find the head of the first cell that extends into the card at index + /// \p cardIdx. + /// \return A cell such that + /// cell <= indexToAddress(cardIdx) < cell->nextCell(). + GCCell *getFirstCellHead(size_t cardIdx) { + CardTable &cards = cardTable(); + GCCell *cell = cards.firstObjForCard(cardIdx); + return cell; + } + + /// Record the head of this cell so it can be found by the card scanner. + static void setCellHead(const GCCell *cellStart, const size_t sz) { + const char *start = reinterpret_cast(cellStart); + const char *end = start + sz; + CardTable *cards = cardTableCovering(cellStart); + auto boundary = cards->nextBoundary(start); + // If this object crosses a card boundary, then update boundaries + // appropriately. + if (boundary.address() < end) { + cards->updateBoundaries(&boundary, start, end); + } + } + + /// Return a reference to the mark bit array covering the memory region + /// managed by this segment. + Contents::MarkBitArray &markBitArray() const { + return contents()->markBitArray_; + } + + /// Mark the given \p cell. Assumes the given address is a valid heap object. + static void setCellMarkBit(const GCCell *cell) { + auto *markBits = markBitArrayCovering(cell); + size_t ind = addressToMarkBitArrayIndex(cell); + markBits->set(ind, true); + } + + /// Return whether the given \p cell is marked. Assumes the given address is + /// a valid heap object. + static bool getCellMarkBit(const GCCell *cell) { + auto *markBits = markBitArrayCovering(cell); + size_t ind = addressToMarkBitArrayIndex(cell); + return markBits->at(ind); + } + + /// Translate the given address to a 0-based index in the MarkBitArray of its + /// segment. 
The base address is the start of the storage of this segment. For + /// JumboSegment, this should always return a constant index + /// kOffsetOfAllocRegion >> LogHeapAlign. + static size_t addressToMarkBitArrayIndex(const GCCell *cell) { + auto *cp = reinterpret_cast(cell); + auto *base = reinterpret_cast(alignedStorageStart(cell)); + return (cp - base) >> LogHeapAlign; + } + /// Returns the index of the segment containing \p lowLim, which is required /// to be the start of its containing segment. (This can allow extra /// efficiency, in cases where the segment start has already been computed.) + /// Note: we can't assert that \p lowLim is really the start of a segment, if + /// it's from a large object, so the caller must ensure it. static unsigned getSegmentIndexFromStart(const void *lowLim) { - assert(lowLim == storageStart(lowLim) && "Precondition."); auto *segInfo = reinterpret_cast(lowLim); return segInfo->index; } @@ -200,59 +259,162 @@ class AlignedHeapSegment { /// Requires that \p lowLim is the start address of a segment, and sets /// that segment's index to \p index. static void setSegmentIndexFromStart(void *lowLim, unsigned index) { - assert(lowLim == storageStart(lowLim) && "Precondition."); auto *segInfo = reinterpret_cast(lowLim); segInfo->index = index; } - /// Attempt an allocation of the given size in the segment. If there is - /// sufficent space, cast the space as a GCCell, and returns an uninitialized - /// pointer to that cell (with success = true). If there is not sufficient - /// space, returns {nullptr, false}. - inline AllocResult alloc(uint32_t size); +#ifndef NDEBUG + /// Set the contents of the segment to a dead value. + void clear(); +#endif + + protected: + AlignedHeapSegment() = default; + + /// Construct Contents() at the address of \p lowLim. + AlignedHeapSegment(StorageProvider *provider, void *lowLim); + + AlignedHeapSegment(AlignedHeapSegment &&); + AlignedHeapSegment &operator=(AlignedHeapSegment &&); + + /// Return a pointer to the contents of the memory region managed by this + /// segment. + Contents *contents() const { + return reinterpret_cast(lowLim_); + } /// Given the \p lowLim of some valid segment's memory region, returns a - /// pointer to the AlignedHeapSegment::Contents laid out in that storage, - /// assuming it exists. - inline static Contents *contents(void *lowLim); - inline static const Contents *contents(const void *lowLim); + /// pointer to the Contents laid out in the storage, assuming it exists. + static Contents *contents(void *lowLim) { + return reinterpret_cast(lowLim); + } - /// Given a \p ptr into the memory region of some valid segment \c s, returns - /// a pointer to the CardTable covering the segment containing the pointer. + private: + /// Used in move constructor and move assignment operator following the copy + /// and swap idiom. + friend void swap(AlignedHeapSegment &a, AlignedHeapSegment &b); + + /// Return the starting address for aligned region of size kSegmentUnitSize + /// that \p cell resides in. If \c cell resides in a JumboSegment, it's the + /// only cell there, this essentially returns its segment starting address. + static char *alignedStorageStart(const GCCell *cell) { + return reinterpret_cast( + reinterpret_cast(cell) & ~(kSegmentUnitSize - 1)); + } + + /// Given a \p cell, returns a pointer to the MarkBitArray covering the + /// segment that \p cell resides in. /// - /// \pre There exists a currently alive heap that claims to contain \c ptr. 
- inline static CardTable *cardTableCovering(const void *ptr); + /// \pre There exists a currently alive heap that claims to contain \c cell. + static Contents::MarkBitArray *markBitArrayCovering(const GCCell *cell) { + auto *segStart = alignedStorageStart(cell); + return &contents(segStart)->markBitArray_; + } +}; - /// Given a \p ptr into the memory region of some valid segment \c s, returns - /// a pointer to the MarkBitArray covering the segment containing the - /// pointer. +/// JumboHeapSegment has custom storage size that must be a multiple of +/// kSegmentUnitSize. Each such segment can only allocate a single object that +/// occupies the entire allocation space. Therefore, the inline MarkBitArray is +/// large enough, while CardTable needs to allocate its cards and boundaries +/// arrays separately. +class JumboHeapSegment : public AlignedHeapSegment {}; + +/// FixedSizeHeapSegment has fixed storage size kSegmentUnitSize. Its CardTable +/// and MarkBitArray are stored inline right before the allocation space. This +/// is used for all allocations in YoungGen and normal object allocations in +/// OldGen. +class FixedSizeHeapSegment : public AlignedHeapSegment { + /// The upper limit of the space that we can currently allocated into; + /// this may be decreased when externally allocated memory is credited to + /// the generation owning this space. + char *effectiveEnd_{end()}; + + public: + /// @name Constants and utility functions for the aligned storage of \c + /// FixedSizeHeapSegment. /// - /// \pre There exists a currently alive heap that claims to contain \c ptr. - inline static Contents::MarkBitArray *markBitArrayCovering(const void *ptr); + /// @{ + /// The size and the alignment of the storage, in bytes. + static constexpr size_t kSize = kSegmentUnitSize; + /// Mask for isolating the offset into a storage for a pointer. + static constexpr size_t kLowMask{kSize - 1}; + /// Mask for isolating the storage being pointed into by a pointer. + static constexpr size_t kHighMask{~kLowMask}; - /// Translate the given address to a 0-based index in the MarkBitArray of its - /// segment. The base address is the start of the storage of this segment. - static size_t addressToMarkBitArrayIndex(const void *ptr) { - auto *cp = reinterpret_cast(ptr); - auto *base = reinterpret_cast(storageStart(cp)); - return (cp - base) >> LogHeapAlign; + /// Returns the storage size, in bytes, of an \c FixedSizeHeapSegment. + static constexpr size_t storageSize() { + return kSize; } - /// Mark the given \p cell. Assumes the given address is a valid heap object. - inline static void setCellMarkBit(const GCCell *cell); + /// Returns the pointer to the beginning of the storage containing \p ptr + /// (inclusive). Assuming such a storage exists. Note that + /// + /// storageStart(seg.hiLim()) != seg.lowLim() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static void *storageStart(const void *ptr) { + return reinterpret_cast( + reinterpret_cast(ptr) & kHighMask); + } - /// Return whether the given \p cell is marked. Assumes the given address is - /// a valid heap object. - inline static bool getCellMarkBit(const GCCell *cell); + /// Returns the pointer to the end of the storage containing \p ptr + /// (exclusive). Assuming such a storage exists. Note that + /// + /// storageEnd(seg.hiLim()) != seg.hiLim() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. 
+ static void *storageEnd(const void *ptr) { + return reinterpret_cast(storageStart(ptr)) + kSize; + } - /// Find the head of the first cell that extends into the card at index - /// \p cardIdx. - /// \return A cell such that - /// cell <= indexToAddress(cardIdx) < cell->nextCell(). - inline GCCell *getFirstCellHead(size_t cardIdx); + /// Returns the offset in bytes to \p ptr from the start of its containing + /// storage. Assuming such a storage exists. Note that + /// + /// offset(seg.hiLim()) != seg.size() + /// + /// as \c seg.hiLim() is not contained in the bounds of \c seg -- it + /// is the first address not in the bounds. + static size_t offset(const char *ptr) { + return reinterpret_cast(ptr) & kLowMask; + } + /// @} - /// Record the head of this cell so it can be found by the card scanner. - static inline void setCellHead(const GCCell *start, const size_t sz); + /// Construct a null FixedSizeHeapSegment (one that does not own memory). + FixedSizeHeapSegment() = default; + /// \c FixedSizeHeapSegment is movable and assignable, but not copyable. + /// Default to the base type move constructor and assignment operator. All + /// fields that need to be invalidated are handled in them and the only + /// field in this class (effectiveEnd_) doesn't need to be invalidated. + FixedSizeHeapSegment(FixedSizeHeapSegment &&) = default; + FixedSizeHeapSegment &operator=(FixedSizeHeapSegment &&) = default; + FixedSizeHeapSegment(const FixedSizeHeapSegment &) = delete; + FixedSizeHeapSegment &operator=(const FixedSizeHeapSegment &) = delete; + + /// Create a FixedSizeHeapSegment by allocating memory with \p provider. + static llvh::ErrorOr create( + StorageProvider *provider, + const char *name = nullptr); + /// This is just a variant to provide the same interface as other HeapSegment + /// type and used in template functions. The extra parameter is always ignored + /// since FixedSizeHeapSegment has fixed size. + static llvh::ErrorOr + create(StorageProvider *provider, const char *name, size_t) { + return create(provider, name); + } + + /// Attempt an allocation of the given size in the segment. If there is + /// sufficent space, cast the space as a GCCell, and returns an uninitialized + /// pointer to that cell (with success = true). If there is not sufficient + /// space, returns {nullptr, false}. + inline AllocResult alloc(uint32_t size); + + /// Given a \p ptr into the memory region of some valid segment \c s, returns + /// a pointer to the CardTable covering the segment containing the pointer. + /// + /// \pre There exists a currently alive heap that claims to contain \c ptr. + inline static CardTable *cardTableCovering(const void *ptr); /// The largest size the allocation region of an aligned heap segment could /// be. @@ -267,23 +429,11 @@ class AlignedHeapSegment { /// The number of bytes in the segment that are available for allocation. inline size_t available() const; - /// Returns the address that is the lower bound of the segment. - /// \post The returned pointer is guaranteed to be aligned to a segment - /// boundary. - char *lowLim() const { - return lowLim_; - } - /// Returns the address that is the upper bound of the segment. char *hiLim() const { return lowLim() + storageSize(); } - /// Returns the address at which the first allocation in this segment would - /// occur. - /// Disable UB sanitization because 'this' may be null during the tests. 
- inline char *start() const LLVM_NO_SANITIZE("undefined"); - /// Returns the first address after the region in which allocations can occur, /// taking external memory credits into a account (they decrease the effective /// end). @@ -303,28 +453,16 @@ class AlignedHeapSegment { /// ignoring external memory credits. inline char *end() const; - /// Returns the address at which the next allocation, if any, will occur. - inline char *level() const; - /// Returns whether \p a and \p b are contained in the same - /// AlignedHeapSegment. + /// FixedSizeHeapSegment. inline static bool containedInSame(const void *a, const void *b); - /// Return a reference to the card table covering the memory region managed by - /// this segment. - /// Disable sanitization because 'this' may be null in the tests. - inline CardTable &cardTable() const LLVM_NO_SANITIZE("null"); - - /// Return a reference to the mark bit array covering the memory region - /// managed by this segment. - inline Contents::MarkBitArray &markBitArray() const; - explicit operator bool() const { return lowLim(); } /// \return \c true if and only if \p ptr is within the memory range owned by - /// this \c AlignedHeapSegment. + /// this \c FixedSizeHeapSegment. bool contains(const void *ptr) const { return storageStart(ptr) == lowLim(); } @@ -358,42 +496,15 @@ class AlignedHeapSegment { /// and not at dead memory. bool validPointer(const void *p) const; - /// Set the contents of the segment to a dead value. - void clear(); - /// Set the given range [start, end) to a dead value. - static void clear(char *start, char *end); /// Checks that dead values are present in the [start, end) range. static void checkUnwritten(char *start, char *end); #endif - protected: - /// Return a pointer to the contents of the memory region managed by this - /// segment. - inline Contents *contents() const; - - /// The start of the aligned segment. - char *lowLim_{nullptr}; - - /// The provider that created this segment. It will be used to properly - /// destroy this. - StorageProvider *provider_{nullptr}; - - char *level_{start()}; - - /// The upper limit of the space that we can currently allocated into; - /// this may be decreased when externally allocated memory is credited to - /// the generation owning this space. - char *effectiveEnd_{end()}; - - /// Used in move constructor and move assignment operator following the copy - /// and swap idiom. 
- friend void swap(AlignedHeapSegment &a, AlignedHeapSegment &b); - private: - AlignedHeapSegment(StorageProvider *provider, void *lowLim); + FixedSizeHeapSegment(StorageProvider *provider, void *lowLim); }; -AllocResult AlignedHeapSegment::alloc(uint32_t size) { +AllocResult FixedSizeHeapSegment::alloc(uint32_t size) { assert(lowLim() != nullptr && "Cannot allocate in a null segment"); assert(isSizeHeapAligned(size) && "size must be heap aligned"); @@ -428,111 +539,41 @@ AllocResult AlignedHeapSegment::alloc(uint32_t size) { return {cell, true}; } -/*static*/ -AlignedHeapSegment::Contents::MarkBitArray * -AlignedHeapSegment::markBitArrayCovering(const void *ptr) { - return &contents(storageStart(ptr))->markBitArray_; +/* static */ CardTable *FixedSizeHeapSegment::cardTableCovering( + const void *ptr) { + return &FixedSizeHeapSegment::contents(storageStart(ptr))->cardTable_; } -/*static*/ -void AlignedHeapSegment::setCellMarkBit(const GCCell *cell) { - auto *markBits = markBitArrayCovering(cell); - size_t ind = addressToMarkBitArrayIndex(cell); - markBits->set(ind, true); -} - -/*static*/ -bool AlignedHeapSegment::getCellMarkBit(const GCCell *cell) { - auto *markBits = markBitArrayCovering(cell); - size_t ind = addressToMarkBitArrayIndex(cell); - return markBits->at(ind); -} - -GCCell *AlignedHeapSegment::getFirstCellHead(size_t cardIdx) { - CardTable &cards = cardTable(); - GCCell *cell = cards.firstObjForCard(cardIdx); - assert(cell->isValid() && "Object head doesn't point to a valid object"); - return cell; -} - -/* static */ -void AlignedHeapSegment::setCellHead(const GCCell *cellStart, const size_t sz) { - const char *start = reinterpret_cast(cellStart); - const char *end = start + sz; - CardTable *cards = cardTableCovering(start); - auto boundary = cards->nextBoundary(start); - // If this object crosses a card boundary, then update boundaries - // appropriately. 
- if (boundary.address() < end) { - cards->updateBoundaries(&boundary, start, end); - } -} - -/* static */ AlignedHeapSegment::Contents *AlignedHeapSegment::contents( - void *lowLim) { - return reinterpret_cast(lowLim); -} - -/* static */ const AlignedHeapSegment::Contents *AlignedHeapSegment::contents( - const void *lowLim) { - return reinterpret_cast(lowLim); -} - -/* static */ CardTable *AlignedHeapSegment::cardTableCovering(const void *ptr) { - return &AlignedHeapSegment::contents(storageStart(ptr))->cardTable_; -} - -/* static */ constexpr size_t AlignedHeapSegment::maxSize() { +/* static */ constexpr size_t FixedSizeHeapSegment::maxSize() { return storageSize() - offsetof(Contents, allocRegion_); } -size_t AlignedHeapSegment::size() const { +size_t FixedSizeHeapSegment::size() const { return end() - start(); } -size_t AlignedHeapSegment::used() const { +size_t FixedSizeHeapSegment::used() const { return level() - start(); } -size_t AlignedHeapSegment::available() const { +size_t FixedSizeHeapSegment::available() const { return effectiveEnd() - level(); } -char *AlignedHeapSegment::start() const { - return contents()->allocRegion_; -} - -char *AlignedHeapSegment::effectiveEnd() const { +char *FixedSizeHeapSegment::effectiveEnd() const { return effectiveEnd_; } -char *AlignedHeapSegment::end() const { +char *FixedSizeHeapSegment::end() const { return start() + maxSize(); } -char *AlignedHeapSegment::level() const { - return level_; -} - /* static */ -bool AlignedHeapSegment::containedInSame(const void *a, const void *b) { +bool FixedSizeHeapSegment::containedInSame(const void *a, const void *b) { return (reinterpret_cast(a) ^ reinterpret_cast(b)) < storageSize(); } -CardTable &AlignedHeapSegment::cardTable() const { - return contents()->cardTable_; -} - -AlignedHeapSegment::Contents::MarkBitArray &AlignedHeapSegment::markBitArray() - const { - return contents()->markBitArray_; -} - -AlignedHeapSegment::Contents *AlignedHeapSegment::contents() const { - return contents(lowLim()); -} - } // namespace vm } // namespace hermes diff --git a/include/hermes/VM/CardTableNC.h b/include/hermes/VM/CardTableNC.h index 5bfa40f2102..2d53791ef91 100644 --- a/include/hermes/VM/CardTableNC.h +++ b/include/hermes/VM/CardTableNC.h @@ -23,7 +23,7 @@ namespace vm { /// The card table optimizes young gen collections by restricting the amount of /// heap belonging to the old gen that must be scanned. The card table expects -/// to be constructed inside an AlignedHeapSegment's storage, at some position +/// to be constructed inside an FixedSizeHeapSegment's storage, at some position /// before the allocation region, and covers the extent of that storage's /// memory. /// diff --git a/include/hermes/VM/GCBase.h b/include/hermes/VM/GCBase.h index c1114809745..a869d08c4a4 100644 --- a/include/hermes/VM/GCBase.h +++ b/include/hermes/VM/GCBase.h @@ -226,7 +226,7 @@ enum XorPtrKeyID { /// Return the maximum amount of bytes holdable by this heap. /// gcheapsize_t max() const; /// Return the total amount of bytes of storage this GC will require. -/// This will be a multiple of AlignedHeapSegment::storageSize(). +/// This will be a multiple of FixedSizeHeapSegment::storageSize(). 
/// gcheapsize_t storageFootprint() const; /// class GCBase { diff --git a/include/hermes/VM/HadesGC.h b/include/hermes/VM/HadesGC.h index 1ff0d7219c8..a904e875b77 100644 --- a/include/hermes/VM/HadesGC.h +++ b/include/hermes/VM/HadesGC.h @@ -76,7 +76,7 @@ class HadesGC final : public GCBase { static constexpr uint32_t maxAllocationSizeImpl() { // The largest allocation allowable in Hades is the max size a single // segment supports. - return AlignedHeapSegment::maxSize(); + return FixedSizeHeapSegment::maxSize(); } static constexpr uint32_t minAllocationSizeImpl() { @@ -297,7 +297,7 @@ class HadesGC final : public GCBase { /// \return true if the pointer lives in the young generation. bool inYoungGen(const void *p) const override { - return youngGen_.lowLim() == AlignedHeapSegment::storageStart(p); + return youngGen_.lowLim() == FixedSizeHeapSegment::storageStart(p); } bool inYoungGen(CompressedPointer p) const { return p.getSegmentStart() == youngGenCP_; @@ -361,12 +361,12 @@ class HadesGC final : public GCBase { /// Call \p callback on every non-freelist cell allocated in this segment. template static void forAllObjsInSegment( - AlignedHeapSegment &seg, + FixedSizeHeapSegment &seg, CallbackFunction callback); /// Only call the callback on cells without forwarding pointers. template static void forCompactedObjsInSegment( - AlignedHeapSegment &seg, + FixedSizeHeapSegment &seg, CallbackFunction callback, PointerBase &base); @@ -374,21 +374,21 @@ class HadesGC final : public GCBase { public: explicit OldGen(HadesGC &gc); - std::deque::iterator begin(); - std::deque::iterator end(); - std::deque::const_iterator begin() const; - std::deque::const_iterator end() const; + std::deque::iterator begin(); + std::deque::iterator end(); + std::deque::const_iterator begin() const; + std::deque::const_iterator end() const; size_t numSegments() const; - AlignedHeapSegment &operator[](size_t i); + FixedSizeHeapSegment &operator[](size_t i); /// Take ownership of the given segment. - void addSegment(AlignedHeapSegment seg); + void addSegment(FixedSizeHeapSegment seg); /// Remove the last segment from the OG. /// \return the segment that was removed. - AlignedHeapSegment popSegment(); + FixedSizeHeapSegment popSegment(); /// Indicate that OG should target having a size of \p targetSizeBytes. void setTargetSizeBytes(size_t targetSizeBytes); @@ -507,7 +507,7 @@ class HadesGC final : public GCBase { static constexpr size_t kMinSizeForLargeBlock = 1 << kLogMinSizeForLargeBlock; static constexpr size_t kNumLargeFreelistBuckets = - llvh::detail::ConstantLog2::value - + llvh::detail::ConstantLog2::value - kLogMinSizeForLargeBlock + 1; static constexpr size_t kNumFreelistBuckets = kNumSmallFreelistBuckets + kNumLargeFreelistBuckets; @@ -578,7 +578,7 @@ class HadesGC final : public GCBase { /// Use a std::deque instead of a std::vector so that references into it /// remain valid across a push_back. - std::deque segments_; + std::deque segments_; /// See \c targetSizeBytes() above. ExponentialMovingAverage targetSizeBytes_{0, 0}; @@ -660,9 +660,9 @@ class HadesGC final : public GCBase { /// Keeps the storage provider alive until after the GC is fully destructed. std::shared_ptr provider_; - /// youngGen is a bump-pointer space, so it can re-use AlignedHeapSegment. + /// youngGen is a bump-pointer space, so it can re-use FixedSizeHeapSegment. /// Protected by gcMutex_. 
- AlignedHeapSegment youngGen_; + FixedSizeHeapSegment youngGen_; AssignableCompressedPointer youngGenCP_; /// List of cells in YG that have finalizers. Iterate through this to clean @@ -672,7 +672,7 @@ class HadesGC final : public GCBase { /// Since YG collection times are the primary driver of pause times, it is /// useful to have a knob to reduce the effective size of the YG. This number - /// is the fraction of AlignedHeapSegment::maxSize() that we should use for + /// is the fraction of FixedSizeHeapSegment::maxSize() that we should use for /// the YG.. Note that we only set the YG size using this at the end of the /// first real YG, since doing it for direct promotions would waste OG memory /// without a pause time benefit. @@ -772,7 +772,7 @@ class HadesGC final : public GCBase { /// \return true if the pointer lives in the segment that is being marked or /// evacuated for compaction. bool contains(const void *p) const { - return start == AlignedHeapSegment::storageStart(p); + return start == FixedSizeHeapSegment::storageStart(p); } bool contains(CompressedPointer p) const { return p.getSegmentStart() == startCP; @@ -781,7 +781,7 @@ class HadesGC final : public GCBase { /// \return true if the pointer lives in the segment that is currently being /// evacuated for compaction. bool evacContains(const void *p) const { - return evacStart == AlignedHeapSegment::storageStart(p); + return evacStart == FixedSizeHeapSegment::storageStart(p); } bool evacContains(CompressedPointer p) const { return p.getSegmentStart() == evacStartCP; @@ -829,7 +829,7 @@ class HadesGC final : public GCBase { /// The segment being compacted. This should be removed from the OG right /// after it is identified, and freed entirely once the compaction is /// complete. - std::shared_ptr segment; + std::shared_ptr segment; } compactee_; /// The number of compactions this GC has performed. @@ -964,7 +964,7 @@ class HadesGC final : public GCBase { template void scanDirtyCardsForSegment( EvacAcceptor &acceptor, - AlignedHeapSegment &segment); + FixedSizeHeapSegment &segment); /// Find all pointers from OG into the YG/compactee during a YG collection. /// This is done quickly through use of write barriers that detect the @@ -1011,19 +1011,19 @@ class HadesGC final : public GCBase { uint64_t heapFootprint() const; /// Accessor for the YG. - AlignedHeapSegment &youngGen() { + FixedSizeHeapSegment &youngGen() { return youngGen_; } - const AlignedHeapSegment &youngGen() const { + const FixedSizeHeapSegment &youngGen() const { return youngGen_; } /// Create a new segment (to be used by either YG or OG). - llvh::ErrorOr createSegment(); + llvh::ErrorOr createSegment(); /// Set a given segment as the YG segment. /// \return the previous YG segment. - AlignedHeapSegment setYoungGen(AlignedHeapSegment seg); + FixedSizeHeapSegment setYoungGen(FixedSizeHeapSegment seg); /// Get/set the current number of external bytes used by the YG. size_t getYoungGenExternalBytes() const; @@ -1048,7 +1048,7 @@ class HadesGC final : public GCBase { /// \param extraName append this to the name of the segment. Must be /// non-empty. void addSegmentExtentToCrashManager( - const AlignedHeapSegment &seg, + const FixedSizeHeapSegment &seg, const std::string &extraName); /// Deletes a segment from the CrashManager's custom data. 
diff --git a/include/hermes/VM/HeapRuntime.h b/include/hermes/VM/HeapRuntime.h index c87aed40d76..1dd54148a6f 100644 --- a/include/hermes/VM/HeapRuntime.h +++ b/include/hermes/VM/HeapRuntime.h @@ -40,7 +40,8 @@ class HeapRuntime { if (!ptrOrError) hermes_fatal("Cannot initialize Runtime storage.", ptrOrError.getError()); static_assert( - sizeof(RT) < AlignedHeapSegment::storageSize(), "Segments too small."); + sizeof(RT) < FixedSizeHeapSegment::storageSize(), + "Segments too small."); runtime_ = static_cast(*ptrOrError); } diff --git a/include/hermes/VM/StorageProvider.h b/include/hermes/VM/StorageProvider.h index 41d87f82ac5..232fb5e4138 100644 --- a/include/hermes/VM/StorageProvider.h +++ b/include/hermes/VM/StorageProvider.h @@ -43,13 +43,14 @@ class StorageProvider { } /// Create a new segment memory space and give this memory the name \p name. /// \return A pointer to a block of memory that has - /// AlignedHeapSegment::storageSize() bytes, and is aligned on - /// AlignedHeapSegment::storageSize(). + /// FixedSizeHeapSegment::storageSize() bytes, and is aligned on + /// FixedSizeHeapSegment::storageSize(). llvh::ErrorOr newStorage(const char *name); /// Delete the given segment's memory space, and make it available for re-use. /// \post Nothing in the range [storage, storage + - /// AlignedHeapSegment::storageSize()) is valid memory to be read or written. + /// FixedSizeHeapSegment::storageSize()) is valid memory to be read or + /// written. void deleteStorage(void *storage); /// The number of storages this provider has allocated in its lifetime. diff --git a/lib/VM/LimitedStorageProvider.cpp b/lib/VM/LimitedStorageProvider.cpp index 90e3e6138b5..12b05c87498 100644 --- a/lib/VM/LimitedStorageProvider.cpp +++ b/lib/VM/LimitedStorageProvider.cpp @@ -14,10 +14,10 @@ namespace hermes { namespace vm { llvh::ErrorOr LimitedStorageProvider::newStorageImpl(const char *name) { - if (limit_ < AlignedHeapSegment::storageSize()) { + if (limit_ < FixedSizeHeapSegment::storageSize()) { return make_error_code(OOMError::TestVMLimitReached); } - limit_ -= AlignedHeapSegment::storageSize(); + limit_ -= FixedSizeHeapSegment::storageSize(); return delegate_->newStorage(name); } @@ -26,7 +26,7 @@ void LimitedStorageProvider::deleteStorageImpl(void *storage) { return; } delegate_->deleteStorage(storage); - limit_ += AlignedHeapSegment::storageSize(); + limit_ += FixedSizeHeapSegment::storageSize(); } } // namespace vm diff --git a/lib/VM/Runtime.cpp b/lib/VM/Runtime.cpp index 9307b5cfce0..21f08860e93 100644 --- a/lib/VM/Runtime.cpp +++ b/lib/VM/Runtime.cpp @@ -159,7 +159,7 @@ std::shared_ptr Runtime::create(const RuntimeConfig &runtimeConfig) { uint64_t maxHeapSize = runtimeConfig.getGCConfig().getMaxHeapSize(); // Allow some extra segments for the runtime, and as a buffer for the GC. uint64_t providerSize = std::min( - 1ULL << 32, maxHeapSize + AlignedHeapSegment::storageSize() * 4); + 1ULL << 32, maxHeapSize + FixedSizeHeapSegment::storageSize() * 4); std::shared_ptr sp = StorageProvider::contiguousVAProvider(providerSize); auto rt = HeapRuntime::create(sp); @@ -252,7 +252,12 @@ void RuntimeBase::registerHeapSegment(unsigned idx, void *lowLim) { reinterpret_cast(lowLim) - (idx << AlignedHeapSegment::kLogSize); segmentMap[idx] = bias; #endif - assert(lowLim == AlignedHeapSegment::storageStart(lowLim) && "Precondition"); + // Ideally we need to assert that lowLim is the start address of the segment, + // but the approach for computing segment start address does not work for + // JumboHeapSegment. 
+ assert( + (uintptr_t)(lowLim) % AlignedHeapSegment::kSegmentUnitSize == 0 && + "Segment start address should be aligned to kSegmentUnitSize"); AlignedHeapSegment::setSegmentIndexFromStart(lowLim, idx); } diff --git a/lib/VM/StorageProvider.cpp b/lib/VM/StorageProvider.cpp index 67fed1eb8d3..bd0a33d61ce 100644 --- a/lib/VM/StorageProvider.cpp +++ b/lib/VM/StorageProvider.cpp @@ -57,12 +57,12 @@ namespace { bool isAligned(void *p) { return (reinterpret_cast(p) & - (AlignedHeapSegment::storageSize() - 1)) == 0; + (FixedSizeHeapSegment::storageSize() - 1)) == 0; } char *alignAlloc(void *p) { return reinterpret_cast(llvh::alignTo( - reinterpret_cast(p), AlignedHeapSegment::storageSize())); + reinterpret_cast(p), FixedSizeHeapSegment::storageSize())); } void *getMmapHint() { @@ -85,9 +85,9 @@ class VMAllocateStorageProvider final : public StorageProvider { class ContiguousVAStorageProvider final : public StorageProvider { public: ContiguousVAStorageProvider(size_t size) - : size_(llvh::alignTo(size)) { + : size_(llvh::alignTo(size)) { auto result = oscompat::vm_reserve_aligned( - size_, AlignedHeapSegment::storageSize(), getMmapHint()); + size_, FixedSizeHeapSegment::storageSize(), getMmapHint()); if (!result) hermes_fatal("Contiguous storage allocation failed.", result.getError()); level_ = start_ = static_cast(*result); @@ -104,13 +104,14 @@ class ContiguousVAStorageProvider final : public StorageProvider { freelist_.pop_back(); } else if (level_ < start_ + size_) { storage = - std::exchange(level_, level_ + AlignedHeapSegment::storageSize()); + std::exchange(level_, level_ + FixedSizeHeapSegment::storageSize()); } else { return make_error_code(OOMError::MaxStorageReached); } - auto res = oscompat::vm_commit(storage, AlignedHeapSegment::storageSize()); + auto res = + oscompat::vm_commit(storage, FixedSizeHeapSegment::storageSize()); if (res) { - oscompat::vm_name(storage, AlignedHeapSegment::storageSize(), name); + oscompat::vm_name(storage, FixedSizeHeapSegment::storageSize(), name); } return res; } @@ -118,12 +119,12 @@ class ContiguousVAStorageProvider final : public StorageProvider { void deleteStorageImpl(void *storage) override { assert( !llvh::alignmentAdjustment( - storage, AlignedHeapSegment::storageSize()) && + storage, FixedSizeHeapSegment::storageSize()) && "Storage not aligned"); assert(storage >= start_ && storage < level_ && "Storage not in region"); oscompat::vm_name( - storage, AlignedHeapSegment::storageSize(), kFreeRegionName); - oscompat::vm_uncommit(storage, AlignedHeapSegment::storageSize()); + storage, FixedSizeHeapSegment::storageSize(), kFreeRegionName); + oscompat::vm_uncommit(storage, FixedSizeHeapSegment::storageSize()); freelist_.push_back(storage); } @@ -149,11 +150,11 @@ class MallocStorageProvider final : public StorageProvider { llvh::ErrorOr VMAllocateStorageProvider::newStorageImpl( const char *name) { - assert(AlignedHeapSegment::storageSize() % oscompat::page_size() == 0); + assert(FixedSizeHeapSegment::storageSize() % oscompat::page_size() == 0); // Allocate the space, hoping it will be the correct alignment. 
auto result = oscompat::vm_allocate_aligned( - AlignedHeapSegment::storageSize(), - AlignedHeapSegment::storageSize(), + FixedSizeHeapSegment::storageSize(), + FixedSizeHeapSegment::storageSize(), getMmapHint()); if (!result) { return result; @@ -162,11 +163,11 @@ llvh::ErrorOr VMAllocateStorageProvider::newStorageImpl( assert(isAligned(mem)); (void)&isAligned; #ifdef HERMESVM_ALLOW_HUGE_PAGES - oscompat::vm_hugepage(mem, AlignedHeapSegment::storageSize()); + oscompat::vm_hugepage(mem, FixedSizeHeapSegment::storageSize()); #endif // Name the memory region on platforms that support naming. - oscompat::vm_name(mem, AlignedHeapSegment::storageSize(), name); + oscompat::vm_name(mem, FixedSizeHeapSegment::storageSize(), name); return mem; } @@ -174,13 +175,13 @@ void VMAllocateStorageProvider::deleteStorageImpl(void *storage) { if (!storage) { return; } - oscompat::vm_free_aligned(storage, AlignedHeapSegment::storageSize()); + oscompat::vm_free_aligned(storage, FixedSizeHeapSegment::storageSize()); } llvh::ErrorOr MallocStorageProvider::newStorageImpl(const char *name) { // name is unused, can't name malloc memory. (void)name; - void *mem = checkedMalloc2(AlignedHeapSegment::storageSize(), 2u); + void *mem = checkedMalloc2(FixedSizeHeapSegment::storageSize(), 2u); void *lowLim = alignAlloc(mem); assert(isAligned(lowLim) && "New storage should be aligned"); lowLimToAllocHandle_[lowLim] = mem; diff --git a/lib/VM/gcs/AlignedHeapSegment.cpp b/lib/VM/gcs/AlignedHeapSegment.cpp index 1509168194d..d8f87e0fdfc 100644 --- a/lib/VM/gcs/AlignedHeapSegment.cpp +++ b/lib/VM/gcs/AlignedHeapSegment.cpp @@ -22,6 +22,17 @@ namespace hermes { namespace vm { +#ifndef NDEBUG +/// Set the given range [start, end) to a dead value. +static void clearRange(char *start, char *end) { +#if LLVM_ADDRESS_SANITIZER_BUILD + __asan_poison_memory_region(start, end - start); +#else + std::memset(start, kInvalidHeapValue, end - start); +#endif +} +#endif + void AlignedHeapSegment::Contents::protectGuardPage( oscompat::ProtectMode mode) { char *begin = &paddedGuardPage_[kGuardPagePadding]; @@ -33,38 +44,19 @@ void AlignedHeapSegment::Contents::protectGuardPage( } } -llvh::ErrorOr AlignedHeapSegment::create( - StorageProvider *provider) { - return create(provider, nullptr); -} - -llvh::ErrorOr AlignedHeapSegment::create( - StorageProvider *provider, - const char *name) { - auto result = provider->newStorage(name); - if (!result) { - return result.getError(); - } - return AlignedHeapSegment{provider, *result}; -} - AlignedHeapSegment::AlignedHeapSegment(StorageProvider *provider, void *lowLim) - : lowLim_(static_cast(lowLim)), provider_(provider) { - assert( - storageStart(lowLim_) == lowLim_ && - "The lower limit of this storage must be aligned"); + : provider_(provider), lowLim_(reinterpret_cast(lowLim)) { // Storage end must be page-aligned so that markUnused below stays in // segment. 
assert( reinterpret_cast(hiLim()) % oscompat::page_size() == 0 && "The higher limit must be page aligned"); - if (*this) { - new (contents()) Contents(); - contents()->protectGuardPage(oscompat::ProtectMode::None); + new (contents()) Contents(); + contents()->protectGuardPage(oscompat::ProtectMode::None); + #ifndef NDEBUG - clear(); + clear(); #endif - } } void swap(AlignedHeapSegment &a, AlignedHeapSegment &b) { @@ -73,7 +65,6 @@ void swap(AlignedHeapSegment &a, AlignedHeapSegment &b) { std::swap(a.lowLim_, b.lowLim_); std::swap(a.provider_, b.provider_); std::swap(a.level_, b.level_); - std::swap(a.effectiveEnd_, b.effectiveEnd_); } AlignedHeapSegment::AlignedHeapSegment(AlignedHeapSegment &&other) @@ -92,14 +83,30 @@ AlignedHeapSegment::~AlignedHeapSegment() { } contents()->protectGuardPage(oscompat::ProtectMode::ReadWrite); contents()->~Contents(); - __asan_unpoison_memory_region(start(), end() - start()); + __asan_unpoison_memory_region(start(), hiLim() - start()); if (provider_) { provider_->deleteStorage(lowLim_); } } -void AlignedHeapSegment::markUnused(char *start, char *end) { +llvh::ErrorOr FixedSizeHeapSegment::create( + StorageProvider *provider, + const char *name) { + auto result = provider->newStorage(name); + if (!result) { + return result.getError(); + } + assert(*result && "Heap segment storage allocation failure"); + return FixedSizeHeapSegment{provider, *result}; +} + +FixedSizeHeapSegment::FixedSizeHeapSegment( + StorageProvider *provider, + void *lowLim) + : AlignedHeapSegment(provider, lowLim) {} + +void FixedSizeHeapSegment::markUnused(char *start, char *end) { assert( !llvh::alignmentAdjustment(start, oscompat::page_size()) && !llvh::alignmentAdjustment(end, oscompat::page_size())); @@ -116,11 +123,11 @@ void AlignedHeapSegment::markUnused(char *start, char *end) { } template -void AlignedHeapSegment::setLevel(char *lvl) { +void FixedSizeHeapSegment::setLevel(char *lvl) { assert(dbgContainsLevel(lvl)); if (lvl < level_) { #ifndef NDEBUG - clear(lvl, level_); + clearRange(lvl, level_); #else if (MU == AdviseUnused::Yes) { const size_t PS = oscompat::page_size(); @@ -137,19 +144,19 @@ void AlignedHeapSegment::setLevel(char *lvl) { } /// Explicit template instantiations for setLevel -template void AlignedHeapSegment::setLevel(char *lvl); -template void AlignedHeapSegment::setLevel(char *lvl); +template void FixedSizeHeapSegment::setLevel(char *lvl); +template void FixedSizeHeapSegment::setLevel(char *lvl); template -void AlignedHeapSegment::resetLevel() { +void FixedSizeHeapSegment::resetLevel() { setLevel(start()); } /// Explicit template instantiations for resetLevel -template void AlignedHeapSegment::resetLevel(); -template void AlignedHeapSegment::resetLevel(); +template void FixedSizeHeapSegment::resetLevel(); +template void FixedSizeHeapSegment::resetLevel(); -void AlignedHeapSegment::setEffectiveEnd(char *effectiveEnd) { +void FixedSizeHeapSegment::setEffectiveEnd(char *effectiveEnd) { assert( start() <= effectiveEnd && effectiveEnd <= end() && "Must be valid end for segment."); @@ -157,33 +164,25 @@ void AlignedHeapSegment::setEffectiveEnd(char *effectiveEnd) { effectiveEnd_ = effectiveEnd; } -void AlignedHeapSegment::clearExternalMemoryCharge() { +void FixedSizeHeapSegment::clearExternalMemoryCharge() { setEffectiveEnd(end()); } #ifndef NDEBUG -bool AlignedHeapSegment::dbgContainsLevel(const void *lvl) const { +bool FixedSizeHeapSegment::dbgContainsLevel(const void *lvl) const { return contains(lvl) || lvl == hiLim(); } -bool 
AlignedHeapSegment::validPointer(const void *p) const { +bool FixedSizeHeapSegment::validPointer(const void *p) const { return start() <= p && p < level() && static_cast(p)->isValid(); } void AlignedHeapSegment::clear() { - clear(start(), end()); -} - -/* static */ void AlignedHeapSegment::clear(char *start, char *end) { -#if LLVM_ADDRESS_SANITIZER_BUILD - __asan_poison_memory_region(start, end - start); -#else - std::memset(start, kInvalidHeapValue, end - start); -#endif + clearRange(start(), hiLim()); } -/* static */ void AlignedHeapSegment::checkUnwritten(char *start, char *end) { +/* static */ void FixedSizeHeapSegment::checkUnwritten(char *start, char *end) { #if !LLVM_ADDRESS_SANITIZER_BUILD && defined(HERMES_SLOW_DEBUG) // Check that the space was not written into. std::for_each( diff --git a/lib/VM/gcs/CardTableNC.cpp b/lib/VM/gcs/CardTableNC.cpp index ec94d5e5710..c044ed21789 100644 --- a/lib/VM/gcs/CardTableNC.cpp +++ b/lib/VM/gcs/CardTableNC.cpp @@ -22,7 +22,7 @@ namespace vm { #ifndef NDEBUG /* static */ void *CardTable::storageEnd(const void *ptr) { - return AlignedHeapSegment::storageEnd(ptr); + return FixedSizeHeapSegment::storageEnd(ptr); } #endif diff --git a/lib/VM/gcs/HadesGC.cpp b/lib/VM/gcs/HadesGC.cpp index 9397b13856b..1378fa6e78c 100644 --- a/lib/VM/gcs/HadesGC.cpp +++ b/lib/VM/gcs/HadesGC.cpp @@ -36,7 +36,7 @@ static constexpr size_t kTargetMaxPauseMs = 50; // Assert that it is always safe to construct a cell that is as large as the // entire segment. This lets us always assume that contiguous regions in a // segment can be safely turned into a single FreelistCell. -static_assert(AlignedHeapSegment::maxSize() <= HadesGC::maxAllocationSize()); +static_assert(FixedSizeHeapSegment::maxSize() <= HadesGC::maxAllocationSize()); // A free list cell is always variable-sized. const VTable HadesGC::OldGen::FreelistCell::vt{ @@ -172,7 +172,7 @@ HadesGC::OldGen::FreelistCell *HadesGC::OldGen::removeCellFromFreelist( /* static */ template void HadesGC::forAllObjsInSegment( - hermes::vm::AlignedHeapSegment &seg, + FixedSizeHeapSegment &seg, CallbackFunction callback) { for (GCCell *cell = reinterpret_cast(seg.start()), *end = reinterpret_cast(seg.level()); @@ -188,7 +188,7 @@ void HadesGC::forAllObjsInSegment( /* static */ template void HadesGC::forCompactedObjsInSegment( - AlignedHeapSegment &seg, + FixedSizeHeapSegment &seg, CallbackFunction callback, PointerBase &base) { void *const stop = seg.level(); @@ -418,7 +418,7 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, if (CompactionEnabled && gc.compactee_.contains(ptr)) { // If a compaction is about to take place, dirty the card for any newly // evacuated cells, since the marker may miss them. - AlignedHeapSegment::cardTableCovering(heapLoc)->dirtyCardForAddress( + FixedSizeHeapSegment::cardTableCovering(heapLoc)->dirtyCardForAddress( heapLoc); } return ptr; @@ -436,7 +436,7 @@ class HadesGC::EvacAcceptor final : public RootAndSlotAcceptor, if (CompactionEnabled && gc.compactee_.contains(cptr)) { // If a compaction is about to take place, dirty the card for any newly // evacuated cells, since the marker may miss them. 
- AlignedHeapSegment::cardTableCovering(heapLoc)->dirtyCardForAddress( + FixedSizeHeapSegment::cardTableCovering(heapLoc)->dirtyCardForAddress( heapLoc); } return cptr; @@ -649,7 +649,7 @@ class HadesGC::MarkAcceptor final : public RootAndSlotAcceptor { if (gc.compactee_.contains(cell) && !gc.compactee_.contains(heapLoc)) { // This is a pointer in the heap pointing into the compactee, dirty the // corresponding card. - AlignedHeapSegment::cardTableCovering(heapLoc)->dirtyCardForAddress( + FixedSizeHeapSegment::cardTableCovering(heapLoc)->dirtyCardForAddress( heapLoc); } if (AlignedHeapSegment::getCellMarkBit(cell)) { @@ -1198,7 +1198,7 @@ size_t HadesGC::OldGen::sweepSegmentsRemaining() const { } size_t HadesGC::OldGen::getMemorySize() const { - size_t memorySize = segments_.size() * sizeof(AlignedHeapSegment); + size_t memorySize = segments_.size() * sizeof(FixedSizeHeapSegment); memorySize += segmentBuckets_.size() * sizeof(SegmentBuckets); return memorySize; } @@ -1224,7 +1224,7 @@ HadesGC::HadesGC( maxHeapSize_{std::max( gcConfig.getMaxHeapSize(), // At least one YG segment and one OG segment. - 2 * AlignedHeapSegment::storageSize())}, + 2 * FixedSizeHeapSegment::storageSize())}, provider_(std::move(provider)), oldGen_{*this}, backgroundExecutor_{ @@ -1235,22 +1235,22 @@ HadesGC::HadesGC( occupancyTarget_(gcConfig.getOccupancyTarget()), ygAverageSurvivalBytes_{ /*weight*/ 0.5, - /*init*/ kYGInitialSizeFactor * AlignedHeapSegment::maxSize() * + /*init*/ kYGInitialSizeFactor * FixedSizeHeapSegment::maxSize() * kYGInitialSurvivalRatio} { (void)vmExperimentFlags; std::lock_guard lk(gcMutex_); crashMgr_->setCustomData("HermesGC", getKindAsStr().c_str()); // createSegment relies on member variables and should not be called until // they are initialised. - llvh::ErrorOr newYoungGen = createSegment(); + llvh::ErrorOr newYoungGen = createSegment(); if (!newYoungGen) hermes_fatal("Failed to initialize the young gen", newYoungGen.getError()); setYoungGen(std::move(newYoungGen.get())); const size_t initHeapSize = std::max( {gcConfig.getMinHeapSize(), gcConfig.getInitHeapSize(), - AlignedHeapSegment::maxSize()}); - oldGen_.setTargetSizeBytes(initHeapSize - AlignedHeapSegment::maxSize()); + FixedSizeHeapSegment::maxSize()}); + oldGen_.setTargetSizeBytes(initHeapSize - FixedSizeHeapSegment::maxSize()); } HadesGC::~HadesGC() { @@ -1267,7 +1267,7 @@ void HadesGC::getHeapInfo(HeapInfo &info) { info.allocatedBytes = allocatedBytes(); // Heap size includes fragmentation, which means every segment is fully used. info.heapSize = - (oldGen_.numSegments() + 1) * AlignedHeapSegment::storageSize(); + (oldGen_.numSegments() + 1) * FixedSizeHeapSegment::storageSize(); // If YG isn't empty, its bytes haven't been accounted for yet, add them here. info.totalAllocatedBytes = totalAllocatedBytes_ + youngGen().used(); info.va = info.heapSize; @@ -1497,7 +1497,7 @@ void HadesGC::oldGenCollection(std::string cause, bool forceCompaction) { // First, clear any mark bits that were set by a previous collection or // direct-to-OG allocation, they aren't needed anymore. - for (AlignedHeapSegment &seg : oldGen_) + for (FixedSizeHeapSegment &seg : oldGen_) seg.markBitArray().reset(); // Unmark all symbols in the identifier table, as Symbol liveness will be @@ -1668,13 +1668,13 @@ void HadesGC::prepareCompactee(bool forceCompaction) { // from the heap, we only want to compact if there are at least 2 segments in // the OG. 
uint64_t buffer = std::max( - oldGen_.targetSizeBytes() / 20, AlignedHeapSegment::maxSize()); + oldGen_.targetSizeBytes() / 20, FixedSizeHeapSegment::maxSize()); uint64_t threshold = oldGen_.targetSizeBytes() + buffer; uint64_t totalBytes = oldGen_.size() + oldGen_.externalBytes(); if ((forceCompaction || totalBytes > threshold) && oldGen_.numSegments() > 1) { compactee_.segment = - std::make_shared(oldGen_.popSegment()); + std::make_shared(oldGen_.popSegment()); addSegmentExtentToCrashManager( *compactee_.segment, kCompacteeNameForCrashMgr); compactee_.start = compactee_.segment->lowLim(); @@ -1871,7 +1871,7 @@ void HadesGC::finalizeAll() { forCompactedObjsInSegment( *compactee_.segment, finalizeCallback, getPointerBase()); - for (AlignedHeapSegment &seg : oldGen_) + for (FixedSizeHeapSegment &seg : oldGen_) forAllObjsInSegment(seg, finalizeCallback); } @@ -1962,7 +1962,7 @@ void HadesGC::constructorWriteBarrierRangeSlow( const GCHermesValue *start, uint32_t numHVs) { assert( - AlignedHeapSegment::containedInSame(start, start + numHVs) && + FixedSizeHeapSegment::containedInSame(start, start + numHVs) && "Range must start and end within a heap segment."); // Most constructors should be running in the YG, so in the common case, we @@ -1970,7 +1970,7 @@ void HadesGC::constructorWriteBarrierRangeSlow( // then just dirty all the cards corresponding to it, and we can scan them for // pointers later. This is less precise but makes the write barrier faster. - AlignedHeapSegment::cardTableCovering(start)->dirtyCardsForAddressRange( + FixedSizeHeapSegment::cardTableCovering(start)->dirtyCardsForAddressRange( start, start + numHVs); } @@ -1978,9 +1978,9 @@ void HadesGC::constructorWriteBarrierRangeSlow( const GCSmallHermesValue *start, uint32_t numHVs) { assert( - AlignedHeapSegment::containedInSame(start, start + numHVs) && + FixedSizeHeapSegment::containedInSame(start, start + numHVs) && "Range must start and end within a heap segment."); - AlignedHeapSegment::cardTableCovering(start)->dirtyCardsForAddressRange( + FixedSizeHeapSegment::cardTableCovering(start)->dirtyCardsForAddressRange( start, start + numHVs); } @@ -2055,7 +2055,7 @@ void HadesGC::relocationWriteBarrier(const void *loc, const void *value) { // Do not dirty cards for compactee->compactee, yg->yg, or yg->compactee // pointers. But do dirty cards for compactee->yg pointers, since compaction // may not happen in the next YG. - if (AlignedHeapSegment::containedInSame(loc, value)) { + if (FixedSizeHeapSegment::containedInSame(loc, value)) { return; } if (inYoungGen(value) || compactee_.contains(value)) { @@ -2064,7 +2064,7 @@ void HadesGC::relocationWriteBarrier(const void *loc, const void *value) { // allocation. // Note that this *only* applies since the boundaries are updated separately // from the card table being marked itself. - AlignedHeapSegment::cardTableCovering(loc)->dirtyCardForAddress(loc); + FixedSizeHeapSegment::cardTableCovering(loc)->dirtyCardForAddress(loc); } } @@ -2105,7 +2105,7 @@ void HadesGC::forAllObjs(const std::function &callback) { callback(cell); } }; - for (AlignedHeapSegment &seg : oldGen_) { + for (FixedSizeHeapSegment &seg : oldGen_) { if (concurrentPhase_ != Phase::Sweep) forAllObjsInSegment(seg, callback); else @@ -2223,7 +2223,7 @@ GCCell *HadesGC::OldGen::alloc(uint32_t sz) { // Before waiting for a collection to finish, check if we're below the max // heap size and can simply allocate another segment. This will prevent // blocking the YG unnecessarily. 
- llvh::ErrorOr seg = gc_.createSegment(); + llvh::ErrorOr seg = gc_.createSegment(); if (seg) { // Complete this allocation using a bump alloc. AllocResult res = seg->alloc(sz); @@ -2524,7 +2524,7 @@ void HadesGC::youngGenCollection( // 3. The duration of this collection may not have met our pause time goals. youngGen().setEffectiveEnd( youngGen().start() + - static_cast(ygSizeFactor_ * AlignedHeapSegment::maxSize())); + static_cast(ygSizeFactor_ * FixedSizeHeapSegment::maxSize())); // We have to set these after the collection, in case a compaction took // place and updated these metrics. @@ -2605,7 +2605,7 @@ bool HadesGC::promoteYoungGenToOldGen() { // TODO: Add more stringent criteria for turning off this flag, for instance, // once the heap reaches a certain size. That would avoid growing the heap to // the maximum possible size before stopping promotions. - llvh::ErrorOr newYoungGen = createSegment(); + llvh::ErrorOr newYoungGen = createSegment(); if (!newYoungGen) { promoteYGToOG_ = false; return false; @@ -2631,7 +2631,7 @@ bool HadesGC::promoteYoungGenToOldGen() { return true; } -AlignedHeapSegment HadesGC::setYoungGen(AlignedHeapSegment seg) { +FixedSizeHeapSegment HadesGC::setYoungGen(FixedSizeHeapSegment seg) { addSegmentExtentToCrashManager(seg, "YG"); youngGenFinalizables_.clear(); std::swap(youngGen_, seg); @@ -2685,7 +2685,7 @@ void HadesGC::updateYoungGenSizeFactor() { template void HadesGC::scanDirtyCardsForSegment( EvacAcceptor &acceptor, - AlignedHeapSegment &seg) { + FixedSizeHeapSegment &seg) { const auto &cardTable = seg.cardTable(); // Use level instead of end in case the OG segment is still in bump alloc // mode. @@ -2726,6 +2726,8 @@ void HadesGC::scanDirtyCardsForSegment( // Use the object heads rather than the card table to discover the head // of the object. GCCell *const firstObj = seg.getFirstCellHead(iBegin); + assert( + firstObj->isValid() && "Object head doesn't point to a valid object"); GCCell *obj = firstObj; // Throughout this loop, objects are being marked which could promote // other objects into the OG. Such objects might be promoted onto a dirty @@ -2781,7 +2783,7 @@ void HadesGC::scanDirtyCards(EvacAcceptor &acceptor) { for (size_t i = 0; i < segEnd; ++i) { // It is safe to hold this reference across a push_back into // oldGen_.segments_ since references into a deque are not invalidated. - AlignedHeapSegment &seg = oldGen_[i]; + FixedSizeHeapSegment &seg = oldGen_[i]; scanDirtyCardsForSegment(acceptor, seg); // Do not clear the card table if the OG thread is currently marking to // prepare for a compaction. Note that we should clear the card tables if @@ -2817,7 +2819,7 @@ uint64_t HadesGC::externalBytes() const { uint64_t HadesGC::segmentFootprint() const { size_t totalSegments = oldGen_.numSegments() + (youngGen_ ? 1 : 0) + (compactee_.segment ? 1 : 0); - return totalSegments * AlignedHeapSegment::storageSize(); + return totalSegments * FixedSizeHeapSegment::storageSize(); } uint64_t HadesGC::heapFootprint() const { @@ -2840,7 +2842,7 @@ uint64_t HadesGC::OldGen::externalBytes() const { uint64_t HadesGC::OldGen::size() const { size_t totalSegments = numSegments() + (gc_.compactee_.segment ? 1 : 0); - return totalSegments * AlignedHeapSegment::maxSize(); + return totalSegments * FixedSizeHeapSegment::maxSize(); } uint64_t HadesGC::OldGen::targetSizeBytes() const { @@ -2871,7 +2873,7 @@ llvh::ErrorOr HadesGC::getVMFootprintForTest() const { return ygFootprint; // Add each OG segment. 
- for (const AlignedHeapSegment &seg : oldGen_) { + for (const FixedSizeHeapSegment &seg : oldGen_) { auto segFootprint = hermes::oscompat::vm_footprint(seg.start(), seg.hiLim()); if (!segFootprint) @@ -2881,19 +2883,20 @@ llvh::ErrorOr HadesGC::getVMFootprintForTest() const { return footprint; } -std::deque::iterator HadesGC::OldGen::begin() { +std::deque::iterator HadesGC::OldGen::begin() { return segments_.begin(); } -std::deque::iterator HadesGC::OldGen::end() { +std::deque::iterator HadesGC::OldGen::end() { return segments_.end(); } -std::deque::const_iterator HadesGC::OldGen::begin() const { +std::deque::const_iterator HadesGC::OldGen::begin() + const { return segments_.begin(); } -std::deque::const_iterator HadesGC::OldGen::end() const { +std::deque::const_iterator HadesGC::OldGen::end() const { return segments_.end(); } @@ -2901,21 +2904,20 @@ size_t HadesGC::OldGen::numSegments() const { return segments_.size(); } -AlignedHeapSegment &HadesGC::OldGen::operator[](size_t i) { +FixedSizeHeapSegment &HadesGC::OldGen::operator[](size_t i) { return segments_[i]; } -llvh::ErrorOr HadesGC::createSegment() { +llvh::ErrorOr HadesGC::createSegment() { // No heap size limit when Handle-SAN is on, to allow the heap enough room to // keep moving things around. if (!sanitizeRate_ && heapFootprint() >= maxHeapSize_) return make_error_code(OOMError::MaxHeapReached); - - auto res = AlignedHeapSegment::create(provider_.get(), "hades-segment"); + auto res = FixedSizeHeapSegment::create(provider_.get(), "hades-segment"); if (!res) { return res.getError(); } - AlignedHeapSegment seg(std::move(res.get())); + FixedSizeHeapSegment seg(std::move(res.get())); // Even if compressed pointers are off, we still use the segment index for // crash manager indices. size_t segIdx; @@ -2928,12 +2930,12 @@ llvh::ErrorOr HadesGC::createSegment() { gcCallbacks_.registerHeapSegment(segIdx, seg.lowLim()); addSegmentExtentToCrashManager(seg, std::to_string(segIdx)); seg.markBitArray().set(); - return llvh::ErrorOr(std::move(seg)); + return llvh::ErrorOr(std::move(seg)); } -void HadesGC::OldGen::addSegment(AlignedHeapSegment seg) { +void HadesGC::OldGen::addSegment(FixedSizeHeapSegment seg) { segments_.emplace_back(std::move(seg)); - AlignedHeapSegment &newSeg = segments_.back(); + FixedSizeHeapSegment &newSeg = segments_.back(); incrementAllocatedBytes(newSeg.used()); // Add a set of freelist buckets for this segment. segmentBuckets_.emplace_back(); @@ -2954,7 +2956,7 @@ void HadesGC::OldGen::addSegment(AlignedHeapSegment seg) { gc_.addSegmentExtentToCrashManager(newSeg, std::to_string(numSegments())); } -AlignedHeapSegment HadesGC::OldGen::popSegment() { +FixedSizeHeapSegment HadesGC::OldGen::popSegment() { const auto &segBuckets = segmentBuckets_.back(); for (size_t bucket = 0; bucket < kNumFreelistBuckets; ++bucket) { if (segBuckets[bucket].head) { @@ -2976,13 +2978,13 @@ void HadesGC::OldGen::setTargetSizeBytes(size_t targetSizeBytes) { } bool HadesGC::inOldGen(const void *p) const { - // If it isn't in any OG segment or the compactee, then this pointer is not in - // the OG. + // If it isn't in any OG segment or the compactee, then this pointer is not + // in the OG. 
return compactee_.contains(p) || std::any_of( oldGen_.begin(), oldGen_.end(), - [p](const AlignedHeapSegment &seg) { return seg.contains(p); }); + [p](const FixedSizeHeapSegment &seg) { return seg.contains(p); }); } void HadesGC::yieldToOldGen() { @@ -3020,12 +3022,13 @@ size_t HadesGC::getDrainRate() { assert(!kConcurrentGC); // Set a fixed floor on the mark rate, regardless of the pause time budget. - // yieldToOldGen may operate in multiples of this drain rate if it fits in the - // budget. Pinning the mark rate in this way helps us keep the dynamically - // computed OG collection threshold in a reasonable range. On a slow device, - // where we can only do one iteration of this drain rate, the OG threshold - // will be ~75%. And by not increasing the drain rate when the threshold is - // high, we avoid having a one-way ratchet effect that hurts pause times. + // yieldToOldGen may operate in multiples of this drain rate if it fits in + // the budget. Pinning the mark rate in this way helps us keep the + // dynamically computed OG collection threshold in a reasonable range. On a + // slow device, where we can only do one iteration of this drain rate, the + // OG threshold will be ~75%. And by not increasing the drain rate when the + // threshold is high, we avoid having a one-way ratchet effect that hurts + // pause times. constexpr size_t baseMarkRate = 3; uint64_t drainRate = baseMarkRate * ygAverageSurvivalBytes_; // In case the allocation rate is extremely low, set a lower bound to ensure @@ -3035,7 +3038,7 @@ size_t HadesGC::getDrainRate() { } void HadesGC::addSegmentExtentToCrashManager( - const AlignedHeapSegment &seg, + const FixedSizeHeapSegment &seg, const std::string &extraName) { assert(!extraName.empty() && "extraName can't be empty"); if (!crashMgr_) { @@ -3104,7 +3107,7 @@ void HadesGC::verifyCardTable() { gc.compactee_.evacContains(valuePtr); if (!gc.inYoungGen(locPtr) && (gc.inYoungGen(valuePtr) || crossRegionCompacteePtr)) { - assert(AlignedHeapSegment::cardTableCovering(locPtr) + assert(FixedSizeHeapSegment::cardTableCovering(locPtr) ->isCardForAddressDirty(locPtr)); } } @@ -3128,7 +3131,7 @@ void HadesGC::verifyCardTable() { VerifyCardDirtyAcceptor acceptor{*this}; forAllObjs([this, &acceptor](GCCell *cell) { markCell(acceptor, cell); }); - for (const AlignedHeapSegment &seg : oldGen_) { + for (const FixedSizeHeapSegment &seg : oldGen_) { seg.cardTable().verifyBoundaries(seg.start(), seg.level()); } } diff --git a/unittests/VMRuntime/AlignedHeapSegmentTest.cpp b/unittests/VMRuntime/AlignedHeapSegmentTest.cpp index 6362b80d6f1..47b54bbc059 100644 --- a/unittests/VMRuntime/AlignedHeapSegmentTest.cpp +++ b/unittests/VMRuntime/AlignedHeapSegmentTest.cpp @@ -37,18 +37,18 @@ static char *alignPointer(char *p, size_t align) { struct AlignedHeapSegmentTest : public ::testing::Test { AlignedHeapSegmentTest() : provider_(StorageProvider::mmapProvider()), - s(std::move(AlignedHeapSegment::create(provider_.get()).get())) {} + s(std::move(FixedSizeHeapSegment::create(provider_.get()).get())) {} ~AlignedHeapSegmentTest() = default; std::unique_ptr provider_; - AlignedHeapSegment s; + FixedSizeHeapSegment s; }; #ifndef NDEBUG TEST_F(AlignedHeapSegmentTest, FailedAllocation) { LimitedStorageProvider limitedProvider{StorageProvider::mmapProvider(), 0}; - auto result = AlignedHeapSegment::create(&limitedProvider); + auto result = FixedSizeHeapSegment::create(&limitedProvider); EXPECT_FALSE(result); } #endif // !NDEBUG @@ -57,48 +57,48 @@ TEST_F(AlignedHeapSegmentTest, Start) { char *lo 
= s.lowLim(); char *hi = s.hiLim(); - EXPECT_EQ(lo, AlignedHeapSegment::storageStart(lo)); + EXPECT_EQ(lo, FixedSizeHeapSegment::storageStart(lo)); EXPECT_EQ( lo, - AlignedHeapSegment::storageStart( - lo + AlignedHeapSegment::storageSize() / 2)); - EXPECT_EQ(lo, AlignedHeapSegment::storageStart(hi - 1)); + FixedSizeHeapSegment::storageStart( + lo + FixedSizeHeapSegment::storageSize() / 2)); + EXPECT_EQ(lo, FixedSizeHeapSegment::storageStart(hi - 1)); // `hi` is the first address in the storage following \c storage (if // such a storage existed). - EXPECT_EQ(hi, AlignedHeapSegment::storageStart(hi)); + EXPECT_EQ(hi, FixedSizeHeapSegment::storageStart(hi)); } TEST_F(AlignedHeapSegmentTest, End) { char *lo = s.lowLim(); char *hi = s.hiLim(); - EXPECT_EQ(hi, AlignedHeapSegment::storageEnd(lo)); + EXPECT_EQ(hi, FixedSizeHeapSegment::storageEnd(lo)); EXPECT_EQ( hi, - AlignedHeapSegment::storageEnd( - lo + AlignedHeapSegment::storageSize() / 2)); - EXPECT_EQ(hi, AlignedHeapSegment::storageEnd(hi - 1)); + FixedSizeHeapSegment::storageEnd( + lo + FixedSizeHeapSegment::storageSize() / 2)); + EXPECT_EQ(hi, FixedSizeHeapSegment::storageEnd(hi - 1)); // `hi` is the first address in the storage following \c storage (if // such a storage existed). EXPECT_EQ( - hi + AlignedHeapSegment::storageSize(), - AlignedHeapSegment::storageEnd(hi)); + hi + FixedSizeHeapSegment::storageSize(), + FixedSizeHeapSegment::storageEnd(hi)); } TEST_F(AlignedHeapSegmentTest, Offset) { char *lo = s.lowLim(); char *hi = s.hiLim(); - const size_t size = AlignedHeapSegment::storageSize(); + const size_t size = FixedSizeHeapSegment::storageSize(); - EXPECT_EQ(0, AlignedHeapSegment::offset(lo)); - EXPECT_EQ(size / 2, AlignedHeapSegment::offset(lo + size / 2)); - EXPECT_EQ(size - 1, AlignedHeapSegment::offset(hi - 1)); + EXPECT_EQ(0, FixedSizeHeapSegment::offset(lo)); + EXPECT_EQ(size / 2, FixedSizeHeapSegment::offset(lo + size / 2)); + EXPECT_EQ(size - 1, FixedSizeHeapSegment::offset(hi - 1)); // `hi` is the first address in the storage following \c storage (if // such a storage existed). - EXPECT_EQ(0, AlignedHeapSegment::offset(hi)); + EXPECT_EQ(0, FixedSizeHeapSegment::offset(hi)); } TEST_F(AlignedHeapSegmentTest, AdviseUnused) { @@ -108,16 +108,16 @@ TEST_F(AlignedHeapSegmentTest, AdviseUnused) { #if !defined(_WINDOWS) && !defined(HERMESVM_ALLOW_HUGE_PAGES) const size_t PG_SIZE = oscompat::page_size(); - ASSERT_EQ(0, AlignedHeapSegment::storageSize() % PG_SIZE); + ASSERT_EQ(0, FixedSizeHeapSegment::storageSize() % PG_SIZE); - const size_t TOTAL_PAGES = AlignedHeapSegment::storageSize() / PG_SIZE; + const size_t TOTAL_PAGES = FixedSizeHeapSegment::storageSize() / PG_SIZE; const size_t FREED_PAGES = TOTAL_PAGES / 2; // We can't use the storage of s here since it contains guard pages and also // s.start() may not align to actual page boundary. 
void *storage = provider_->newStorage().get(); char *start = reinterpret_cast(storage); - char *end = start + AlignedHeapSegment::storageSize(); + char *end = start + FixedSizeHeapSegment::storageSize(); // On some platforms, the mapping containing [start, end) can be larger than // [start, end) itself, and the extra space may already contribute to the @@ -151,15 +151,15 @@ TEST_F(AlignedHeapSegmentTest, Containment) { EXPECT_FALSE(s.contains(s.hiLim())); // Interior - EXPECT_TRUE(s.contains(s.lowLim() + AlignedHeapSegment::storageSize() / 2)); + EXPECT_TRUE(s.contains(s.lowLim() + FixedSizeHeapSegment::storageSize() / 2)); } TEST_F(AlignedHeapSegmentTest, Alignment) { /** - * This test alternates between allocating an AlignedHeapSegment, and an + * This test alternates between allocating a FixedSizeHeapSegment, and an * anonymous "spacer" mapping such that the i-th spacer has size: * - * AlignedHeapSegment::storageSize() + i MB + * FixedSizeHeapSegment::storageSize() + i MB * * In the worst case the anonymous mappings are perfectly interleaved with the * aligned storage, and we must be intentional about aligning the storage @@ -175,7 +175,7 @@ TEST_F(AlignedHeapSegmentTest, Alignment) { * - A box's width includes its left boundary and excludes its right boundary. * - A / boundary indicates 1MB belongs to the previous box and 1MB to the * next. - * - Boxes labeled with `A` are AlignedHeapSegment. + * - Boxes labeled with `A` are FixedSizeHeapSegment. * - Boxes labeled with `S` are spacers. * - Boxes with no label are unmapped. * @@ -183,16 +183,16 @@ TEST_F(AlignedHeapSegmentTest, Alignment) { * allocation pattern we (might) get from allocating in a tight loop. */ - std::vector segments; + std::vector segments; std::vector spacers; const size_t MB = 1 << 20; - const size_t SIZE = AlignedHeapSegment::storageSize(); + const size_t SIZE = FixedSizeHeapSegment::storageSize(); for (size_t space = SIZE + MB; space < 2 * SIZE; space += MB) { segments.emplace_back( - std::move(AlignedHeapSegment::create(provider_.get()).get())); - AlignedHeapSegment &seg = segments.back(); + std::move(FixedSizeHeapSegment::create(provider_.get()).get())); + FixedSizeHeapSegment &seg = segments.back(); EXPECT_EQ(seg.lowLim(), alignPointer(seg.lowLim(), SIZE)); @@ -200,7 +200,7 @@ TEST_F(AlignedHeapSegmentTest, Alignment) { { // When \c storages goes out of scope, it will correctly destruct the \c - // AlignedHeapSegment instances it holds. \c spacers, on the other hand, + // FixedSizeHeapSegment instances it holds. \c spacers, on the other hand, // holds only raw pointers, so we must clean them up manually: size_t space = SIZE + MB; for (void *spacer : spacers) { @@ -252,7 +252,7 @@ TEST_F(AlignedHeapSegmentTest, AllocTest) { } TEST_F(AlignedHeapSegmentTest, FullSize) { - EXPECT_EQ(s.size(), AlignedHeapSegment::maxSize()); + EXPECT_EQ(s.size(), FixedSizeHeapSegment::maxSize()); EXPECT_EQ(s.size(), s.available()); EXPECT_EQ(s.size(), s.hiLim() - s.start()); @@ -279,7 +279,7 @@ using AlignedHeapSegmentDeathTest = AlignedHeapSegmentTest; // Allocating into a null segment causes an assertion failure on !NDEBUG builds.
TEST_F(AlignedHeapSegmentDeathTest, NullAlloc) { - AlignedHeapSegment s; + FixedSizeHeapSegment s; constexpr uint32_t SIZE = heapAlignSize(sizeof(GCCell)); EXPECT_DEATH_IF_SUPPORTED({ s.alloc(SIZE); }, "null segment"); } diff --git a/unittests/VMRuntime/CardObjectBoundaryNCTest.cpp b/unittests/VMRuntime/CardObjectBoundaryNCTest.cpp index e763c37f122..2bb1e31f2ba 100644 --- a/unittests/VMRuntime/CardObjectBoundaryNCTest.cpp +++ b/unittests/VMRuntime/CardObjectBoundaryNCTest.cpp @@ -33,7 +33,7 @@ struct CardObjectBoundaryNCTest : public ::testing::Test { } std::unique_ptr provider; - AlignedHeapSegment segment; + FixedSizeHeapSegment segment; CardTable::Boundary boundary; size_t segStartIndex; @@ -41,7 +41,7 @@ CardObjectBoundaryNCTest::CardObjectBoundaryNCTest() : provider(StorageProvider::mmapProvider()), - segment(std::move(AlignedHeapSegment::create(provider.get()).get())), + segment(std::move(FixedSizeHeapSegment::create(provider.get()).get())), boundary(segment.cardTable().nextBoundary(segment.start())), segStartIndex(boundary.index()) {} diff --git a/unittests/VMRuntime/CardTableNCTest.cpp b/unittests/VMRuntime/CardTableNCTest.cpp index adaffe0651d..22ddfc8a82c 100644 --- a/unittests/VMRuntime/CardTableNCTest.cpp +++ b/unittests/VMRuntime/CardTableNCTest.cpp @@ -36,8 +36,8 @@ struct CardTableNCTest : public ::testing::Test { protected: std::unique_ptr provider{StorageProvider::mmapProvider()}; - AlignedHeapSegment seg{ - std::move(AlignedHeapSegment::create(provider.get()).get())}; + FixedSizeHeapSegment seg{ + std::move(FixedSizeHeapSegment::create(provider.get()).get())}; CardTable *table{new (seg.lowLim()) CardTable()}; // Addresses in the aligned storage to interact with during the tests. diff --git a/unittests/VMRuntime/CrashManagerTest.cpp b/unittests/VMRuntime/CrashManagerTest.cpp index 8201eb4247e..84f98055b19 100644 --- a/unittests/VMRuntime/CrashManagerTest.cpp +++ b/unittests/VMRuntime/CrashManagerTest.cpp @@ -31,7 +31,7 @@ using ::testing::MatchesRegex; namespace { // We make this not FixedSize, to allow direct allocation in the old generation. -using SegmentCell = EmptyCell; +using SegmentCell = EmptyCell; class TestCrashManager : public CrashManager { public: @@ -107,8 +107,8 @@ TEST(CrashManagerTest, HeapExtentsCorrect) { uint32_t numHeapSegmentsNumbered = 0; int32_t keyNum; for (const auto &[key, payload] : contextualCustomData) { - // Keeps track whether key represents an AlignedHeapSegment so that payload - // can be validated below. + // Keeps track whether key represents a FixedSizeHeapSegment so that + // payload can be validated below.
bool validatePayload = false; if (key == ygSegmentName) { validatePayload = true; diff --git a/unittests/VMRuntime/GCBasicsTest.cpp b/unittests/VMRuntime/GCBasicsTest.cpp index 2c71a106be5..7c6c074731a 100644 --- a/unittests/VMRuntime/GCBasicsTest.cpp +++ b/unittests/VMRuntime/GCBasicsTest.cpp @@ -356,9 +356,9 @@ TEST(GCCallbackTest, TestCallbackInvoked) { } #ifndef HERMESVM_GC_MALLOC -using SegmentCell = EmptyCell; +using SegmentCell = EmptyCell; TEST(GCBasicsTestNCGen, TestIDPersistsAcrossMultipleCollections) { - constexpr size_t kHeapSizeHint = AlignedHeapSegment::maxSize() * 10; + constexpr size_t kHeapSizeHint = FixedSizeHeapSegment::maxSize() * 10; const GCConfig kGCConfig = TestGCConfigFixedSize(kHeapSizeHint); auto runtime = DummyRuntime::create(kGCConfig); diff --git a/unittests/VMRuntime/GCFragmentationTest.cpp b/unittests/VMRuntime/GCFragmentationTest.cpp index b554dcea6d4..714b1213c4f 100644 --- a/unittests/VMRuntime/GCFragmentationTest.cpp +++ b/unittests/VMRuntime/GCFragmentationTest.cpp @@ -30,15 +30,15 @@ TEST(GCFragmentationTest, TestCoalescing) { // allocate. static const size_t kNumAvailableSegments = kNumSegments + 1; static const size_t kHeapSize = - AlignedHeapSegment::maxSize() * kNumAvailableSegments; + FixedSizeHeapSegment::maxSize() * kNumAvailableSegments; static const GCConfig kGCConfig = TestGCConfigFixedSize(kHeapSize); auto runtime = DummyRuntime::create(kGCConfig); DummyRuntime &rt = *runtime; - using SixteenthCell = EmptyCell; - using EighthCell = EmptyCell; - using QuarterCell = EmptyCell; + using SixteenthCell = EmptyCell; + using EighthCell = EmptyCell; + using QuarterCell = EmptyCell; { GCScope scope(rt); diff --git a/unittests/VMRuntime/GCLazySegmentNCTest.cpp b/unittests/VMRuntime/GCLazySegmentNCTest.cpp index c90af85c5ad..26c3a2eba7c 100644 --- a/unittests/VMRuntime/GCLazySegmentNCTest.cpp +++ b/unittests/VMRuntime/GCLazySegmentNCTest.cpp @@ -27,13 +27,13 @@ struct GCLazySegmentNCTest : public ::testing::Test {}; using GCLazySegmentNCDeathTest = GCLazySegmentNCTest; -using SegmentCell = EmptyCell; +using SegmentCell = EmptyCell; -constexpr size_t kHeapSizeHint = AlignedHeapSegment::maxSize() * 10; +constexpr size_t kHeapSizeHint = FixedSizeHeapSegment::maxSize() * 10; const GCConfig kGCConfig = TestGCConfigFixedSize(kHeapSizeHint); -constexpr size_t kHeapVA = AlignedHeapSegment::storageSize() * 10; +constexpr size_t kHeapVA = FixedSizeHeapSegment::storageSize() * 10; constexpr size_t kHeapVALimited = - kHeapVA / 2 + AlignedHeapSegment::storageSize() - 1; + kHeapVA / 2 + FixedSizeHeapSegment::storageSize() - 1; /// We are able to materialize every segment. TEST_F(GCLazySegmentNCTest, MaterializeAll) { diff --git a/unittests/VMRuntime/GCOOMTest.cpp b/unittests/VMRuntime/GCOOMTest.cpp index 87f521074ff..22e7b048cf0 100644 --- a/unittests/VMRuntime/GCOOMTest.cpp +++ b/unittests/VMRuntime/GCOOMTest.cpp @@ -34,10 +34,10 @@ static void exceedMaxHeap( GCConfig::Builder baseConfig = kTestGCConfigBaseBuilder) { static constexpr size_t kSegments = 10; static constexpr size_t kHeapSizeHint = - AlignedHeapSegment::maxSize() * kSegments; + FixedSizeHeapSegment::maxSize() * kSegments; // Only one of these cells will fit into a segment, with the maximum amount of // space wasted in the segment. 
- using AwkwardCell = EmptyCell; + using AwkwardCell = EmptyCell; auto runtime = DummyRuntime::create(TestGCConfigFixedSize(kHeapSizeHint, baseConfig)); diff --git a/unittests/VMRuntime/GCReturnUnusedMemoryTest.cpp b/unittests/VMRuntime/GCReturnUnusedMemoryTest.cpp index 022373a9b1a..c257078971b 100644 --- a/unittests/VMRuntime/GCReturnUnusedMemoryTest.cpp +++ b/unittests/VMRuntime/GCReturnUnusedMemoryTest.cpp @@ -32,7 +32,7 @@ TEST(GCReturnUnusedMemoryTest, CollectReturnsFreeMemory) { DummyRuntime &rt = *runtime; auto &gc = rt.getHeap(); - using SemiCell = EmptyCell; + using SemiCell = EmptyCell; llvh::ErrorOr before = 0; { diff --git a/unittests/VMRuntime/MarkBitArrayNCTest.cpp b/unittests/VMRuntime/MarkBitArrayNCTest.cpp index 455c1996fb1..efbd975c651 100644 --- a/unittests/VMRuntime/MarkBitArrayNCTest.cpp +++ b/unittests/VMRuntime/MarkBitArrayNCTest.cpp @@ -20,16 +20,24 @@ #include using namespace hermes::vm; -using MarkBitArray = AlignedHeapSegment::Contents::MarkBitArray; +using MarkBitArray = FixedSizeHeapSegment::Contents::MarkBitArray; namespace { struct MarkBitArrayTest : public ::testing::Test { MarkBitArrayTest(); + size_t addressToMarkBitArrayIndex(const void *addr) { + // Since we only test FixedSizeHeapSegment in this file, it's safe to cast + // address in the segment to a GCCell pointer (i.e., we can always compute + // the correct segment start address from this pointer). + auto *cell = reinterpret_cast(addr); + return seg.addressToMarkBitArrayIndex(cell); + } + protected: std::unique_ptr provider; - AlignedHeapSegment seg; + FixedSizeHeapSegment seg; MarkBitArray &mba; // Addresses in the aligned storage to interact with during the tests. @@ -38,7 +46,7 @@ struct MarkBitArrayTest : public ::testing::Test { MarkBitArrayTest::MarkBitArrayTest() : provider(StorageProvider::mmapProvider()), - seg{std::move(AlignedHeapSegment::create(provider.get()).get())}, + seg{std::move(FixedSizeHeapSegment::create(provider.get()).get())}, mba(seg.markBitArray()) { auto first = seg.lowLim(); auto last = reinterpret_cast( @@ -66,7 +74,7 @@ TEST_F(MarkBitArrayTest, AddressToIndex) { char *addr = addrs.at(i); size_t ind = indices.at(i); - EXPECT_EQ(ind, AlignedHeapSegment::addressToMarkBitArrayIndex(addr)) + EXPECT_EQ(ind, addressToMarkBitArrayIndex(addr)) << "0x" << std::hex << (void *)addr << " -> " << ind; char *toAddr = seg.lowLim() + (ind << LogHeapAlign); EXPECT_EQ(toAddr, addr) @@ -78,7 +86,7 @@ TEST_F(MarkBitArrayTest, MarkGet) { const size_t lastIx = mba.size() - 1; for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(ind > 0 && mba.at(ind - 1)) << "initial " << ind << " - 1"; EXPECT_FALSE(mba.at(ind)) << "initial " << ind; @@ -97,37 +105,37 @@ TEST_F(MarkBitArrayTest, MarkGet) { TEST_F(MarkBitArrayTest, Initial) { for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(mba.at(ind)); } } TEST_F(MarkBitArrayTest, Clear) { for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); ASSERT_FALSE(mba.at(ind)); } for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); } for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind =
addressToMarkBitArrayIndex(addr); ASSERT_TRUE(mba.at(ind)); } mba.reset(); for (char *addr : addrs) { - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); EXPECT_FALSE(mba.at(ind)); } } TEST_F(MarkBitArrayTest, NextMarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); EXPECT_EQ(ind, mba.findNextSetBitFrom(ind)); @@ -140,7 +148,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) { EXPECT_EQ(FOUND_NONE, mba.findNextSetBitFrom(0)); std::queue indices; for (char *addr : addrs) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + auto ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); indices.push(ind); } @@ -154,7 +162,7 @@ TEST_F(MarkBitArrayTest, NextMarkedBit) { TEST_F(MarkBitArrayTest, NextUnmarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(); mba.set(ind, false); EXPECT_EQ(ind, mba.findNextZeroBitFrom(ind)); @@ -167,7 +175,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) { EXPECT_EQ(FOUND_NONE, mba.findNextZeroBitFrom(0)); std::queue indices; for (char *addr : addrs) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + auto ind = addressToMarkBitArrayIndex(addr); mba.set(ind, false); indices.push(ind); } @@ -182,7 +190,7 @@ TEST_F(MarkBitArrayTest, NextUnmarkedBit) { TEST_F(MarkBitArrayTest, PrevMarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(ind, true); EXPECT_EQ(ind, mba.findPrevSetBitFrom(ind + 1)); } @@ -196,7 +204,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) { std::queue indices; size_t addrIdx = addrs.size(); while (addrIdx-- > 0) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]); + auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]); mba.set(ind, true); indices.push(ind); } @@ -209,7 +217,7 @@ TEST_F(MarkBitArrayTest, PrevMarkedBit) { TEST_F(MarkBitArrayTest, PrevUnmarkedBitImmediate) { char *addr = addrs.at(addrs.size() / 2); - size_t ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addr); + size_t ind = addressToMarkBitArrayIndex(addr); mba.set(); mba.set(ind, false); EXPECT_EQ(ind, mba.findPrevZeroBitFrom(ind + 1)); @@ -225,7 +233,7 @@ TEST_F(MarkBitArrayTest, PrevUnmarkedBit) { std::queue indices; size_t addrIdx = addrs.size(); while (addrIdx-- > 0) { - auto ind = AlignedHeapSegment::addressToMarkBitArrayIndex(addrs[addrIdx]); + auto ind = addressToMarkBitArrayIndex(addrs[addrIdx]); mba.set(ind, false); indices.push(ind); } diff --git a/unittests/VMRuntime/StorageProviderTest.cpp b/unittests/VMRuntime/StorageProviderTest.cpp index e189bcabce0..47fb5fdcad8 100644 --- a/unittests/VMRuntime/StorageProviderTest.cpp +++ b/unittests/VMRuntime/StorageProviderTest.cpp @@ -50,7 +50,7 @@ static std::unique_ptr GetStorageProvider( return StorageProvider::mmapProvider(); case ContiguousVAProvider: return StorageProvider::contiguousVAProvider( - AlignedHeapSegment::storageSize()); + FixedSizeHeapSegment::storageSize()); default: return nullptr; } @@ -107,7 +107,7 @@ TEST(StorageProviderTest, LimitedStorageProviderEnforce) { constexpr size_t LIM = 2; LimitedStorageProvider provider{ StorageProvider::mmapProvider(), - 
AlignedHeapSegment::storageSize() * LIM, + FixedSizeHeapSegment::storageSize() * LIM, }; void *live[LIM]; for (size_t i = 0; i < LIM; ++i) { @@ -128,7 +128,7 @@ TEST(StorageProviderTest, LimitedStorageProviderTrackDelete) { constexpr size_t LIM = 2; LimitedStorageProvider provider{ StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM, + FixedSizeHeapSegment::storageSize() * LIM, }; // If the storage gets deleted, we should be able to re-allocate it, even if @@ -145,7 +145,7 @@ TEST(StorageProviderTest, LimitedStorageProviderDeleteNull) { constexpr size_t LIM = 2; LimitedStorageProvider provider{ StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM, + FixedSizeHeapSegment::storageSize() * LIM, }; void *live[LIM]; @@ -174,7 +174,7 @@ TEST(StorageProviderTest, StorageProviderAllocsCount) { auto provider = std::unique_ptr{new LimitedStorageProvider{ StorageProvider::mmapProvider(), - AlignedHeapSegment::storageSize() * LIM}}; + FixedSizeHeapSegment::storageSize() * LIM}}; constexpr size_t FAILS = 3; void *storages[LIM]; @@ -261,16 +261,16 @@ TEST(StorageProviderTest, SucceedsAfterReducing) { } { // Test using the aligned storage alignment - SetVALimit limit{50 * AlignedHeapSegment::storageSize()}; + SetVALimit limit{50 * FixedSizeHeapSegment::storageSize()}; auto result = vmAllocateAllowLess( - 100 * AlignedHeapSegment::storageSize(), - 30 * AlignedHeapSegment::storageSize(), - AlignedHeapSegment::storageSize()); + 100 * FixedSizeHeapSegment::storageSize(), + 30 * FixedSizeHeapSegment::storageSize(), + FixedSizeHeapSegment::storageSize()); ASSERT_TRUE(result); auto memAndSize = result.get(); EXPECT_TRUE(memAndSize.first != nullptr); - EXPECT_GE(memAndSize.second, 30 * AlignedHeapSegment::storageSize()); - EXPECT_LE(memAndSize.second, 50 * AlignedHeapSegment::storageSize()); + EXPECT_GE(memAndSize.second, 30 * FixedSizeHeapSegment::storageSize()); + EXPECT_LE(memAndSize.second, 50 * FixedSizeHeapSegment::storageSize()); } }
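For readers following the renamed test expectations above: the Start, End, and Offset cases all reduce to power-of-two alignment arithmetic on the segment's storage size. The sketch below is illustrative only and assumes a power-of-two size; kStorageSize and the segmentStorage* helpers are made-up names standing in for FixedSizeHeapSegment::storageSize(), storageStart(), storageEnd(), and offset(), not part of the Hermes API.

#include <cstddef>
#include <cstdint>

// Illustrative only: a power-of-two size standing in for
// FixedSizeHeapSegment::storageSize(); the real value comes from the VM build
// configuration.
constexpr std::size_t kStorageSize = std::size_t{1} << 22;

// Round a pointer down to the base of the storage that contains it
// (the role storageStart() plays in the tests above).
inline char *segmentStorageStart(const void *p) {
  auto raw = reinterpret_cast<std::uintptr_t>(p);
  return reinterpret_cast<char *>(raw & ~(kStorageSize - 1));
}

// First address past that storage (the role storageEnd() plays).
inline char *segmentStorageEnd(const void *p) {
  return segmentStorageStart(p) + kStorageSize;
}

// Byte offset of p within its storage (the role offset() plays).
inline std::size_t segmentOffset(const void *p) {
  return reinterpret_cast<std::uintptr_t>(p) & (kStorageSize - 1);
}

Under this arithmetic, segmentStorageStart(hi - 1) == lo while segmentStorageStart(hi) == hi and segmentOffset(hi) == 0, mirroring what the Start and Offset tests assert: hi is the base of the storage that would immediately follow this one.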