Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 38 additions & 9 deletions tasks/track_memory_allocations/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,17 +21,46 @@ static GLOBAL: TrackedAllocator = TrackedAllocator;

/// Zero-sized allocator type. Its `GlobalAlloc` impl (below) delegates every
/// operation to `MiMalloc` while counting successful allocations and
/// reallocations in global counters.
struct TrackedAllocator;

/// Saturating atomic counter.
///
/// A thin wrapper over `AtomicUsize` whose `increment` saturates at
/// `usize::MAX` instead of wrapping around. Hitting the ceiling is practically
/// unreachable on 64-bit targets, but is at least conceivable on 32-bit ones.
///
/// All operations use `SeqCst` ordering. That may be stronger than strictly
/// necessary, but performance is not the primary concern here, so we play it
/// safe.
struct AtomicCounter(AtomicUsize);

impl AtomicCounter {
    /// Creates a counter starting at zero. `const` so it can initialize a `static`.
    const fn new() -> Self {
        Self(AtomicUsize::new(0))
    }

    /// Returns the current count.
    fn get(&self) -> usize {
        self.0.load(SeqCst)
    }

    /// Adds one to the counter, saturating at `usize::MAX`.
    fn increment(&self) {
        // Classic CAS loop: recompute the saturated successor from the most
        // recently observed value until the exchange succeeds. `_weak` may fail
        // spuriously, which the loop absorbs.
        let mut observed = self.0.load(SeqCst);
        loop {
            let next = observed.saturating_add(1);
            match self.0.compare_exchange_weak(observed, next, SeqCst, SeqCst) {
                Ok(_) => break,
                Err(actual) => observed = actual,
            }
        }
    }

    /// Sets the counter back to zero.
    fn reset(&self) {
        self.0.store(0, SeqCst);
    }
}

/// Number of system allocations.
// NOTE: We are only tracking the number of system allocations here, and not the number of bytes that are allocated.
// The original version of this tool did track the number of bytes, but there was some variance between platforms that
// made it less reliable as a measurement. In general, the number of allocations is closely correlated with the size of
// allocations, so just tracking the number of allocations is sufficient for our purposes.
static NUM_ALLOC: AtomicCounter = AtomicCounter::new();
/// Number of system reallocations.
static NUM_REALLOC: AtomicCounter = AtomicCounter::new();

/// Resets both global allocation counters to zero, so that subsequent readings
/// reflect only the work done after this call.
fn reset_global_allocs() {
    NUM_ALLOC.reset();
    NUM_REALLOC.reset();
}

// SAFETY: Methods simply delegate to `MiMalloc` allocator to ensure that the allocator
Expand All @@ -41,7 +70,7 @@ unsafe impl GlobalAlloc for TrackedAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
    // Forward the request to MiMalloc. Only a successful (non-null) result
    // is counted, so failed allocation attempts don't inflate the stats.
    let ptr = unsafe { MiMalloc.alloc(layout) };
    if ptr.is_null() {
        return ptr;
    }
    NUM_ALLOC.increment();
    ptr
}
Expand All @@ -53,15 +82,15 @@ unsafe impl GlobalAlloc for TrackedAllocator {
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
    // Same accounting as `alloc`: delegate to MiMalloc and count only
    // non-null (successful) results.
    let ptr = unsafe { MiMalloc.alloc_zeroed(layout) };
    if ptr.is_null() {
        return ptr;
    }
    NUM_ALLOC.increment();
    ptr
}

unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
    // Delegate to MiMalloc; a successful reallocation is counted separately
    // from fresh allocations.
    let new_ptr = unsafe { MiMalloc.realloc(ptr, layout, new_size) };
    if !new_ptr.is_null() {
        NUM_REALLOC.increment();
    }
    new_ptr
}
Expand Down Expand Up @@ -116,8 +145,8 @@ pub fn run() -> Result<(), io::Error> {

Parser::new(&allocator, &file.source_text, file.source_type).with_options(options).parse();

let sys_allocs = NUM_ALLOC.load(SeqCst);
let sys_reallocs = NUM_REALLOC.load(SeqCst);
let sys_allocs = NUM_ALLOC.get();
let sys_reallocs = NUM_REALLOC.get();
#[cfg(not(feature = "is_all_features"))]
let (arena_allocs, arena_reallocs) = allocator.get_allocation_stats();
#[cfg(feature = "is_all_features")]
Expand Down
Loading