From 8c141e5e85c509def41cfde27e493e0d0ba128de Mon Sep 17 00:00:00 2001 From: overlookmotel <557937+overlookmotel@users.noreply.github.com> Date: Thu, 14 Aug 2025 14:59:11 +0000 Subject: [PATCH] fix(tasks): allocation tracker prevent counter overflow (#13085) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allocation tracker did not deal with the possibility that counters could reach `usize::MAX`. In this case, addition would wrap around back to 0. This problem was pointed out by @aapoalas in https://github.com/oxc-project/oxc/pull/13043#pullrequestreview-3119426135. Introduce an `AtomicCounter` type which uses saturating addition, to prevent this happening. Reaching such a high allocation count is highly unlikely on 64-bit systems, but might just about be possible on 32-bit. Maybe this is overkill, but since `cargo allocs` is just an internal tool, performance is not the highest priority. So I figure on balance probably better to handle this edge case. It doesn't seem to hurt perf much. ``` # Before % hyperfine -i --warmup 3 'cargo allocs' Benchmark 1: cargo allocs Time (mean ± σ): 299.9 ms ± 2.4 ms [User: 132.8 ms, System: 31.0 ms] Range (min … max): 297.6 ms … 303.9 ms 10 runs # After % hyperfine -i --warmup 3 'cargo allocs' Benchmark 1: cargo allocs Time (mean ± σ): 301.3 ms ± 3.4 ms [User: 133.3 ms, System: 31.5 ms] Range (min … max): 297.8 ms … 310.0 ms 10 runs ``` Note: The same problem in arena allocation tracker is addressed in #13043. --- tasks/track_memory_allocations/src/lib.rs | 47 ++++++++++++++++++----- 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/tasks/track_memory_allocations/src/lib.rs b/tasks/track_memory_allocations/src/lib.rs index cbe5deee7e588..fc1f03e451453 100644 --- a/tasks/track_memory_allocations/src/lib.rs +++ b/tasks/track_memory_allocations/src/lib.rs @@ -21,17 +21,46 @@ static GLOBAL: TrackedAllocator = TrackedAllocator; struct TrackedAllocator; +/// Atomic counter. 
+/// +/// Mainly just a wrapper around `AtomicUsize`, but `increment` method ensures that counter value +/// doesn't wrap around if counter reaches `usize::MAX`. +/// This is practically infeasible on 64-bit systems, but might just be possible on 32-bit. +/// +/// Note: `SeqCst` ordering may be stronger than required, but performance is not the primary concern here, +/// so play it safe. +struct AtomicCounter(AtomicUsize); + +impl AtomicCounter { + const fn new() -> Self { + Self(AtomicUsize::new(0)) + } + + fn get(&self) -> usize { + self.0.load(SeqCst) + } + + fn increment(&self) { + // Result of `fetch_update` cannot be `Err` as closure always returns `Some` + let _ = self.0.fetch_update(SeqCst, SeqCst, |count| Some(count.saturating_add(1))); + } + + fn reset(&self) { + self.0.store(0, SeqCst); + } +} + /// Number of system allocations // NOTE: We are only tracking the number of system allocations here, and not the number of bytes that are allocated. // The original version of this tool did track the number of bytes, but there was some variance between platforms that // made it less reliable as a measurement. In general, the number of allocations is closely correlated with the size of // allocations, so just tracking the number of allocations is sufficient for our purposes. 
-static NUM_ALLOC: AtomicUsize = AtomicUsize::new(0); -static NUM_REALLOC: AtomicUsize = AtomicUsize::new(0); +static NUM_ALLOC: AtomicCounter = AtomicCounter::new(); +static NUM_REALLOC: AtomicCounter = AtomicCounter::new(); fn reset_global_allocs() { - NUM_ALLOC.store(0, SeqCst); - NUM_REALLOC.store(0, SeqCst); + NUM_ALLOC.reset(); + NUM_REALLOC.reset(); } // SAFETY: Methods simply delegate to `MiMalloc` allocator to ensure that the allocator @@ -41,7 +70,7 @@ unsafe impl GlobalAlloc for TrackedAllocator { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { let ret = unsafe { MiMalloc.alloc(layout) }; if !ret.is_null() { - NUM_ALLOC.fetch_add(1, SeqCst); + NUM_ALLOC.increment(); } ret } @@ -53,7 +82,7 @@ unsafe impl GlobalAlloc for TrackedAllocator { unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { let ret = unsafe { MiMalloc.alloc_zeroed(layout) }; if !ret.is_null() { - NUM_ALLOC.fetch_add(1, SeqCst); + NUM_ALLOC.increment(); } ret } @@ -61,7 +90,7 @@ unsafe impl GlobalAlloc for TrackedAllocator { unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { let ret = unsafe { MiMalloc.realloc(ptr, layout, new_size) }; if !ret.is_null() { - NUM_REALLOC.fetch_add(1, SeqCst); + NUM_REALLOC.increment(); } ret } @@ -116,8 +145,8 @@ pub fn run() -> Result<(), io::Error> { Parser::new(&allocator, &file.source_text, file.source_type).with_options(options).parse(); - let sys_allocs = NUM_ALLOC.load(SeqCst); - let sys_reallocs = NUM_REALLOC.load(SeqCst); + let sys_allocs = NUM_ALLOC.get(); + let sys_reallocs = NUM_REALLOC.get(); #[cfg(not(feature = "is_all_features"))] let (arena_allocs, arena_reallocs) = allocator.get_allocation_stats(); #[cfg(feature = "is_all_features")]