From dfe54b44ff9815e793bf61a5dd966a146998d81a Mon Sep 17 00:00:00 2001 From: overlookmotel <557937+overlookmotel@users.noreply.github.com> Date: Thu, 17 Jul 2025 16:59:33 +0000 Subject: [PATCH] refactor(allocator): move all fixed size allocator code into 1 file (#12309) Pure refactor. Split `AllocatorPool` into 2 separate files for the normal and fixed size variants. The extra layer of abstraction required to share code between the two versions was getting unwieldy. All the fixed-size allocator code is now together in one file. --- crates/oxc_allocator/src/lib.rs | 37 ++++-- crates/oxc_allocator/src/pool.rs | 109 ++--------------- .../src/{fixed_size.rs => pool_fixed_size.rs} | 110 +++++++++++++++--- 3 files changed, 130 insertions(+), 126 deletions(-) rename crates/oxc_allocator/src/{fixed_size.rs => pool_fixed_size.rs} (60%) diff --git a/crates/oxc_allocator/src/lib.rs b/crates/oxc_allocator/src/lib.rs index 0d25e9a3df767..609170620a57e 100644 --- a/crates/oxc_allocator/src/lib.rs +++ b/crates/oxc_allocator/src/lib.rs @@ -37,18 +37,9 @@ mod allocator_api2; mod boxed; mod clone_in; mod convert; -// Fixed size allocators are only supported on 64-bit little-endian platforms at present -#[cfg(all( - feature = "fixed_size", - not(feature = "disable_fixed_size"), - target_pointer_width = "64", - target_endian = "little" -))] -mod fixed_size; #[cfg(feature = "from_raw_parts")] mod from_raw_parts; pub mod hash_map; -mod pool; mod string_builder; mod take_in; mod vec; @@ -61,11 +52,37 @@ pub use boxed::Box; pub use clone_in::CloneIn; pub use convert::{FromIn, IntoIn}; pub use hash_map::HashMap; -pub use pool::{AllocatorGuard, AllocatorPool}; pub use string_builder::StringBuilder; pub use take_in::{Dummy, TakeIn}; pub use vec::Vec; +// Fixed size allocators are only supported on 64-bit little-endian platforms at present + +#[cfg(not(all( + feature = "fixed_size", + not(feature = "disable_fixed_size"), + target_pointer_width = "64", + target_endian = "little" +)))] +mod 
pool; + +#[cfg(all( + feature = "fixed_size", + not(feature = "disable_fixed_size"), + target_pointer_width = "64", + target_endian = "little" +))] +mod pool_fixed_size; +#[cfg(all( + feature = "fixed_size", + not(feature = "disable_fixed_size"), + target_pointer_width = "64", + target_endian = "little" +))] +use pool_fixed_size as pool; + +pub use pool::{AllocatorGuard, AllocatorPool}; + mod generated { #[cfg(all( feature = "fixed_size", diff --git a/crates/oxc_allocator/src/pool.rs b/crates/oxc_allocator/src/pool.rs index a78fb7246e30e..b0505d0d32c6e 100644 --- a/crates/oxc_allocator/src/pool.rs +++ b/crates/oxc_allocator/src/pool.rs @@ -1,4 +1,4 @@ -use std::{mem::ManuallyDrop, ops::Deref, sync::Mutex}; +use std::{iter, mem::ManuallyDrop, ops::Deref, sync::Mutex}; use crate::Allocator; @@ -7,13 +7,13 @@ use crate::Allocator; /// Internally uses a `Vec` protected by a `Mutex` to store available allocators. #[derive(Default)] pub struct AllocatorPool { - allocators: Mutex<Vec<AllocatorWrapper>>, + allocators: Mutex<Vec<Allocator>>, } impl AllocatorPool { - /// Creates a new [`AllocatorPool`] pre-filled with the given number of default `AllocatorWrapper` instances. + /// Creates a new [`AllocatorPool`] pre-filled with the given number of default [`Allocator`] instances. pub fn new(size: usize) -> AllocatorPool { - let allocators = AllocatorWrapper::new_vec(size); + let allocators = iter::repeat_with(Allocator::new).take(size).collect(); AllocatorPool { allocators: Mutex::new(allocators) } } @@ -29,19 +29,19 @@ impl AllocatorPool { let mut allocators = self.allocators.lock().unwrap(); allocators.pop() }; - let allocator = allocator.unwrap_or_else(AllocatorWrapper::new); + let allocator = allocator.unwrap_or_else(Allocator::new); AllocatorGuard { allocator: ManuallyDrop::new(allocator), pool: self } } - /// Add an [`AllocatorWrapper`] to the pool. + /// Add an [`Allocator`] to the pool. /// /// The `Allocator` should be empty, ready to be re-used. 
/// /// # Panics /// /// Panics if the underlying mutex is poisoned. - fn add(&self, allocator: AllocatorWrapper) { + fn add(&self, allocator: Allocator) { let mut allocators = self.allocators.lock().unwrap(); allocators.push(allocator); } } @@ -51,7 +51,7 @@ /// /// On drop, the `Allocator` is reset and returned to the pool. pub struct AllocatorGuard<'alloc_pool> { - allocator: ManuallyDrop<AllocatorWrapper>, + allocator: ManuallyDrop<Allocator>, pool: &'alloc_pool AllocatorPool, } @@ -59,105 +59,16 @@ impl Deref for AllocatorGuard<'_> { type Target = Allocator; fn deref(&self) -> &Self::Target { - self.allocator.get() + &self.allocator } } impl Drop for AllocatorGuard<'_> { /// Return [`Allocator`] back to the pool. fn drop(&mut self) { - // SAFETY: After taking ownership of the `AllocatorWrapper`, we do not touch the `ManuallyDrop` again + // SAFETY: After taking ownership of the `Allocator`, we do not touch the `ManuallyDrop` again let mut allocator = unsafe { ManuallyDrop::take(&mut self.allocator) }; allocator.reset(); self.pool.add(allocator); } } - -#[cfg(not(all( - feature = "fixed_size", - not(feature = "disable_fixed_size"), - target_pointer_width = "64", - target_endian = "little" -)))] -mod wrapper { - use crate::Allocator; - - /// Structure which wraps an [`Allocator`]. - /// - /// Default implementation which is just a wrapper around an [`Allocator`]. - pub struct AllocatorWrapper(Allocator); - - impl AllocatorWrapper { - /// Create a new [`AllocatorWrapper`]. - pub fn new() -> Self { - Self(Allocator::default()) - } - - /// Get reference to underlying [`Allocator`]. - pub fn get(&self) -> &Allocator { - &self.0 - } - - /// Reset the [`Allocator`] in this [`AllocatorWrapper`]. - pub fn reset(&mut self) { - self.0.reset(); - } - - /// Create a `Vec` of [`AllocatorWrapper`]s. 
- pub fn new_vec(size: usize) -> Vec<Self> { - std::iter::repeat_with(Self::new).take(size).collect() - } - } -} - -#[cfg(all( - feature = "fixed_size", - not(feature = "disable_fixed_size"), - target_pointer_width = "64", - target_endian = "little" -))] -mod wrapper { - use crate::{Allocator, fixed_size::FixedSizeAllocator, fixed_size_constants::BUFFER_ALIGN}; - - /// Structure which wraps an [`Allocator`] with fixed size of 2 GiB, and aligned on 4 GiB. - /// - /// See [`FixedSizeAllocator`] for more details. - pub struct AllocatorWrapper(FixedSizeAllocator); - - impl AllocatorWrapper { - /// Create a new [`AllocatorWrapper`]. - pub fn new() -> Self { - Self(FixedSizeAllocator::new()) - } - - /// Get reference to underlying [`Allocator`]. - pub fn get(&self) -> &Allocator { - &self.0 - } - - /// Reset the [`Allocator`] in this [`AllocatorWrapper`]. - pub fn reset(&mut self) { - // Set cursor back to end - self.0.reset(); - - // Set data pointer back to start. - // SAFETY: Fixed-size allocators have data pointer originally aligned on `BUFFER_ALIGN`, - // and size less than `BUFFER_ALIGN`. So we can restore original data pointer by rounding down - // to next multiple of `BUFFER_ALIGN`. - unsafe { - let data_ptr = self.0.data_ptr(); - let offset = data_ptr.as_ptr() as usize % BUFFER_ALIGN; - let data_ptr = data_ptr.sub(offset); - self.0.set_data_ptr(data_ptr); - } - } - - /// Create a `Vec` of [`AllocatorWrapper`]s. 
- pub fn new_vec(size: usize) -> Vec<Self> { - // Each allocator consumes a large block of memory, so create them on demand instead of upfront - Vec::with_capacity(size) - } - } -} - -use wrapper::AllocatorWrapper; diff --git a/crates/oxc_allocator/src/fixed_size.rs b/crates/oxc_allocator/src/pool_fixed_size.rs similarity index 60% rename from crates/oxc_allocator/src/fixed_size.rs rename to crates/oxc_allocator/src/pool_fixed_size.rs index 7ad32b85cc8d2..bbbc9ec707755 100644 --- a/crates/oxc_allocator/src/fixed_size.rs +++ b/crates/oxc_allocator/src/pool_fixed_size.rs @@ -1,8 +1,9 @@ use std::{ alloc::{self, GlobalAlloc, Layout, System}, mem::ManuallyDrop, - ops::{Deref, DerefMut}, + ops::Deref, ptr::NonNull, + sync::Mutex, }; use crate::{ @@ -13,6 +14,78 @@ use crate::{ const TWO_GIB: usize = 1 << 31; const FOUR_GIB: usize = 1 << 32; +/// A thread-safe pool for reusing [`Allocator`] instances to reduce allocation overhead. +/// +/// Internally uses a `Vec` protected by a `Mutex` to store available allocators. +#[derive(Default)] +pub struct AllocatorPool { + allocators: Mutex<Vec<FixedSizeAllocator>>, +} + +impl AllocatorPool { + /// Creates a new [`AllocatorPool`] with capacity for the given number of `FixedSizeAllocator` instances. + pub fn new(size: usize) -> AllocatorPool { + // Each allocator consumes a large block of memory, so create them on demand instead of upfront + let allocators = Vec::with_capacity(size); + AllocatorPool { allocators: Mutex::new(allocators) } + } + + /// Retrieves an [`Allocator`] from the pool, or creates a new one if the pool is empty. + /// + /// Returns an [`AllocatorGuard`] that gives access to the allocator. + /// + /// # Panics + /// + /// Panics if the underlying mutex is poisoned. 
+ pub fn get(&self) -> AllocatorGuard { + let allocator = { + let mut allocators = self.allocators.lock().unwrap(); + allocators.pop() + }; + let allocator = allocator.unwrap_or_else(FixedSizeAllocator::new); + + AllocatorGuard { allocator: ManuallyDrop::new(allocator), pool: self } + } + + /// Add a [`FixedSizeAllocator`] to the pool. + /// + /// The `Allocator` should be empty, ready to be re-used. + /// + /// # Panics + /// + /// Panics if the underlying mutex is poisoned. + fn add(&self, allocator: FixedSizeAllocator) { + let mut allocators = self.allocators.lock().unwrap(); + allocators.push(allocator); + } +} + +/// A guard object representing exclusive access to an [`Allocator`] from the pool. +/// +/// On drop, the `Allocator` is reset and returned to the pool. +pub struct AllocatorGuard<'alloc_pool> { + allocator: ManuallyDrop<FixedSizeAllocator>, + pool: &'alloc_pool AllocatorPool, +} + +impl Deref for AllocatorGuard<'_> { + type Target = Allocator; + + fn deref(&self) -> &Self::Target { + &self.allocator.allocator + } +} + +impl Drop for AllocatorGuard<'_> { + /// Return [`Allocator`] back to the pool. + fn drop(&mut self) { + // SAFETY: After taking ownership of the `FixedSizeAllocator`, we do not touch the `ManuallyDrop` again + let mut allocator = unsafe { ManuallyDrop::take(&mut self.allocator) }; + allocator.reset(); + self.pool.add(allocator); + } +} + // What we ideally want is an allocation 2 GiB in size, aligned on 4 GiB. // But system allocator on Mac OS refuses allocations with 4 GiB alignment. // https://github.com/rust-lang/rust/blob/556d20a834126d2d0ac20743b9792b8474d6d03c/library/std/src/sys/alloc/unix.rs#L16-L27 @@ -41,7 +114,7 @@ const ALLOC_LAYOUT: Layout = match Layout::from_size_align(ALLOC_SIZE, ALLOC_ALI /// To achieve this, we manually allocate memory to back the `Allocator`'s single chunk. /// We over-allocate 4 GiB, and then use a part of that allocation to back the `Allocator`. 
/// Inner `Allocator` is wrapped in `ManuallyDrop` to prevent it freeing the memory itself, -/// and `AllocatorWrapper` has a custom `Drop` impl which frees the whole of the original allocation. +/// and `FixedSizeAllocator` has a custom `Drop` impl which frees the whole of the original allocation. /// /// We allocate via `System` allocator, bypassing any registered alternative global allocator /// (e.g. Mimalloc in linter). Mimalloc complains that it cannot serve allocations with high alignment, @@ -59,7 +132,7 @@ impl FixedSizeAllocator { #[expect(clippy::items_after_statements)] pub fn new() -> Self { // Allocate block of memory. - // SAFETY: Layout does not have zero size. + // SAFETY: `ALLOC_LAYOUT` does not have zero size. let alloc_ptr = unsafe { System.alloc(ALLOC_LAYOUT) }; let Some(alloc_ptr) = NonNull::new(alloc_ptr) else { alloc::handle_alloc_error(ALLOC_LAYOUT); @@ -92,6 +165,23 @@ impl FixedSizeAllocator { // Store pointer to original allocation, so it can be used to deallocate in `drop` Self { allocator: ManuallyDrop::new(allocator), alloc_ptr } } + + /// Reset this [`FixedSizeAllocator`]. + fn reset(&mut self) { + // Set cursor back to end + self.allocator.reset(); + + // Set data pointer back to start. + // SAFETY: Fixed-size allocators have data pointer originally aligned on `BUFFER_ALIGN`, + // and size less than `BUFFER_ALIGN`. So we can restore original data pointer by rounding down + // to next multiple of `BUFFER_ALIGN`. 
+ unsafe { + let data_ptr = self.allocator.data_ptr(); + let offset = data_ptr.as_ptr() as usize % BUFFER_ALIGN; + let data_ptr = data_ptr.sub(offset); + self.allocator.set_data_ptr(data_ptr); + } + } } impl Drop for FixedSizeAllocator { @@ -101,20 +191,6 @@ impl Drop for FixedSizeAllocator { } } -impl Deref for FixedSizeAllocator { - type Target = Allocator; - - fn deref(&self) -> &Self::Target { - &self.allocator - } -} - -impl DerefMut for FixedSizeAllocator { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.allocator - } -} - // SAFETY: `Allocator` is `Send`. // Moving `alloc_ptr: NonNull<u8>` across threads along with the `Allocator` is safe. unsafe impl Send for FixedSizeAllocator {}