Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
108 changes: 89 additions & 19 deletions crates/oxc_allocator/src/pool/fixed_size.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use std::{
mem::{self, ManuallyDrop},
ptr::NonNull,
sync::{
Mutex,
Condvar, Mutex,
atomic::{AtomicBool, AtomicU32, Ordering},
},
};
Expand All @@ -18,43 +18,80 @@ use crate::{
const TWO_GIB: usize = 1 << 31;
const FOUR_GIB: usize = 1 << 32;

/// Capacity limiter for a pool that blocks when the maximum capacity is reached.
struct CapacityLimit {
    /// Maximum number of items that can be created
    max_count: u32,
    /// Condition variable to signal when an item is returned to the pool
    available: Condvar,
}

impl CapacityLimit {
    /// Create a new [`CapacityLimit`] with the given maximum count.
    ///
    /// # Panics
    /// Panics if `max_count` is zero. A zero-capacity limit could never hand out
    /// an item, so every waiter would deadlock; failing fast here is preferable.
    fn new(max_count: u32) -> Self {
        assert!(max_count > 0, "`max_count` must be at least 1");
        Self { max_count, available: Condvar::new() }
    }

    /// Check if the pool is at capacity.
    fn is_at_capacity(&self, current_count: u32) -> bool {
        current_count >= self.max_count
    }

    /// Wait for an item to become available and return it.
    ///
    /// This method blocks until an item is returned to the pool and
    /// [`Self::notify_available`] is called. Spurious wakeups are handled by
    /// re-checking the pool contents in a loop before returning.
    fn wait_for_item<T>(&self, mut items: std::sync::MutexGuard<'_, Vec<T>>) -> T {
        loop {
            // Recover from a poisoned mutex instead of propagating the panic:
            // the pooled `Vec` has no invariants that a panic in another thread
            // while holding the lock could have left broken.
            items = match self.available.wait(items) {
                Ok(guard) => guard,
                Err(poisoned) => poisoned.into_inner(),
            };
            if let Some(item) = items.pop() {
                return item;
            }
        }
    }

    /// Notify one waiting thread that an item is available.
    fn notify_available(&self) {
        self.available.notify_one();
    }
}

/// A thread-safe pool for reusing [`Allocator`] instances, that uses fixed-size allocators,
/// suitable for use with raw transfer.
pub struct FixedSizeAllocatorPool {
    /// Allocators in the pool
    allocators: Mutex<Vec<FixedSizeAllocator>>,
    /// ID to assign to next `Allocator` that's created.
    /// Also serves as the count of allocators created so far, which the capacity
    /// check compares against `capacity_limit`.
    next_id: AtomicU32,
    /// Capacity limiter. `None` means no limit (default).
    capacity_limit: Option<CapacityLimit>,
}

impl FixedSizeAllocatorPool {
/// Create a new [`FixedSizeAllocatorPool`] for use across the specified number of threads.
pub fn new(thread_count: usize) -> FixedSizeAllocatorPool {
///
/// If `max_count` is `Some`, the pool will block when trying to get an allocator
/// if the maximum number of allocators has been reached, waiting until one is returned.
/// If `max_count` is `None`, there is no limit on the number of allocators.
pub fn new(thread_count: usize, max_count: Option<u32>) -> FixedSizeAllocatorPool {
// Each allocator consumes a large block of memory, so create them on demand instead of upfront,
// in case not all threads end up being used (e.g. language server without `import` plugin)
let allocators = Vec::with_capacity(thread_count);
FixedSizeAllocatorPool { allocators: Mutex::new(allocators), next_id: AtomicU32::new(0) }
FixedSizeAllocatorPool {
allocators: Mutex::new(allocators),
next_id: AtomicU32::new(0),
capacity_limit: max_count.map(CapacityLimit::new),
Copy link

Copilot AI Dec 17, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There is no validation that max_count is greater than zero. If max_count is Some(0), the pool will always block in wait_for_item and never be able to create any allocators, resulting in a deadlock. Consider adding validation to ensure max_count is at least 1 when it's Some.

Suggested change
capacity_limit: max_count.map(CapacityLimit::new),
capacity_limit: max_count.filter(|&count| count > 0).map(CapacityLimit::new),

Copilot uses AI. Check for mistakes.
}
}

/// Retrieve an [`Allocator`] from the pool, or create a new one if the pool is empty.
///
/// If `max_count` was set and the maximum number of allocators has been reached,
/// this method will block until an allocator is returned to the pool.
///
/// # Panics
/// Panics if the underlying mutex is poisoned.
pub fn get(&self) -> Allocator {
let fixed_size_allocator = {
let mut allocators = self.allocators.lock().unwrap();
allocators.pop()
};

let fixed_size_allocator = fixed_size_allocator.unwrap_or_else(|| {
// Each allocator needs to have a unique ID, but the order those IDs are assigned in
// doesn't matter, so `Ordering::Relaxed` is fine
let id = self.next_id.fetch_add(1, Ordering::Relaxed);
// Protect against IDs wrapping around.
// TODO: Does this work? Do we need it anyway?
assert!(id < u32::MAX, "Created too many allocators");
FixedSizeAllocator::new(id)
});
let fixed_size_allocator = self.get_impl();

// Unwrap `FixedSizeAllocator`.
// `add` method will wrap it again, before returning it to pool, ensuring it gets dropped properly.
Expand All @@ -66,6 +103,32 @@ impl FixedSizeAllocatorPool {
ManuallyDrop::into_inner(allocator)
}

fn get_impl(&self) -> FixedSizeAllocator {
let mut allocators = self.allocators.lock().unwrap();

// Try to get an allocator from the pool
if let Some(alloc) = allocators.pop() {
return alloc;
}

// Check if we're at maximum capacity
if let Some(capacity_limit) = &self.capacity_limit {
let current_count = self.next_id.load(Ordering::Relaxed);
if capacity_limit.is_at_capacity(current_count) {
// At maximum capacity - wait for an item to be returned
return capacity_limit.wait_for_item(allocators);
}
}

// Create a new allocator.
// Each allocator needs to have a unique ID, but the order those IDs are assigned in
// doesn't matter, so `Ordering::Relaxed` is fine.
let id = self.next_id.fetch_add(1, Ordering::Relaxed);
// Protect against IDs wrapping around.
assert!(id < u32::MAX, "Created too many allocators");
FixedSizeAllocator::new(id)
Comment on lines +114 to +129
Copy link

Copilot AI Dec 17, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There is a race condition between checking capacity and incrementing next_id. Multiple threads could simultaneously:

  1. Check that allocators.pop() returns None (line 110)
  2. Load current_count and see it's below max_count (line 116)
  3. All pass the capacity check (line 117)
  4. All increment next_id with fetch_add (line 126)

This allows the pool to create more allocators than max_count allows. The capacity check needs to happen atomically with the ID increment, or use a different synchronization approach.

Copilot uses AI. Check for mistakes.
}
Comment on lines +106 to +130
Copy link

Copilot AI Dec 17, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The new capacity limiting behavior lacks test coverage. Consider adding tests to verify: (1) blocking behavior when capacity is reached, (2) correct unblocking when allocators are returned, (3) race condition handling when multiple threads try to acquire allocators simultaneously, and (4) correct behavior when max_count is None.

Copilot uses AI. Check for mistakes.

/// Add an [`Allocator`] to the pool.
///
/// The `Allocator` is reset by this method, so it's ready to be re-used.
Expand All @@ -80,8 +143,15 @@ impl FixedSizeAllocatorPool {
FixedSizeAllocator { allocator: ManuallyDrop::new(allocator) };
fixed_size_allocator.reset();

let mut allocators = self.allocators.lock().unwrap();
allocators.push(fixed_size_allocator);
{
let mut allocators = self.allocators.lock().unwrap();
allocators.push(fixed_size_allocator);
}

// Notify one waiting thread that an allocator is available (if capacity is limited)
if let Some(capacity_limit) = &self.capacity_limit {
capacity_limit.notify_available();
Comment on lines +149 to +153
Copy link

Copilot AI Dec 17, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The mutex guard is dropped before notifying waiting threads. This creates a window where a thread waiting in wait_for_item could be notified but another thread could acquire the lock first and pop the allocator, causing the notified thread to wait again unnecessarily. Consider moving the notify_available call inside the mutex-protected block, or document why this ordering is intentional.

Suggested change
}
// Notify one waiting thread that an allocator is available (if capacity is limited)
if let Some(capacity_limit) = &self.capacity_limit {
capacity_limit.notify_available();
// Notify one waiting thread that an allocator is available (if capacity is limited)
if let Some(capacity_limit) = &self.capacity_limit {
capacity_limit.notify_available();
}

Copilot uses AI. Check for mistakes.
}
}
}

Expand Down
12 changes: 10 additions & 2 deletions crates/oxc_allocator/src/pool/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,16 +46,24 @@ impl AllocatorPool {

/// Create a new [`AllocatorPool`] for use across the specified number of threads,
/// which uses fixed-size allocators (suitable for raw transfer).
///
/// If `max_count` is `Some`, the pool will block when trying to get an allocator
/// if the maximum number of allocators has been reached, waiting until one is returned.
/// If `max_count` is `None`, there is no limit on the number of allocators.
///
/// # Panics
/// Panics on platforms other than 64-bit little-endian, where fixed-size
/// allocators are not supported.
#[cfg(feature = "fixed_size")]
pub fn new_fixed_size(thread_count: usize, max_count: Option<u32>) -> AllocatorPool {
    #[cfg(all(target_pointer_width = "64", target_endian = "little"))]
    {
        Self(AllocatorPoolInner::FixedSize(FixedSizeAllocatorPool::new(
            thread_count,
            max_count,
        )))
    }

    #[cfg(not(all(target_pointer_width = "64", target_endian = "little")))]
    {
        // Idiomatic single-statement suppression of unused-variable lints,
        // instead of one `let _name = name;` binding per parameter
        let _ = (thread_count, max_count);
        panic!("Fixed size allocators are only supported on 64-bit little-endian platforms");
    }
}
Expand Down
2 changes: 1 addition & 1 deletion crates/oxc_linter/src/service/runtime.rs
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ impl Runtime {
// If an external linter is used (JS plugins), we must use fixed-size allocators,
// for compatibility with raw transfer
let allocator_pool = if linter.has_external_linter() {
AllocatorPool::new_fixed_size(thread_count)
AllocatorPool::new_fixed_size(thread_count, None)
} else {
AllocatorPool::new(thread_count)
};
Expand Down
Loading