diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs
index 67affb622f8da..990f4f8f1329f 100644
--- a/compiler/rustc_ast/src/attr/mod.rs
+++ b/compiler/rustc_ast/src/attr/mod.rs
@@ -19,6 +19,10 @@ use rustc_span::Span;
 
 use std::cell::Cell;
 use std::iter;
+#[cfg(debug_assertions)]
+use std::ops::BitXor;
+#[cfg(debug_assertions)]
+use std::sync::atomic::{AtomicU32, Ordering};
 
 pub struct MarkedAttrs(GrowableBitSet<AttrId>);
 
@@ -350,17 +354,36 @@ pub fn mk_nested_word_item(ident: Ident) -> NestedMetaItem {
 
 pub struct AttrIdGenerator(WorkerLocal<Cell<u32>>);
 
+#[cfg(debug_assertions)]
+static MAX_ATTR_ID: AtomicU32 = AtomicU32::new(u32::MAX);
+
 impl AttrIdGenerator {
     pub fn new() -> Self {
         // We use `(index as u32).reverse_bits()` to initialize the
         // starting value of AttrId in each worker thread.
         // The `index` is the index of the worker thread.
         // This ensures that the AttrId generated in each thread is unique.
-        AttrIdGenerator(WorkerLocal::new(|index| Cell::new((index as u32).reverse_bits())))
+        AttrIdGenerator(WorkerLocal::new(|index| {
+            let index: u32 = index.try_into().unwrap();
+
+            #[cfg(debug_assertions)]
+            {
+                let max_id = ((index + 1).next_power_of_two() - 1).bitxor(u32::MAX).reverse_bits();
+                MAX_ATTR_ID.fetch_min(max_id, Ordering::Release);
+            }
+
+            Cell::new(index.reverse_bits())
+        }))
     }
 
     pub fn mk_attr_id(&self) -> AttrId {
         let id = self.0.get();
+
+        // Ensure the assigned attr_id does not overlap the bits
+        // representing the number of threads.
+        #[cfg(debug_assertions)]
+        assert!(id <= MAX_ATTR_ID.load(Ordering::Acquire));
+
        self.0.set(id + 1);
         AttrId::from_u32(id)
     }
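
For reference, here is a small standalone sketch of the bit layout the new debug assertion relies on. It is not part of the patch: the helper names `start_id` and `max_id` are made up for illustration, and the sketch only reuses the two expressions that appear in the diff. Each worker thread's `AttrId`s start at `index.reverse_bits()`, so the thread index occupies the high bits of every id that thread hands out, while `mk_attr_id` counts up in the low bits; `max_id` is what each thread contributes to the global `MAX_ATTR_ID` via `fetch_min`.

```rust
// Standalone sketch (assumed helpers, not rustc code) of the per-thread
// AttrId partitioning used in the diff above.
use std::ops::BitXor;

/// First id handed out by worker thread `index`: the index mirrored into
/// the high bits, same as `Cell::new(index.reverse_bits())` in the diff.
fn start_id(index: u32) -> u32 {
    index.reverse_bits()
}

/// Value this thread contributes to MAX_ATTR_ID, using the same formula
/// as the diff: build a mask of the low bits needed to represent `index`,
/// invert it, and mirror it. The global limit is the minimum over all
/// worker threads, so it is set by the largest thread index seen.
fn max_id(index: u32) -> u32 {
    ((index + 1).next_power_of_two() - 1).bitxor(u32::MAX).reverse_bits()
}

fn main() {
    for index in [0u32, 1, 2, 5] {
        println!(
            "thread {index:>2}: starts at {:#010x}, contributes max_id {:#010x}",
            start_id(index),
            max_id(index)
        );
    }
    // With 8 worker threads (indices 0..=7), the top 3 bits of an AttrId
    // identify the thread, leaving 2^29 ids for thread 0 before the debug
    // assertion in mk_attr_id would fire.
}
```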