5 changes: 4 additions & 1 deletion Cargo.toml
@@ -125,6 +125,9 @@ immix_smaller_block = []
# Zero the unmarked lines after a GC cycle in immix. This helps debug untraced objects.
immix_zero_on_release = []

# Run sanity checks for BlockList in mark sweep.
ms_block_list_sanity = []

# Run sanity GC
sanity = []
# Run analysis
@@ -138,7 +141,7 @@ nogc_no_zeroing = ["nogc_lock_free"]
single_worker = []

# To run expensive comprehensive runtime checks, such as checking duplicate edges
extreme_assertions = []
extreme_assertions = ["ms_block_list_sanity"]

# Enable multiple spaces for NoGC, each allocator maps to an individual ImmortalSpace.
nogc_multi_space = []
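Note on the wiring: `extreme_assertions` now lists `ms_block_list_sanity`, so enabling `extreme_assertions` transitively enables the block-list sanity checks. A minimal sketch of how such a feature gate is consumed in Rust code (illustrative only, not taken from this diff):

```rust
// Compiled only when `ms_block_list_sanity` is on, whether enabled directly
// (cargo build --features ms_block_list_sanity) or via `extreme_assertions`.
#[cfg(feature = "ms_block_list_sanity")]
fn block_list_sanity_check() {
    // expensive consistency checks go here
}

// No-op stand-in so other builds pay no cost.
#[cfg(not(feature = "ms_block_list_sanity"))]
fn block_list_sanity_check() {}
```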
12 changes: 6 additions & 6 deletions src/policy/marksweepspace/native_ms/block.rs
@@ -136,35 +136,35 @@ impl Block {
.is_ok()
}

pub fn load_prev_block(&self) -> Option<Block> {
pub(in crate::policy::marksweepspace::native_ms) fn load_prev_block(&self) -> Option<Block> {
let prev = unsafe { Block::PREV_BLOCK_TABLE.load::<usize>(self.start()) };
NonZeroUsize::new(prev).map(Block)
}

pub fn load_next_block(&self) -> Option<Block> {
pub(in crate::policy::marksweepspace::native_ms) fn load_next_block(&self) -> Option<Block> {
let next = unsafe { Block::NEXT_BLOCK_TABLE.load::<usize>(self.start()) };
NonZeroUsize::new(next).map(Block)
}

pub fn store_next_block(&self, next: Block) {
pub(in crate::policy::marksweepspace::native_ms) fn store_next_block(&self, next: Block) {
unsafe {
Block::NEXT_BLOCK_TABLE.store::<usize>(self.start(), next.start().as_usize());
}
}

pub fn clear_next_block(&self) {
pub(in crate::policy::marksweepspace::native_ms) fn clear_next_block(&self) {
unsafe {
Block::NEXT_BLOCK_TABLE.store::<usize>(self.start(), 0);
}
}

pub fn store_prev_block(&self, prev: Block) {
pub(in crate::policy::marksweepspace::native_ms) fn store_prev_block(&self, prev: Block) {
unsafe {
Block::PREV_BLOCK_TABLE.store::<usize>(self.start(), prev.start().as_usize());
}
}

pub fn clear_prev_block(&self) {
pub(in crate::policy::marksweepspace::native_ms) fn clear_prev_block(&self) {
unsafe {
Block::PREV_BLOCK_TABLE.store::<usize>(self.start(), 0);
}
142 changes: 133 additions & 9 deletions src/policy/marksweepspace/native_ms/block_list.rs
@@ -4,19 +4,23 @@ use crate::util::linear_scan::Region;
use crate::vm::VMBinding;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
#[cfg(feature = "ms_block_list_sanity")]
use std::sync::Mutex;

/// List of blocks owned by the allocator
#[repr(C)]
pub struct BlockList {
pub first: Option<Block>,
pub last: Option<Block>,
pub size: usize,
pub lock: AtomicBool,
first: Option<Block>,
last: Option<Block>,
size: usize,
lock: AtomicBool,
#[cfg(feature = "ms_block_list_sanity")]
sanity_list: Mutex<Vec<Block>>,
}

impl std::fmt::Debug for BlockList {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "BlockList {:?}", self.iter().collect::<Vec<Block>>())
write!(f, "{:?}", self.iter().collect::<Vec<Block>>())
}
}

@@ -27,16 +31,45 @@ impl BlockList {
last: None,
size,
lock: AtomicBool::new(false),
#[cfg(feature = "ms_block_list_sanity")]
sanity_list: Mutex::new(vec![]),
}
}

#[cfg(feature = "ms_block_list_sanity")]
fn verify_block_list(&self, sanity_list: &mut Vec<Block>) {
if !sanity_list
.iter()
.cloned()
.eq(BlockListIterator { cursor: self.first })
{
eprintln!("Sanity block list: {:?}", sanity_list);
eprintln!("First {:?}", sanity_list.first());
eprintln!("Actual block list: {:?}", self);
eprintln!("First {:?}", self.first);
eprintln!("Block list {:?}", self as *const _);
panic!("Incorrect block list");
}
}

/// List has no blocks
#[allow(clippy::let_and_return)]
pub fn is_empty(&self) -> bool {
self.first.is_none()
let ret = self.first.is_none();

#[cfg(feature = "ms_block_list_sanity")]
{
let mut sanity_list = self.sanity_list.lock().unwrap();
[Review comment — @wks (Collaborator), Apr 11, 2024]
If this method is not thread-safe, we can use try_lock() instead of lock() and assert that try_lock() always succeeds. If the assertion fails, it means there is contention on the lock, and the only way that can happen is that two threads attempted to call thread-unsafe methods concurrently.

[Reply — Collaborator]
Well, the method is_empty itself can be called from multiple threads concurrently, as long as there isn't another thread calling other methods that mutate this BlockList. In theory, Rust's ownership model and borrow checker can prevent that from happening, because they disallow a & to coexist with a &mut.
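A minimal, self-contained sketch of the try_lock() pattern suggested in this thread (hypothetical code, not part of the PR; Block is simplified to usize):

```rust
use std::sync::Mutex;

struct BlockList {
    // Mirror list used only for sanity checking, as in this PR.
    sanity_list: Mutex<Vec<usize>>, // Block simplified to usize for the sketch
}

impl BlockList {
    fn is_empty(&self) -> bool {
        // try_lock() fails only if another thread already holds the lock.
        // For a thread-unsafe method that contention is itself a race,
        // so panic loudly instead of silently waiting.
        let sanity_list = self
            .sanity_list
            .try_lock()
            .expect("sanity_list contended: concurrent call to a thread-unsafe method");
        sanity_list.is_empty()
    }
}
```

Under this scheme a racy caller turns into an immediate panic rather than a silently serialized access.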

self.verify_block_list(&mut sanity_list);
assert_eq!(sanity_list.is_empty(), ret);
}

ret
}

/// Remove a block from the list
pub fn remove(&mut self, block: Block) {
trace!("Blocklist {:?}: Remove {:?}", self as *const _, block);
match (block.load_prev_block(), block.load_next_block()) {
(None, None) => {
self.first = None;
@@ -45,23 +78,40 @@ impl BlockList {
(None, Some(next)) => {
next.clear_prev_block();
self.first = Some(next);
next.store_block_list(self);
// next.store_block_list(self);
debug_assert_eq!(next.load_block_list(), self as *mut _);
}
(Some(prev), None) => {
prev.clear_next_block();
self.last = Some(prev);
prev.store_block_list(self);
// prev.store_block_list(self);
debug_assert_eq!(prev.load_block_list(), self as *mut _);
}
(Some(prev), Some(next)) => {
prev.store_next_block(next);
next.store_prev_block(prev);
}
}

#[cfg(feature = "ms_block_list_sanity")]
{
let mut sanity_list = self.sanity_list.lock().unwrap();
[Review comment — Collaborator]
But if there is contention on this lock, there must be a race, because no two threads should call BlockList::remove at the same time. We can use try_lock() here.

if let Some((index, _)) = sanity_list
.iter()
.enumerate()
.find(|&(_, &val)| val == block)
{
sanity_list.remove(index);
} else {
panic!("Cannot find {:?} in the block list", block);
}
self.verify_block_list(&mut sanity_list);
}
}

/// Pop the first block in the list
pub fn pop(&mut self) -> Option<Block> {
if let Some(head) = self.first {
let ret = if let Some(head) = self.first {
if let Some(next) = head.load_next_block() {
self.first = Some(next);
next.clear_prev_block();
@@ -75,11 +125,27 @@ impl BlockList {
Some(head)
} else {
None
};

#[cfg(feature = "ms_block_list_sanity")]
{
let mut sanity_list = self.sanity_list.lock().unwrap();
[Review comment — Collaborator]
Ditto — try_lock() could be used here too.

let sanity_ret = if sanity_list.is_empty() {
None
} else {
Some(sanity_list.remove(0)) // pop first
};
self.verify_block_list(&mut sanity_list);
assert_eq!(sanity_ret, ret);
}

trace!("Blocklist {:?}: Pop = {:?}", self as *const _, ret);
ret
}

/// Push block to the front of the list
pub fn push(&mut self, block: Block) {
trace!("Blocklist {:?}: Push {:?}", self as *const _, block);
if self.is_empty() {
block.clear_next_block();
block.clear_prev_block();
@@ -93,10 +159,33 @@ impl BlockList {
self.first = Some(block);
}
block.store_block_list(self);

#[cfg(feature = "ms_block_list_sanity")]
{
let mut sanity_list = self.sanity_list.lock().unwrap();
[Review comment — Collaborator]
This is the same.

sanity_list.insert(0, block); // push front
self.verify_block_list(&mut sanity_list);
}
}

/// Moves all the blocks of `other` into `self`, leaving `other` empty.
pub fn append(&mut self, other: &mut BlockList) {
trace!(
"Blocklist {:?}: Append Blocklist {:?}",
self as *const _,
other as *const _
);
#[cfg(feature = "ms_block_list_sanity")]
{
// Check before merging
let mut sanity_list = self.sanity_list.lock().unwrap();
self.verify_block_list(&mut sanity_list);
let mut sanity_list_other = other.sanity_list.lock().unwrap();
other.verify_block_list(&mut sanity_list_other);
}
#[cfg(feature = "ms_block_list_sanity")]
let mut sanity_list_in_other = other.sanity_list.lock().unwrap().clone();

debug_assert_eq!(self.size, other.size);
if !other.is_empty() {
debug_assert!(
@@ -128,12 +217,26 @@ impl BlockList {
}
other.reset();
}

#[cfg(feature = "ms_block_list_sanity")]
{
let mut sanity_list = self.sanity_list.lock().unwrap();
sanity_list.append(&mut sanity_list_in_other);
self.verify_block_list(&mut sanity_list);
}
}

/// Remove all blocks
fn reset(&mut self) {
trace!("Blocklist {:?}: Reset", self as *const _);
self.first = None;
self.last = None;

#[cfg(feature = "ms_block_list_sanity")]
{
let mut sanity_list = self.sanity_list.lock().unwrap();
sanity_list.clear();
}
}

/// Lock the list. The MiMalloc allocator mostly uses thread-local block lists, and those operations on the list
@@ -152,10 +255,12 @@ impl BlockList {
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
.is_ok();
}
trace!("Blocklist {:?}: locked", self as *const _);
}

/// Unlock list. See the comments on the lock method.
pub fn unlock(&mut self) {
trace!("Blocklist {:?}: unlock", self as *const _);
self.lock.store(false, Ordering::SeqCst);
}
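The hunk above collapses most of the lock method's body; for context, a self-contained sketch of the AtomicBool spin lock implied by the visible compare_exchange loop (an assumption, not the verbatim mmtk-core source):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

struct SpinLock {
    lock: AtomicBool,
}

impl SpinLock {
    fn lock(&self) {
        // Spin until we flip the flag from false (unlocked) to true (locked).
        while self
            .lock
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            std::hint::spin_loop();
        }
    }

    fn unlock(&self) {
        // Release by resetting the flag; SeqCst matches the code above.
        self.lock.store(false, Ordering::SeqCst);
    }
}
```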

@@ -172,6 +277,25 @@ impl BlockList {
}
}
}

/// Get the size of this block list.
#[allow(clippy::let_and_return)]
pub fn size(&self) -> usize {
let ret = self.size;

#[cfg(feature = "ms_block_list_sanity")]
{
let mut sanity_list = self.sanity_list.lock().unwrap();
self.verify_block_list(&mut sanity_list);
}

ret
}

/// Get the first block in the list.
pub fn first(&self) -> Option<Block> {
self.first
}
}

pub struct BlockListIterator {
18 changes: 18 additions & 0 deletions src/policy/marksweepspace/native_ms/global.rs
@@ -65,15 +65,29 @@ impl AbandonedBlockLists {
fn move_consumed_to_unswept(&mut self) {
let mut i = 0;
while i < MI_BIN_FULL {
// This is executed during release. We also execute SweepChunk which will access blocks and block lists during release.
// We have to acquire locks before we modify these block lists.
self.consumed[i].lock();
self.unswept[i].lock();

if !self.consumed[i].is_empty() {
self.unswept[i].append(&mut self.consumed[i]);
}

self.unswept[i].unlock();
self.consumed[i].unlock();
i += 1;
}
}

fn sweep<VM: VMBinding>(&mut self, space: &MarkSweepSpace<VM>) {
for i in 0..MI_BIN_FULL {
// This is executed during release. We also execute SweepChunk which will access blocks and block lists during release.
// We have to acquire locks before we modify these block lists.
self.available[i].lock();
self.consumed[i].lock();
self.unswept[i].lock();

self.available[i].sweep_blocks(space);
self.consumed[i].sweep_blocks(space);
self.unswept[i].sweep_blocks(space);
@@ -86,6 +100,10 @@ impl AbandonedBlockLists {
self.consumed[i].push(block);
}
}

self.unswept[i].unlock();
self.consumed[i].unlock();
self.available[i].unlock();
}
}
}
8 changes: 4 additions & 4 deletions src/util/alloc/free_list_allocator.rs
@@ -218,10 +218,10 @@ impl<VM: VMBinding> FreeListAllocator<VM> {
debug_assert!(bin <= MAX_BIN);

let available = &mut available_blocks[bin];
debug_assert!(available.size >= size);
debug_assert!(available.size() >= size);

if !available.is_empty() {
let mut cursor = available.first;
let mut cursor = available.first();

while let Some(block) = cursor {
if block.has_free_cells() {
Expand All @@ -230,7 +230,7 @@ impl<VM: VMBinding> FreeListAllocator<VM> {
available.pop();
consumed_blocks.get_mut(bin).unwrap().push(block);

cursor = available.first;
cursor = available.first();
}
}

@@ -303,7 +303,7 @@ impl<VM: VMBinding> FreeListAllocator<VM> {

crate::policy::marksweepspace::native_ms::BlockAcquireResult::Fresh(block) => {
self.add_to_available_blocks(bin, block, stress_test);
self.init_block(block, self.available_blocks[bin].size);
self.init_block(block, self.available_blocks[bin].size());

return Some(block);
}