From 4a87c668a40e8569ff9a9b4e1ae00d47f9555707 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Sat, 11 May 2024 11:11:34 +0800 Subject: [PATCH 01/25] WIP: Rename Edge to Slot --- docs/userguide/src/portingguide/howto/nogc.md | 2 +- src/memory_manager.rs | 8 ++-- src/mmtk.rs | 8 ++-- src/plan/barriers.rs | 16 +++---- src/plan/generational/barrier.rs | 4 +- src/plan/generational/gc_work.rs | 4 +- src/plan/tracing.rs | 2 +- src/scheduler/gc_work.rs | 12 ++--- src/util/edge_logger.rs | 8 ++-- src/util/sanity/sanity_checker.rs | 8 ++-- src/vm/mod.rs | 6 +-- src/vm/scanning.rs | 14 +++--- src/vm/{edge_shape.rs => slot.rs} | 44 +++++++++---------- 13 files changed, 68 insertions(+), 68 deletions(-) rename src/vm/{edge_shape.rs => slot.rs} (91%) diff --git a/docs/userguide/src/portingguide/howto/nogc.md b/docs/userguide/src/portingguide/howto/nogc.md index 034bd3f9d7..5b5279d21c 100644 --- a/docs/userguide/src/portingguide/howto/nogc.md +++ b/docs/userguide/src/portingguide/howto/nogc.md @@ -64,7 +64,7 @@ The `VMBinding` trait is a "meta-trait" (i.e. a trait that encapsulates other tr 3. [`ObjectModel`](https://docs.mmtk.io/api/mmtk/vm/trait.ObjectModel.html): This trait implements the runtime's object model. The object model includes object metadata such as mark-bits, forwarding-bits, etc.; constants regarding assumptions about object addresses; and functions to implement copying objects, querying object sizes, etc. You should ***carefully*** implement and understand this as it is a key trait on which many things depend. We will go into more detail about this trait in the [object model section](#object-model). 4. [`ReferenceGlue`](https://docs.mmtk.io/api/mmtk/vm/trait.ReferenceGlue.html): This trait implements runtime-specific finalization and weak reference processing methods. Note that each runtime has its own way of dealing with finalization and reference processing, so this is often one of the trickiest traits to implement. 5. 
[`Scanning`](https://docs.mmtk.io/api/mmtk/vm/trait.Scanning.html): This trait implements object scanning functions such as scanning mutator threads for root pointers, scanning a particular object for reference fields, etc. - 6. [`Edge`](https://docs.mmtk.io/api/mmtk/vm/edge_shape/trait.Edge.html): This trait implements what an edge in the object graph looks like in the runtime. This is useful as it can abstract over compressed or tagged pointers. If an edge in your runtime is indistinguishable from an arbitrary address, you may set it to the [`Address`](https://docs.mmtk.io/api/mmtk/util/address/struct.Address.html) type. + 6. [`Slot`](https://docs.mmtk.io/api/mmtk/vm/slot/trait.Slot.html): This trait implements what an edge in the object graph looks like in the runtime. This is useful as it can abstract over compressed or tagged pointers. If an edge in your runtime is indistinguishable from an arbitrary address, you may set it to the [`Address`](https://docs.mmtk.io/api/mmtk/util/address/struct.Address.html) type. 7. [`MemorySlice`](https://docs.mmtk.io/api/mmtk/vm/edge_shape/trait.MemorySlice.html): This trait implements functions related to memory slices such as arrays. This is mainly used by generational collectors. For the time-being we can implement all the above traits via `unimplemented!()` stubs. If you are using the Dummy VM binding as a starting point, you will have to edit some of the concrete implementations to `unimplemented!()`. Note that you should change the type that implements `VMBinding` from `DummyVM` to an appropriately named type for your runtime. For example, the OpenJDK binding defines the zero-struct [`OpenJDK`](https://github.com/mmtk/mmtk-openjdk/blob/54a249e877e1cbea147a71aafaafb8583f33843d/mmtk/src/lib.rs#L139-L162) which implements the `VMBinding` trait. 
diff --git a/src/memory_manager.rs b/src/memory_manager.rs index 0b9f7cd3c3..888a4dcdf2 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -22,7 +22,7 @@ use crate::util::constants::{LOG_BYTES_IN_PAGE, MIN_OBJECT_SIZE}; use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::opaque_pointer::*; use crate::util::{Address, ObjectReference}; -use crate::vm::edge_shape::MemorySlice; +use crate::vm::slot::MemorySlice; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; @@ -250,7 +250,7 @@ pub fn post_alloc( pub fn object_reference_write( mutator: &mut Mutator, src: ObjectReference, - slot: VM::VMEdge, + slot: VM::VMSlot, target: ObjectReference, ) { mutator.barrier().object_reference_write(src, slot, target); @@ -276,7 +276,7 @@ pub fn object_reference_write( pub fn object_reference_write_pre( mutator: &mut Mutator, src: ObjectReference, - slot: VM::VMEdge, + slot: VM::VMSlot, target: Option, ) { mutator @@ -304,7 +304,7 @@ pub fn object_reference_write_pre( pub fn object_reference_write_post( mutator: &mut Mutator, src: ObjectReference, - slot: VM::VMEdge, + slot: VM::VMSlot, target: Option, ) { mutator diff --git a/src/mmtk.rs b/src/mmtk.rs index 819796261c..f767724b02 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -9,7 +9,7 @@ use crate::scheduler::GCWorkScheduler; #[cfg(feature = "analysis")] use crate::util::analysis::AnalysisManager; #[cfg(feature = "extreme_assertions")] -use crate::util::edge_logger::EdgeLogger; +use crate::util::edge_logger::SlotLogger; use crate::util::finalizable_processor::FinalizableProcessor; use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::layout::vm_layout::VMLayout; @@ -113,9 +113,9 @@ pub struct MMTK { Mutex>::FinalizableType>>, pub(crate) scheduler: Arc>, #[cfg(feature = "sanity")] - pub(crate) sanity_checker: Mutex>, + pub(crate) sanity_checker: Mutex>, #[cfg(feature = "extreme_assertions")] - pub(crate) edge_logger: EdgeLogger, + pub(crate) edge_logger: SlotLogger, pub(crate) 
gc_trigger: Arc>, pub(crate) gc_requester: Arc>, pub(crate) stats: Arc, @@ -222,7 +222,7 @@ impl MMTK { inside_sanity: AtomicBool::new(false), inside_harness: AtomicBool::new(false), #[cfg(feature = "extreme_assertions")] - edge_logger: EdgeLogger::new(), + edge_logger: SlotLogger::new(), #[cfg(feature = "analysis")] analysis_manager: Arc::new(AnalysisManager::new(stats.clone())), gc_trigger, diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs index ef1cb6c4bb..3bcab4c1d5 100644 --- a/src/plan/barriers.rs +++ b/src/plan/barriers.rs @@ -1,6 +1,6 @@ //! Read/Write barrier implementations. -use crate::vm::edge_shape::{Edge, MemorySlice}; +use crate::vm::slot::{Slot, MemorySlice}; use crate::vm::ObjectModel; use crate::{ util::{metadata::MetadataSpec, *}, @@ -49,7 +49,7 @@ pub trait Barrier: 'static + Send + Downcast { fn object_reference_write( &mut self, src: ObjectReference, - slot: VM::VMEdge, + slot: VM::VMSlot, target: ObjectReference, ) { self.object_reference_write_pre(src, slot, Some(target)); @@ -61,7 +61,7 @@ pub trait Barrier: 'static + Send + Downcast { fn object_reference_write_pre( &mut self, _src: ObjectReference, - _slot: VM::VMEdge, + _slot: VM::VMSlot, _target: Option, ) { } @@ -70,7 +70,7 @@ pub trait Barrier: 'static + Send + Downcast { fn object_reference_write_post( &mut self, _src: ObjectReference, - _slot: VM::VMEdge, + _slot: VM::VMSlot, _target: Option, ) { } @@ -80,7 +80,7 @@ pub trait Barrier: 'static + Send + Downcast { fn object_reference_write_slow( &mut self, _src: ObjectReference, - _slot: VM::VMEdge, + _slot: VM::VMSlot, _target: Option, ) { } @@ -146,7 +146,7 @@ pub trait BarrierSemantics: 'static + Send { fn object_reference_write_slow( &mut self, src: ObjectReference, - slot: ::VMEdge, + slot: ::VMSlot, target: Option, ); @@ -216,7 +216,7 @@ impl Barrier for ObjectBarrier { fn object_reference_write_post( &mut self, src: ObjectReference, - slot: ::VMEdge, + slot: ::VMSlot, target: Option, ) { if self.object_is_unlogged(src) { 
@@ -227,7 +227,7 @@ impl Barrier for ObjectBarrier { fn object_reference_write_slow( &mut self, src: ObjectReference, - slot: ::VMEdge, + slot: ::VMSlot, target: Option, ) { if self.log_object(src) { diff --git a/src/plan/generational/barrier.rs b/src/plan/generational/barrier.rs index 3c7281ba93..6c008fe04d 100644 --- a/src/plan/generational/barrier.rs +++ b/src/plan/generational/barrier.rs @@ -7,7 +7,7 @@ use crate::policy::gc_work::DEFAULT_TRACE; use crate::scheduler::WorkBucketStage; use crate::util::constants::BYTES_IN_INT; use crate::util::*; -use crate::vm::edge_shape::MemorySlice; +use crate::vm::slot::MemorySlice; use crate::vm::VMBinding; use crate::MMTK; @@ -74,7 +74,7 @@ impl + PlanTraceObject> BarrierSem fn object_reference_write_slow( &mut self, src: ObjectReference, - _slot: VM::VMEdge, + _slot: VM::VMSlot, _target: Option, ) { // enqueue the object diff --git a/src/plan/generational/gc_work.rs b/src/plan/generational/gc_work.rs index afa0ebf1fb..3a2ccf4e8d 100644 --- a/src/plan/generational/gc_work.rs +++ b/src/plan/generational/gc_work.rs @@ -5,7 +5,7 @@ use crate::plan::VectorObjectQueue; use crate::policy::gc_work::TraceKind; use crate::scheduler::{gc_work::*, GCWork, GCWorker, WorkBucketStage}; use crate::util::ObjectReference; -use crate::vm::edge_shape::{Edge, MemorySlice}; +use crate::vm::slot::{Slot, MemorySlice}; use crate::vm::*; use crate::MMTK; use std::marker::PhantomData; @@ -165,7 +165,7 @@ impl GCWork for ProcessRegionModBuf { // Collect all the entries in all the slices let mut edges = vec![]; for slice in &self.modbuf { - for edge in slice.iter_edges() { + for edge in slice.iter_slots() { edges.push(edge); } } diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 27e021051f..47fa003bad 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -115,7 +115,7 @@ impl<'a, E: ProcessEdgesWork> EdgeVisitor> for ObjectsClosure<'a, E> { fn visit_edge(&mut self, slot: EdgeOf) { #[cfg(debug_assertions)] { - use 
crate::vm::edge_shape::Edge; + use crate::vm::slot::Slot; trace!( "(ObjectsClosure) Visit edge {:?} (pointing to {:?})", slot, diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 9d622711f5..4d1afe29f6 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -4,7 +4,7 @@ use crate::global_state::GcStatus; use crate::plan::ObjectsClosure; use crate::plan::VectorObjectQueue; use crate::util::*; -use crate::vm::edge_shape::Edge; +use crate::vm::slot::Slot; use crate::vm::*; use crate::*; use std::marker::PhantomData; @@ -441,7 +441,7 @@ impl GCWork for ScanVMSpecificRoots { } pub struct ProcessEdgesBase { - pub edges: Vec, + pub edges: Vec, pub nodes: VectorObjectQueue, mmtk: &'static MMTK, // Use raw pointer for fast pointer dereferencing, instead of using `Option<&'static mut GCWorker>`. @@ -457,7 +457,7 @@ impl ProcessEdgesBase { // Requires an MMTk reference. Each plan-specific type that uses ProcessEdgesBase can get a static plan reference // at creation. This avoids overhead for dynamic dispatch or downcasting plan for each object traced. pub fn new( - edges: Vec, + edges: Vec, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, @@ -505,7 +505,7 @@ impl ProcessEdgesBase { } /// A short-hand for `::VMEdge`. -pub type EdgeOf = <::VM as VMBinding>::VMEdge; +pub type EdgeOf = <::VM as VMBinding>::VMSlot; /// Scan & update a list of object slots // @@ -702,9 +702,9 @@ impl, PPE: ProcessEdgesWork, PPE: ProcessEdgesWork> - RootsWorkFactory for ProcessEdgesWorkRootsWorkFactory + RootsWorkFactory for ProcessEdgesWorkRootsWorkFactory { - fn create_process_edge_roots_work(&mut self, edges: Vec) { + fn create_process_edge_roots_work(&mut self, edges: Vec) { crate::memory_manager::add_work_packet( self.mmtk, WorkBucketStage::Closure, diff --git a/src/util/edge_logger.rs b/src/util/edge_logger.rs index 7c4bb08e9a..94196a49e1 100644 --- a/src/util/edge_logger.rs +++ b/src/util/edge_logger.rs @@ -5,19 +5,19 @@ //! 
use crate::plan::Plan; -use crate::vm::edge_shape::Edge; +use crate::vm::slot::Slot; use crate::vm::VMBinding; use std::collections::HashSet; use std::sync::RwLock; -pub struct EdgeLogger { +pub struct SlotLogger { // A private hash-set to keep track of edges. edge_log: RwLock>, } -unsafe impl Sync for EdgeLogger {} +unsafe impl Sync for SlotLogger {} -impl EdgeLogger { +impl SlotLogger { pub fn new() -> Self { Self { edge_log: Default::default(), diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs index d6ef849e35..06446d0ad0 100644 --- a/src/util/sanity/sanity_checker.rs +++ b/src/util/sanity/sanity_checker.rs @@ -1,7 +1,7 @@ use crate::plan::Plan; use crate::scheduler::gc_work::*; use crate::util::ObjectReference; -use crate::vm::edge_shape::Edge; +use crate::vm::slot::Slot; use crate::vm::*; use crate::MMTK; use crate::{scheduler::*, ObjectQueue}; @@ -9,7 +9,7 @@ use std::collections::HashSet; use std::ops::{Deref, DerefMut}; #[allow(dead_code)] -pub struct SanityChecker { +pub struct SanityChecker { /// Visited objects refs: HashSet, /// Cached root edges for sanity root scanning @@ -18,13 +18,13 @@ pub struct SanityChecker { root_nodes: Vec>, } -impl Default for SanityChecker { +impl Default for SanityChecker { fn default() -> Self { Self::new() } } -impl SanityChecker { +impl SanityChecker { pub fn new() -> Self { Self { refs: HashSet::new(), diff --git a/src/vm/mod.rs b/src/vm/mod.rs index e9b3003664..18cff83ae1 100644 --- a/src/vm/mod.rs +++ b/src/vm/mod.rs @@ -18,7 +18,7 @@ mod active_plan; mod collection; /// Allows MMTk to access edges in a VM-defined way. -pub mod edge_shape; +pub mod slot; pub(crate) mod object_model; mod reference_glue; mod scanning; @@ -60,9 +60,9 @@ where type VMReferenceGlue: ReferenceGlue; /// The type of edges in this VM. - type VMEdge: edge_shape::Edge; + type VMSlot: slot::Slot; /// The type of heap memory slice in this VM. 
- type VMMemorySlice: edge_shape::MemorySlice; + type VMMemorySlice: slot::MemorySlice; /// A value to fill in alignment gaps. This value can be used for debugging. const ALIGNMENT_VALUE: usize = 0xdead_beef; diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 9d3951d0d9..2d1ee9d32a 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -2,17 +2,17 @@ use crate::plan::Mutator; use crate::scheduler::GCWorker; use crate::util::ObjectReference; use crate::util::VMWorkerThread; -use crate::vm::edge_shape::Edge; +use crate::vm::slot::Slot; use crate::vm::VMBinding; /// Callback trait of scanning functions that report edges. -pub trait EdgeVisitor { +pub trait EdgeVisitor { /// Call this function for each edge. fn visit_edge(&mut self, edge: ES); } /// This lets us use closures as EdgeVisitor. -impl EdgeVisitor for F { +impl EdgeVisitor for F { fn visit_edge(&mut self, edge: ES) { #[cfg(debug_assertions)] trace!( @@ -98,7 +98,7 @@ pub trait ObjectTracerContext: Clone + Send + 'static { /// it needs `Send` to be sent between threads. `'static` means it must not have /// references to variables with limited lifetime (such as local variables), because /// it needs to be moved between threads. -pub trait RootsWorkFactory: Clone + Send + 'static { +pub trait RootsWorkFactory: Clone + Send + 'static { /// Create work packets to handle root edges. /// /// The work packet may update the edges. @@ -164,7 +164,7 @@ pub trait Scanning { /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. /// * `edge_visitor`: Called back for each edge. - fn scan_object>( + fn scan_object>( tls: VMWorkerThread, object: ObjectReference, edge_visitor: &mut EV, @@ -227,7 +227,7 @@ pub trait Scanning { fn scan_roots_in_mutator_thread( tls: VMWorkerThread, mutator: &'static mut Mutator, - factory: impl RootsWorkFactory, + factory: impl RootsWorkFactory, ); /// Scan VM-specific roots. 
The creation of all root scan tasks (except thread scanning) @@ -239,7 +239,7 @@ pub trait Scanning { /// Arguments: /// * `tls`: The GC thread that is performing this scanning. /// * `factory`: The VM uses it to create work packets for scanning roots. - fn scan_vm_specific_roots(tls: VMWorkerThread, factory: impl RootsWorkFactory); + fn scan_vm_specific_roots(tls: VMWorkerThread, factory: impl RootsWorkFactory); /// Return whether the VM supports return barriers. This is unused at the moment. fn supports_return_barrier() -> bool; diff --git a/src/vm/edge_shape.rs b/src/vm/slot.rs similarity index 91% rename from src/vm/edge_shape.rs rename to src/vm/slot.rs index ffd24c0c0a..3188c44fb5 100644 --- a/src/vm/edge_shape.rs +++ b/src/vm/slot.rs @@ -44,7 +44,7 @@ use crate::util::{Address, ObjectReference}; /// Note: this trait only concerns the representation (i.e. the shape) of the edge, not its /// semantics, such as whether it holds strong or weak references. If a VM holds a weak reference /// in a word as a pointer, it can also use `SimpleEdge` for weak reference fields. -pub trait Edge: Copy + Send + Debug + PartialEq + Eq + Hash { +pub trait Slot: Copy + Send + Debug + PartialEq + Eq + Hash { /// Load object reference from the slot. /// /// If the slot is not holding an object reference (For example, if it is holding NULL or a @@ -89,11 +89,11 @@ pub trait Edge: Copy + Send + Debug + PartialEq + Eq + Hash { /// It is the default edge type, and should be suitable for most VMs. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] #[repr(transparent)] -pub struct SimpleEdge { +pub struct SimpleSlot { slot_addr: *mut Atomic
, } -impl SimpleEdge { +impl SimpleSlot { /// Create a simple edge from an address. /// /// Arguments: @@ -112,9 +112,9 @@ impl SimpleEdge { } } -unsafe impl Send for SimpleEdge {} +unsafe impl Send for SimpleSlot {} -impl Edge for SimpleEdge { +impl Slot for SimpleSlot { fn load(&self) -> Option { let addr = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; ObjectReference::from_raw_address(addr) @@ -135,7 +135,7 @@ impl Edge for SimpleEdge { /// hand, `SimpleEdge` is all about how to access a field that holds a reference represented /// simply as an `ObjectReference`. The intention and the semantics are clearer with /// `SimpleEdge`. -impl Edge for Address { +impl Slot for Address { fn load(&self) -> Option { let addr = unsafe { Address::load(*self) }; ObjectReference::from_raw_address(addr) @@ -147,9 +147,9 @@ impl Edge for Address { } #[test] -fn a_simple_edge_should_have_the_same_size_as_a_pointer() { +fn a_simple_slot_should_have_the_same_size_as_a_pointer() { assert_eq!( - std::mem::size_of::(), + std::mem::size_of::(), std::mem::size_of::<*mut libc::c_void>() ); } @@ -157,11 +157,11 @@ fn a_simple_edge_should_have_the_same_size_as_a_pointer() { /// A abstract memory slice represents a piece of **heap** memory. pub trait MemorySlice: Send + Debug + PartialEq + Eq + Clone + Hash { /// The associate type to define how to access edges from a memory slice. - type Edge: Edge; + type SlotType: Slot; /// The associate type to define how to iterate edges in a memory slice. - type EdgeIterator: Iterator; + type SlotIterator: Iterator; /// Iterate object edges within the slice. If there are non-reference values in the slice, the iterator should skip them. - fn iter_edges(&self) -> Self::EdgeIterator; + fn iter_slots(&self) -> Self::SlotIterator; /// The object which this slice belongs to. If we know the object for the slice, we will check the object state (e.g. mature or not), rather than the slice address. 
/// Normally checking the object and checking the slice does not make a difference, as the slice is part of the object (in terms of memory range). However, /// if a slice is in a different location from the object, the object state and the slice can be hugely different, and providing a proper implementation @@ -196,10 +196,10 @@ impl Iterator for AddressRangeIterator { } impl MemorySlice for Range
{ - type Edge = Address; - type EdgeIterator = AddressRangeIterator; + type SlotType = Address; + type SlotIterator = AddressRangeIterator; - fn iter_edges(&self) -> Self::EdgeIterator { + fn iter_slots(&self) -> Self::SlotIterator { AddressRangeIterator { cursor: self.start, limit: self.end, @@ -238,12 +238,12 @@ impl MemorySlice for Range
{ /// Memory slice type with empty implementations. /// For VMs that do not use the memory slice type. #[derive(Debug, PartialEq, Eq, Clone, Hash)] -pub struct UnimplementedMemorySlice(PhantomData); +pub struct UnimplementedMemorySlice(PhantomData); /// Edge iterator for `UnimplementedMemorySlice`. -pub struct UnimplementedMemorySliceEdgeIterator(PhantomData); +pub struct UnimplementedMemorySliceEdgeIterator(PhantomData); -impl Iterator for UnimplementedMemorySliceEdgeIterator { +impl Iterator for UnimplementedMemorySliceEdgeIterator { type Item = E; fn next(&mut self) -> Option { @@ -251,11 +251,11 @@ impl Iterator for UnimplementedMemorySliceEdgeIterator { } } -impl MemorySlice for UnimplementedMemorySlice { - type Edge = E; - type EdgeIterator = UnimplementedMemorySliceEdgeIterator; +impl MemorySlice for UnimplementedMemorySlice { + type SlotType = E; + type SlotIterator = UnimplementedMemorySliceEdgeIterator; - fn iter_edges(&self) -> Self::EdgeIterator { + fn iter_slots(&self) -> Self::SlotIterator { unimplemented!() } @@ -284,7 +284,7 @@ mod tests { fn address_range_iteration() { let src: Vec = (0..32).collect(); let src_slice = Address::from_ptr(&src[0])..Address::from_ptr(&src[0]) + src.len(); - for (i, v) in src_slice.iter_edges().enumerate() { + for (i, v) in src_slice.iter_slots().enumerate() { assert_eq!(i, unsafe { v.load::() }) } } From 1056a37bad36bcfbe054e146da199be3d98206dd Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Sat, 11 May 2024 11:15:22 +0800 Subject: [PATCH 02/25] Rename type params --- src/util/edge_logger.rs | 10 +++++----- src/util/sanity/sanity_checker.rs | 10 +++++----- src/vm/scanning.rs | 12 ++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/util/edge_logger.rs b/src/util/edge_logger.rs index 94196a49e1..57ec41007e 100644 --- a/src/util/edge_logger.rs +++ b/src/util/edge_logger.rs @@ -10,14 +10,14 @@ use crate::vm::VMBinding; use std::collections::HashSet; use std::sync::RwLock; -pub struct 
SlotLogger { +pub struct SlotLogger { // A private hash-set to keep track of edges. - edge_log: RwLock>, + edge_log: RwLock>, } -unsafe impl Sync for SlotLogger {} +unsafe impl Sync for SlotLogger {} -impl SlotLogger { +impl SlotLogger { pub fn new() -> Self { Self { edge_log: Default::default(), @@ -31,7 +31,7 @@ impl SlotLogger { /// /// * `edge` - The edge to log. /// - pub fn log_edge(&self, edge: ES) { + pub fn log_edge(&self, edge: SL) { trace!("log_edge({:?})", edge); let mut edge_log = self.edge_log.write().unwrap(); assert!( diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs index 06446d0ad0..2426b19176 100644 --- a/src/util/sanity/sanity_checker.rs +++ b/src/util/sanity/sanity_checker.rs @@ -9,22 +9,22 @@ use std::collections::HashSet; use std::ops::{Deref, DerefMut}; #[allow(dead_code)] -pub struct SanityChecker { +pub struct SanityChecker { /// Visited objects refs: HashSet, /// Cached root edges for sanity root scanning - root_edges: Vec>, + root_edges: Vec>, /// Cached root nodes for sanity root scanning root_nodes: Vec>, } -impl Default for SanityChecker { +impl Default for SanityChecker { fn default() -> Self { Self::new() } } -impl SanityChecker { +impl SanityChecker { pub fn new() -> Self { Self { refs: HashSet::new(), @@ -34,7 +34,7 @@ impl SanityChecker { } /// Cache a list of root edges to the sanity checker. - pub fn add_root_edges(&mut self, roots: Vec) { + pub fn add_root_edges(&mut self, roots: Vec) { self.root_edges.push(roots) } diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 2d1ee9d32a..0a022bd0d5 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -6,14 +6,14 @@ use crate::vm::slot::Slot; use crate::vm::VMBinding; /// Callback trait of scanning functions that report edges. -pub trait EdgeVisitor { +pub trait EdgeVisitor { /// Call this function for each edge. - fn visit_edge(&mut self, edge: ES); + fn visit_edge(&mut self, edge: SL); } /// This lets us use closures as EdgeVisitor. 
-impl EdgeVisitor for F { - fn visit_edge(&mut self, edge: ES) { +impl EdgeVisitor for F { + fn visit_edge(&mut self, edge: SL) { #[cfg(debug_assertions)] trace!( "(FunctionClosure) Visit edge {:?} (pointing to {:?})", @@ -98,14 +98,14 @@ pub trait ObjectTracerContext: Clone + Send + 'static { /// it needs `Send` to be sent between threads. `'static` means it must not have /// references to variables with limited lifetime (such as local variables), because /// it needs to be moved between threads. -pub trait RootsWorkFactory: Clone + Send + 'static { +pub trait RootsWorkFactory: Clone + Send + 'static { /// Create work packets to handle root edges. /// /// The work packet may update the edges. /// /// Arguments: /// * `edges`: A vector of edges. - fn create_process_edge_roots_work(&mut self, edges: Vec); + fn create_process_edge_roots_work(&mut self, edges: Vec); /// Create work packets to handle non-transitively pinning roots. /// From 06be9a4fe872afecd28dc285b07d5e513ab5475e Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Sat, 11 May 2024 11:44:42 +0800 Subject: [PATCH 03/25] WIP: Fix comments in slot.rs --- src/vm/slot.rs | 87 +++++++++++++++++++++++++------------------------- 1 file changed, 44 insertions(+), 43 deletions(-) diff --git a/src/vm/slot.rs b/src/vm/slot.rs index 3188c44fb5..3f02a564f0 100644 --- a/src/vm/slot.rs +++ b/src/vm/slot.rs @@ -7,18 +7,20 @@ use atomic::Atomic; use crate::util::constants::{BYTES_IN_ADDRESS, LOG_BYTES_IN_ADDRESS}; use crate::util::{Address, ObjectReference}; -/// An `Edge` represents a slot in an object (a.k.a. a field), on the stack (i.e. a local variable) +/// A `Slot` represents a slot in an object (a.k.a. a field), on the stack (i.e. a local variable) /// or any other places (such as global variables). A slot may hold an object reference. We can -/// load the object reference from it, and we can store an ObjectReference into it. For some VMs, -/// a slot may sometimes not hold an object reference. 
For example, it can hold a special `NULL` -/// pointer which does not point to any object, or it can hold a tagged non-reference value, such -/// as small integers and special values such as `true`, `false`, `null` (a.k.a. "none", "nil", -/// etc. for other VMs), `undefined`, etc. +/// load the object reference from it, and we can update the object reference in it after the GC +/// moves the object. +/// +/// For some VMs, a slot may sometimes not hold an object reference. For example, it can hold a +/// special `NULL` pointer which does not point to any object, or it can hold a tagged +/// non-reference value, such as small integers and special values such as `true`, `false`, `null` +/// (a.k.a. "none", "nil", etc. for other VMs), `undefined`, etc. /// /// This intends to abstract out the differences of reference field representation among different /// VMs. If the VM represent a reference field as a word that holds the pointer to the object, it -/// can use the default `SimpleEdge` we provide. In some cases, the VM need to implement its own -/// `Edge` instances. +/// can use the default `SimpleSlot` we provide. In some cases, the VM needs to implement its own +/// `Slot` instances. /// /// For example: /// - The VM uses compressed pointer (Compressed OOP in OpenJDK's terminology), where the heap @@ -28,22 +30,21 @@ use crate::util::{Address, ObjectReference}; /// - A field holds a pointer to the middle of an object (an object field, or an array element, /// or some arbitrary offset) for some reasons. /// -/// When loading, `Edge::load` shall decode its internal representation to a "regular" -/// `ObjectReference`. The implementation -/// can do this with any appropriate operations, usually shifting and masking bits or subtracting -/// offset from the address. By doing this conversion, MMTk can implement GC algorithms in a -/// VM-neutral way, knowing only `ObjectReference`. 
+/// When loading, `Slot::load` shall decode its internal representation to a "regular" +/// `ObjectReference`. The implementation can do this with any appropriate operations, usually +/// shifting and masking bits or subtracting offset from the address. By doing this conversion, +/// MMTk can implement GC algorithms in a VM-neutral way, knowing only `ObjectReference`. /// -/// When GC moves object, `Edge::store` shall convert the updated `ObjectReference` back to the -/// edge-specific representation. Compressed pointers remain compressed; tagged pointers preserve +/// When GC moves object, `Slot::store` shall convert the updated `ObjectReference` back to the +/// slot-specific representation. Compressed pointers remain compressed; tagged pointers preserve /// their tag bits; and offsetted pointers keep their offsets. /// /// The methods of this trait are called on hot paths. Please ensure they have high performance. /// Use inlining when appropriate. /// -/// Note: this trait only concerns the representation (i.e. the shape) of the edge, not its +/// Note: this trait only concerns the representation (i.e. the shape) of the slot, not its /// semantics, such as whether it holds strong or weak references. If a VM holds a weak reference -/// in a word as a pointer, it can also use `SimpleEdge` for weak reference fields. +/// in a word as a pointer, it can also use `SimpleSlot` for weak reference fields. pub trait Slot: Copy + Send + Debug + PartialEq + Eq + Hash { /// Load object reference from the slot. /// @@ -72,21 +73,21 @@ pub trait Slot: Copy + Send + Debug + PartialEq + Eq + Hash { /// See: fn store(&self, object: ObjectReference); - /// Prefetch the edge so that a subsequent `load` will be faster. + /// Prefetch the slot so that a subsequent `load` will be faster. fn prefetch_load(&self) { // no-op by default } - /// Prefetch the edge so that a subsequent `store` will be faster. + /// Prefetch the slot so that a subsequent `store` will be faster. 
fn prefetch_store(&self) {
        // no-op by default
    }
}

-/// A simple edge implementation that represents a word-sized slot which holds the raw address of
+/// A simple slot implementation that represents a word-sized slot which holds the raw address of
 /// an `ObjectReference`, or 0 if it is holding a null reference.
 ///
-/// It is the default edge type, and should be suitable for most VMs.
+/// It is the default slot type, and should be suitable for most VMs.
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 #[repr(transparent)]
 pub struct SimpleSlot {
@@ -94,7 +95,7 @@ pub struct SimpleSlot {
 }
 
 impl SimpleSlot {
-    /// Create a simple edge from an address.
+    /// Create a simple slot from an address.
     ///
     /// Arguments:
     /// * `address`: The address in memory where an `ObjectReference` is stored.
@@ -104,7 +105,7 @@ impl SimpleSlot {
         }
     }
 
-    /// Get the address of the edge.
+    /// Get the address of the slot.
     ///
     /// Return the address at which the `ObjectReference` is stored.
     pub fn as_address(&self) -> Address {
@@ -125,16 +126,16 @@ impl Slot for SimpleSlot {
     }
 }
 
-/// For backword compatibility, we let `Address` implement `Edge` so that existing bindings that
-/// use `Address` to represent an edge can continue to work.
+/// For backward compatibility, we let `Address` implement `Slot` with the same semantics as
+/// [`SimpleSlot`] so that existing bindings that use `Address` as `Slot` can continue to work.
 ///
-/// However, we should use `SimpleEdge` directly instead of using `Address`. The purpose of the
+/// However, we should use `SimpleSlot` directly instead of using `Address`. The purpose of the
 /// `Address` type is to represent an address in memory. It is not directly related to fields
 /// that hold references to other objects. Calling `load()` and `store()` on an `Address` does
 /// not indicate how many bytes to load or store, or how to interpret those bytes.
On the other
-/// hand, `SimpleEdge` is all about how to access a field that holds a reference represented
+/// hand, `SimpleSlot` is all about how to access a field that holds a reference represented
 /// simply as an `ObjectReference`. The intention and the semantics are clearer with
-/// `SimpleEdge`.
+/// `SimpleSlot`.
 impl Slot for Address {
     fn load(&self) -> Option {
         let addr = unsafe { Address::load(*self) };
@@ -154,13 +155,13 @@ fn a_simple_slot_should_have_the_same_size_as_a_pointer() {
     );
 }
 
-/// A abstract memory slice represents a piece of **heap** memory.
+/// An abstract memory slice represents a piece of **heap** memory which may contain many slots.
 pub trait MemorySlice: Send + Debug + PartialEq + Eq + Clone + Hash {
-    /// The associate type to define how to access edges from a memory slice.
+    /// The associated type to define how to access slots from a memory slice.
     type SlotType: Slot;
-    /// The associate type to define how to iterate edges in a memory slice.
+    /// The associated type to define how to iterate slots in a memory slice.
     type SlotIterator: Iterator;
-    /// Iterate object edges within the slice. If there are non-reference values in the slice, the iterator should skip them.
+    /// Iterate object slots within the slice. If there are non-reference values in the slice, the iterator should skip them.
     fn iter_slots(&self) -> Self::SlotIterator;
     /// The object which this slice belongs to. If we know the object for the slice, we will check the object state (e.g. mature or not), rather than the slice address.
     /// Normally checking the object and checking the slice does not make a difference, as the slice is part of the object (in terms of memory range). However,
@@ -175,7 +176,7 @@ pub trait MemorySlice: Send + Debug + PartialEq + Eq + Clone + Hash {
     fn copy(src: &Self, tgt: &Self);
 }
 
-/// Iterate edges within `Range
`. +/// Iterate slots within `Range
`. pub struct AddressRangeIterator { cursor: Address, limit: Address, @@ -188,9 +189,9 @@ impl Iterator for AddressRangeIterator { if self.cursor >= self.limit { None } else { - let edge = self.cursor; + let slot = self.cursor; self.cursor += BYTES_IN_ADDRESS; - Some(edge) + Some(slot) } } } @@ -238,22 +239,22 @@ impl MemorySlice for Range
{ /// Memory slice type with empty implementations. /// For VMs that do not use the memory slice type. #[derive(Debug, PartialEq, Eq, Clone, Hash)] -pub struct UnimplementedMemorySlice(PhantomData); +pub struct UnimplementedMemorySlice(PhantomData); -/// Edge iterator for `UnimplementedMemorySlice`. -pub struct UnimplementedMemorySliceEdgeIterator(PhantomData); +/// Slot iterator for `UnimplementedMemorySlice`. +pub struct UnimplementedMemorySliceSlotIterator(PhantomData); -impl Iterator for UnimplementedMemorySliceEdgeIterator { - type Item = E; +impl Iterator for UnimplementedMemorySliceSlotIterator { + type Item = SL; fn next(&mut self) -> Option { unimplemented!() } } -impl MemorySlice for UnimplementedMemorySlice { - type SlotType = E; - type SlotIterator = UnimplementedMemorySliceEdgeIterator; +impl MemorySlice for UnimplementedMemorySlice { + type SlotType = SL; + type SlotIterator = UnimplementedMemorySliceSlotIterator; fn iter_slots(&self) -> Self::SlotIterator { unimplemented!() From 11c8d2fe262ccab44d74677c995a424f55018960 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 13 May 2024 17:19:30 +0800 Subject: [PATCH 04/25] edge_logger -> slot_logger --- src/mmtk.rs | 6 +++--- src/plan/markcompact/gc_work.rs | 2 +- src/scheduler/gc_work.rs | 4 ++-- src/scheduler/scheduler.rs | 4 ++-- src/util/mod.rs | 2 +- src/util/sanity/sanity_checker.rs | 2 +- src/util/{edge_logger.rs => slot_logger.rs} | 0 7 files changed, 10 insertions(+), 10 deletions(-) rename src/util/{edge_logger.rs => slot_logger.rs} (100%) diff --git a/src/mmtk.rs b/src/mmtk.rs index f767724b02..ff31f28969 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -9,7 +9,7 @@ use crate::scheduler::GCWorkScheduler; #[cfg(feature = "analysis")] use crate::util::analysis::AnalysisManager; #[cfg(feature = "extreme_assertions")] -use crate::util::edge_logger::SlotLogger; +use crate::util::slot_logger::SlotLogger; use crate::util::finalizable_processor::FinalizableProcessor; use 
crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::layout::vm_layout::VMLayout; @@ -115,7 +115,7 @@ pub struct MMTK { #[cfg(feature = "sanity")] pub(crate) sanity_checker: Mutex>, #[cfg(feature = "extreme_assertions")] - pub(crate) edge_logger: SlotLogger, + pub(crate) slot_logger: SlotLogger, pub(crate) gc_trigger: Arc>, pub(crate) gc_requester: Arc>, pub(crate) stats: Arc, @@ -222,7 +222,7 @@ impl MMTK { inside_sanity: AtomicBool::new(false), inside_harness: AtomicBool::new(false), #[cfg(feature = "extreme_assertions")] - edge_logger: SlotLogger::new(), + slot_logger: SlotLogger::new(), #[cfg(feature = "analysis")] analysis_manager: Arc::new(AnalysisManager::new(stats.clone())), gc_trigger, diff --git a/src/plan/markcompact/gc_work.rs b/src/plan/markcompact/gc_work.rs index a6a6371548..c89be04793 100644 --- a/src/plan/markcompact/gc_work.rs +++ b/src/plan/markcompact/gc_work.rs @@ -48,7 +48,7 @@ impl GCWork for UpdateReferences { plan_mut.common.release(worker.tls, true); plan_mut.common.prepare(worker.tls, true); #[cfg(feature = "extreme_assertions")] - mmtk.edge_logger.reset(); + mmtk.slot_logger.reset(); // We do two passes of transitive closures. We clear the live bytes from the first pass. 
#[cfg(feature = "count_live_bytes_in_gc")] diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 4d1afe29f6..dc8131ae5d 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -463,10 +463,10 @@ impl ProcessEdgesBase { bucket: WorkBucketStage, ) -> Self { #[cfg(feature = "extreme_assertions")] - if crate::util::edge_logger::should_check_duplicate_edges(mmtk.get_plan()) { + if crate::util::slot_logger::should_check_duplicate_edges(mmtk.get_plan()) { for edge in &edges { // log edge, panic if already logged - mmtk.edge_logger.log_edge(*edge); + mmtk.slot_logger.log_edge(*edge); } } Self { diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 1c91609895..f27bc64b56 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -570,9 +570,9 @@ impl GCWorkScheduler { plan_mut.end_of_gc(worker.tls); #[cfg(feature = "extreme_assertions")] - if crate::util::edge_logger::should_check_duplicate_edges(mmtk.get_plan()) { + if crate::util::slot_logger::should_check_duplicate_edges(mmtk.get_plan()) { // reset the logging info at the end of each GC - mmtk.edge_logger.reset(); + mmtk.slot_logger.reset(); } // Reset the triggering information. diff --git a/src/util/mod.rs b/src/util/mod.rs index 393a24d539..38ad8ddaee 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -37,7 +37,7 @@ pub mod test_util; pub(crate) mod analysis; /// Logging edges to check duplicated edges in GC. #[cfg(feature = "extreme_assertions")] -pub(crate) mod edge_logger; +pub(crate) mod slot_logger; /// Non-generic refs to generic types of ``. pub(crate) mod erase_vm; /// Finalization implementation. diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs index 2426b19176..3009ac65dc 100644 --- a/src/util/sanity/sanity_checker.rs +++ b/src/util/sanity/sanity_checker.rs @@ -68,7 +68,7 @@ impl GCWork for ScheduleSanityGC

{ // We are going to do sanity GC which will traverse the object graph again. Reset edge logger to clear recorded edges. #[cfg(feature = "extreme_assertions")] - mmtk.edge_logger.reset(); + mmtk.slot_logger.reset(); mmtk.sanity_begin(); // Stop & scan mutators (mutator scanning can happen before STW) diff --git a/src/util/edge_logger.rs b/src/util/slot_logger.rs similarity index 100% rename from src/util/edge_logger.rs rename to src/util/slot_logger.rs From 0548fef29f7c80b62dd849787c9aaa9473bee02d Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 13 May 2024 17:23:09 +0800 Subject: [PATCH 05/25] Comments in memory_manager.rs --- src/memory_manager.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/memory_manager.rs b/src/memory_manager.rs index 888a4dcdf2..6f41a3951b 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -315,7 +315,7 @@ pub fn object_reference_write_post( /// The *subsuming* memory region copy barrier by MMTk. /// This is called when the VM tries to copy a piece of heap memory to another. /// The data within the slice does not necessarily to be all valid pointers, -/// but the VM binding will be able to filter out non-reference values on edge iteration. +/// but the VM binding will be able to filter out non-reference values on slot iteration. /// /// For VMs that performs a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to /// call `memory_region_copy*` APIs. Same as `object_reference_write*`, the binding can choose either the subsuming barrier, @@ -340,7 +340,7 @@ pub fn memory_region_copy( /// *before* it performs memory copy. /// This is called when the VM tries to copy a piece of heap memory to another. /// The data within the slice does not necessarily to be all valid pointers, -/// but the VM binding will be able to filter out non-reference values on edge iteration. +/// but the VM binding will be able to filter out non-reference values on slot iteration. 
/// /// For VMs that performs a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to /// call `memory_region_copy*` APIs. Same as `object_reference_write*`, the binding can choose either the subsuming barrier, @@ -365,7 +365,7 @@ pub fn memory_region_copy_pre( /// *after* it performs memory copy. /// This is called when the VM tries to copy a piece of heap memory to another. /// The data within the slice does not necessarily to be all valid pointers, -/// but the VM binding will be able to filter out non-reference values on edge iteration. +/// but the VM binding will be able to filter out non-reference values on slot iteration. /// /// For VMs that performs a heap memory copy operation, for example OpenJDK's array copy operation, the binding needs to /// call `memory_region_copy*` APIs. Same as `object_reference_write*`, the binding can choose either the subsuming barrier, From 908a711df053c3caf2b0c61bc70f16411e3fe569 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 13 May 2024 18:04:18 +0800 Subject: [PATCH 06/25] Update types and methods in scanning.rs --- src/plan/tracing.rs | 6 +-- src/scheduler/gc_work.rs | 4 +- src/vm/mod.rs | 2 +- src/vm/scanning.rs | 77 +++++++++++++++++--------------- tests/test_roots_work_factory.rs | 4 +- 5 files changed, 48 insertions(+), 45 deletions(-) diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 47fa003bad..fc75c4ce84 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -4,7 +4,7 @@ use crate::scheduler::gc_work::{EdgeOf, ProcessEdgesWork}; use crate::scheduler::{GCWorker, WorkBucketStage}; use crate::util::ObjectReference; -use crate::vm::EdgeVisitor; +use crate::vm::SlotVisitor; /// This trait represents an object queue to enqueue objects during tracing. 
pub trait ObjectQueue { @@ -111,8 +111,8 @@ impl<'a, E: ProcessEdgesWork> ObjectsClosure<'a, E> { } } -impl<'a, E: ProcessEdgesWork> EdgeVisitor> for ObjectsClosure<'a, E> { - fn visit_edge(&mut self, slot: EdgeOf) { +impl<'a, E: ProcessEdgesWork> SlotVisitor> for ObjectsClosure<'a, E> { + fn visit_slot(&mut self, slot: EdgeOf) { #[cfg(debug_assertions)] { use crate::vm::slot::Slot; diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index dc8131ae5d..3bc5dade7e 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -704,7 +704,7 @@ impl, PPE: ProcessEdgesWork, PPE: ProcessEdgesWork> RootsWorkFactory for ProcessEdgesWorkRootsWorkFactory { - fn create_process_edge_roots_work(&mut self, edges: Vec) { + fn create_process_slot_roots_work(&mut self, edges: Vec) { crate::memory_manager::add_work_packet( self.mmtk, WorkBucketStage::Closure, @@ -790,7 +790,7 @@ pub trait ScanObjectsWork: GCWork + Sized { .shared .increase_live_bytes(VM::VMObjectModel::get_current_size(object)); - if ::VMScanning::support_edge_enqueuing(tls, object) { + if ::VMScanning::support_slot_enqueuing(tls, object) { trace!("Scan object (edge) {}", object); // If an object supports edge-enqueuing, we enqueue its edges. 
::VMScanning::scan_object(tls, object, &mut closure); diff --git a/src/vm/mod.rs b/src/vm/mod.rs index 18cff83ae1..bea40a3a7d 100644 --- a/src/vm/mod.rs +++ b/src/vm/mod.rs @@ -29,7 +29,7 @@ pub use self::object_model::specs::*; pub use self::object_model::ObjectModel; pub use self::reference_glue::Finalizable; pub use self::reference_glue::ReferenceGlue; -pub use self::scanning::EdgeVisitor; +pub use self::scanning::SlotVisitor; pub use self::scanning::ObjectTracer; pub use self::scanning::ObjectTracerContext; pub use self::scanning::RootsWorkFactory; diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 0a022bd0d5..9b1a918898 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -5,26 +5,26 @@ use crate::util::VMWorkerThread; use crate::vm::slot::Slot; use crate::vm::VMBinding; -/// Callback trait of scanning functions that report edges. -pub trait EdgeVisitor { - /// Call this function for each edge. - fn visit_edge(&mut self, edge: SL); +/// Callback trait of scanning functions that report slots. +pub trait SlotVisitor { + /// Call this function for each slot. + fn visit_slot(&mut self, slot: SL); } -/// This lets us use closures as EdgeVisitor. -impl EdgeVisitor for F { - fn visit_edge(&mut self, edge: SL) { +/// This lets us use closures as SlotVisitor. +impl SlotVisitor for F { + fn visit_slot(&mut self, slot: SL) { #[cfg(debug_assertions)] trace!( - "(FunctionClosure) Visit edge {:?} (pointing to {:?})", - edge, - edge.load() + "(FunctionClosure) Visit slot {:?} (pointing to {:?})", + slot, + slot.load() ); - self(edge) + self(slot) } } -/// Callback trait of scanning functions that directly trace through edges. +/// Callback trait of scanning functions that directly trace through object graph edges. pub trait ObjectTracer { /// Call this function to trace through an object graph edge which points to `object`. 
/// @@ -99,13 +99,13 @@ pub trait ObjectTracerContext: Clone + Send + 'static { /// references to variables with limited lifetime (such as local variables), because /// it needs to be moved between threads. pub trait RootsWorkFactory: Clone + Send + 'static { - /// Create work packets to handle root edges. + /// Create work packets to handle root slots. /// - /// The work packet may update the edges. + /// The work packet may update the slots. /// /// Arguments: - /// * `edges`: A vector of edges. - fn create_process_edge_roots_work(&mut self, edges: Vec); + /// * `slots`: A vector of slots. + fn create_process_slot_roots_work(&mut self, slots: Vec); /// Create work packets to handle non-transitively pinning roots. /// @@ -114,10 +114,10 @@ pub trait RootsWorkFactory: Clone + Send + 'static { /// But it will not prevent the children of those objects from moving. /// /// This method is useful for conservative stack scanning, or VMs that cannot update some - /// of the root edges. + /// of the root slots. /// /// Arguments: - /// * `nodes`: A vector of references to objects pointed by root edges. + /// * `nodes`: A vector of references to objects pointed by edges from roots. fn create_process_pinning_roots_work(&mut self, nodes: Vec); /// Create work packets to handle transitively pinning (TP) roots. @@ -126,18 +126,18 @@ pub trait RootsWorkFactory: Clone + Send + 'static { /// Unlike `create_process_pinning_roots_work`, no objects in the transitive closure of `nodes` will be moved, either. /// /// Arguments: - /// * `nodes`: A vector of references to objects pointed by root edges. + /// * `nodes`: A vector of references to objects pointed by edges from roots. fn create_process_tpinning_roots_work(&mut self, nodes: Vec); } /// VM-specific methods for scanning roots/objects. pub trait Scanning { - /// Return true if the given object supports edge enqueuing. + /// Return true if the given object supports slot enqueuing. 
/// /// - If this returns true, MMTk core will call `scan_object` on the object. /// - Otherwise, MMTk core will call `scan_object_and_trace_edges` on the object. /// - /// For maximum performance, the VM should support edge-enqueuing for as many objects as + /// For maximum performance, the VM should support slot-enqueuing for as many objects as /// practical. Also note that this method is called for every object to be scanned, so it /// must be fast. The VM binding should avoid expensive checks and keep it as efficient as /// possible. @@ -145,16 +145,19 @@ pub trait Scanning { /// Arguments: /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. - fn support_edge_enqueuing(_tls: VMWorkerThread, _object: ObjectReference) -> bool { + fn support_slot_enqueuing(_tls: VMWorkerThread, _object: ObjectReference) -> bool { true } /// Delegated scanning of a object, visiting each reference field encountered. /// - /// The VM shall call `edge_visitor.visit_edge` on each reference field. + /// The VM shall call `slot_visitor.visit_slot` on each reference field. This effectively + /// visits all outgoing edges from the current object in the form of slots. /// - /// The VM may skip a reference field if it holds a null reference. If the VM supports tagged - /// references, it must skip tagged reference fields which are not holding references. + /// The VM may skip a reference field if it is not holding an object reference (e.g. if the + /// field is holding a null reference, or a tagged non-reference value such as small integer). + /// Even if not skipped, [`Slot::load`] will still return `None` if the slot is not holding an + /// object reference. /// /// The `memory_manager::is_mmtk_object` function can be used in this function if /// - the "is_mmtk_object" feature is enabled, and @@ -163,23 +166,23 @@ pub trait Scanning { /// Arguments: /// * `tls`: The VM-specific thread-local storage for the current worker. 
/// * `object`: The object to be scanned. - /// * `edge_visitor`: Called back for each edge. - fn scan_object>( + /// * `slot_visitor`: Called back for each field. + fn scan_object>( tls: VMWorkerThread, object: ObjectReference, - edge_visitor: &mut EV, + slot_visitor: &mut SV, ); - /// Delegated scanning of a object, visiting each reference field encountered, and trace the + /// Delegated scanning of a object, visiting each reference field encountered, and tracing the /// objects pointed by each field. /// - /// The VM shall call `object_tracer.trace_object` on the value held in each reference field, - /// and assign the returned value back to the field. If the VM uses tagged references, the - /// value passed to `object_tracer.trace_object` shall be the `ObjectReference` to the object - /// without any tag bits. + /// The VM shall call `object_tracer.trace_object` with the argument being the object reference + /// held in each reference field. If the GC moves the object, the VM shall update the field so + /// that it refers to the object using the object reference returned from `trace_object`. This + /// effectively traces through all outgoing edges from the current object directly. /// - /// The VM may skip a reference field if it holds a null reference. If the VM supports tagged - /// references, it must skip tagged reference fields which are not holding references. + /// The VM must skip reference fields that are not holding object references (e.g. if the + /// field is holding a null reference, or a tagged non-reference value such as small integer). /// /// The `memory_manager::is_mmtk_object` function can be used in this function if /// - the "is_mmtk_object" feature is enabled, and @@ -188,13 +191,13 @@ pub trait Scanning { /// Arguments: /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. - /// * `object_tracer`: Called back for the content of each edge. 
+ /// * `object_tracer`: Called back for the object reference held in each field. fn scan_object_and_trace_edges( _tls: VMWorkerThread, _object: ObjectReference, _object_tracer: &mut OT, ) { - unreachable!("scan_object_and_trace_edges() will not be called when support_edge_enqueue() is always true.") + unreachable!("scan_object_and_trace_edges() will not be called when support_slot_enqueuing() is always true.") } /// MMTk calls this method at the first time during a collection that thread's stacks diff --git a/tests/test_roots_work_factory.rs b/tests/test_roots_work_factory.rs index 4a8b64b511..74db5306b6 100644 --- a/tests/test_roots_work_factory.rs +++ b/tests/test_roots_work_factory.rs @@ -21,7 +21,7 @@ impl MockScanning { } fn mock_scan_roots(&self, mut factory: impl mmtk::vm::RootsWorkFactory

) { - factory.create_process_edge_roots_work(self.roots.clone()); + factory.create_process_slot_roots_work(self.roots.clone()); } } @@ -42,7 +42,7 @@ struct MockFactory { } impl RootsWorkFactory
for MockFactory { - fn create_process_edge_roots_work(&mut self, edges: Vec
) { + fn create_process_slot_roots_work(&mut self, edges: Vec
) { assert_eq!(edges, EDGES); match self.round { 1 => { From b271602a786218addcebceea88f7db05897fac8e Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 13 May 2024 18:06:01 +0800 Subject: [PATCH 07/25] EdgeOf -> SlotOf --- src/plan/generational/gc_work.rs | 4 ++-- src/plan/tracing.rs | 8 ++++---- src/scheduler/gc_work.rs | 16 ++++++++-------- src/util/sanity/sanity_checker.rs | 2 +- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/plan/generational/gc_work.rs b/src/plan/generational/gc_work.rs index 3a2ccf4e8d..4aef705e06 100644 --- a/src/plan/generational/gc_work.rs +++ b/src/plan/generational/gc_work.rs @@ -32,7 +32,7 @@ impl + PlanTraceObject, const KIND type ScanObjectsWorkType = PlanScanObjects; fn new( - edges: Vec>, + edges: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, @@ -52,7 +52,7 @@ impl + PlanTraceObject, const KIND ) } - fn process_edge(&mut self, slot: EdgeOf) { + fn process_edge(&mut self, slot: SlotOf) { let Some(object) = slot.load() else { // Skip slots that are not holding an object reference. return; diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index fc75c4ce84..5bd94d2527 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -1,7 +1,7 @@ //! This module contains code useful for tracing, //! i.e. visiting the reachable objects by traversing all or part of an object graph. -use crate::scheduler::gc_work::{EdgeOf, ProcessEdgesWork}; +use crate::scheduler::gc_work::{SlotOf, ProcessEdgesWork}; use crate::scheduler::{GCWorker, WorkBucketStage}; use crate::util::ObjectReference; use crate::vm::SlotVisitor; @@ -81,7 +81,7 @@ impl ObjectQueue for VectorQueue { /// It maintains a buffer for the edges, and flushes edges to a new work packet /// if the buffer is full or if the type gets dropped. 
pub struct ObjectsClosure<'a, E: ProcessEdgesWork> { - buffer: VectorQueue>, + buffer: VectorQueue>, pub(crate) worker: &'a mut GCWorker, bucket: WorkBucketStage, } @@ -111,8 +111,8 @@ impl<'a, E: ProcessEdgesWork> ObjectsClosure<'a, E> { } } -impl<'a, E: ProcessEdgesWork> SlotVisitor> for ObjectsClosure<'a, E> { - fn visit_slot(&mut self, slot: EdgeOf) { +impl<'a, E: ProcessEdgesWork> SlotVisitor> for ObjectsClosure<'a, E> { + fn visit_slot(&mut self, slot: SlotOf) { #[cfg(debug_assertions)] { use crate::vm::slot::Slot; diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 3bc5dade7e..5d3c562d43 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -504,8 +504,8 @@ impl ProcessEdgesBase { } } -/// A short-hand for `::VMEdge`. -pub type EdgeOf = <::VM as VMBinding>::VMSlot; +/// A short-hand for `::VMSlot`. +pub type SlotOf = <::VM as VMBinding>::VMSlot; /// Scan & update a list of object slots // @@ -545,7 +545,7 @@ pub trait ProcessEdgesWork: /// * `mmtk`: a reference to the MMTK instance. /// * `bucket`: which work bucket this packet belongs to. Further work generated from this packet will also be put to the same bucket. fn new( - edges: Vec>, + edges: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, @@ -602,7 +602,7 @@ pub trait ProcessEdgesWork: /// Process an edge, including loading the object reference from the memory slot, /// trace the object and store back the new object reference if necessary. - fn process_edge(&mut self, slot: EdgeOf) { + fn process_edge(&mut self, slot: SlotOf) { let Some(object) = slot.load() else { // Skip slots that are not holding an object reference. 
return; @@ -653,7 +653,7 @@ impl ProcessEdgesWork for SFTProcessEdges { type ScanObjectsWorkType = ScanObjects; fn new( - edges: Vec>, + edges: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, @@ -899,7 +899,7 @@ impl + Plan, const KIND: TraceKin type ScanObjectsWorkType = PlanScanObjects; fn new( - edges: Vec>, + edges: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, @@ -920,7 +920,7 @@ impl + Plan, const KIND: TraceKin .trace_object::(&mut self.base.nodes, object, worker) } - fn process_edge(&mut self, slot: EdgeOf) { + fn process_edge(&mut self, slot: SlotOf) { let Some(object) = slot.load() else { // Skip slots that are not holding an object reference. return; @@ -1122,7 +1122,7 @@ impl ProcessEdgesWork for UnsupportedProcessEdges { type ScanObjectsWorkType = ScanObjects; fn new( - _edges: Vec>, + _edges: Vec>, _roots: bool, _mmtk: &'static MMTK, _bucket: WorkBucketStage, diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs index 3009ac65dc..eb37feb45c 100644 --- a/src/util/sanity/sanity_checker.rs +++ b/src/util/sanity/sanity_checker.rs @@ -177,7 +177,7 @@ impl ProcessEdgesWork for SanityGCProcessEdges { const OVERWRITE_REFERENCE: bool = false; fn new( - edges: Vec>, + edges: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, From d1cb70ae3ce8a8d1469e6f74c517c287fb21fc6a Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 13 May 2024 18:07:03 +0800 Subject: [PATCH 08/25] tracing.rs --- src/plan/tracing.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 5bd94d2527..71504c31fd 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -77,8 +77,8 @@ impl ObjectQueue for VectorQueue { } } -/// A transitive closure visitor to collect the edges from objects. -/// It maintains a buffer for the edges, and flushes edges to a new work packet +/// A transitive closure visitor to collect the slots from objects. 
+/// It maintains a buffer for the slots, and flushes slots to a new work packet /// if the buffer is full or if the type gets dropped. pub struct ObjectsClosure<'a, E: ProcessEdgesWork> { buffer: VectorQueue>, @@ -117,7 +117,7 @@ impl<'a, E: ProcessEdgesWork> SlotVisitor> for ObjectsClosure<'a, E> { { use crate::vm::slot::Slot; trace!( - "(ObjectsClosure) Visit edge {:?} (pointing to {:?})", + "(ObjectsClosure) Visit slot {:?} (pointing to {:?})", slot, slot.load() ); From 499ac0d8cc5871e53c749a9449e4220c2063b771 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 13 May 2024 19:55:53 +0800 Subject: [PATCH 09/25] scheduler/gc_work.rs --- src/plan/generational/gc_work.rs | 2 +- src/scheduler/gc_work.rs | 115 +++++++++++++++++------------- src/scheduler/scheduler.rs | 2 +- src/util/sanity/sanity_checker.rs | 22 +++--- src/util/slot_logger.rs | 38 +++++----- 5 files changed, 96 insertions(+), 83 deletions(-) diff --git a/src/plan/generational/gc_work.rs b/src/plan/generational/gc_work.rs index 4aef705e06..7c7fd2debd 100644 --- a/src/plan/generational/gc_work.rs +++ b/src/plan/generational/gc_work.rs @@ -52,7 +52,7 @@ impl + PlanTraceObject, const KIND ) } - fn process_edge(&mut self, slot: SlotOf) { + fn process_slot(&mut self, slot: SlotOf) { let Some(object) = slot.load() else { // Skip slots that are not holding an object reference. return; diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 5d3c562d43..11765261bb 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -441,7 +441,7 @@ impl GCWork for ScanVMSpecificRoots { } pub struct ProcessEdgesBase { - pub edges: Vec, + pub slots: Vec, pub nodes: VectorObjectQueue, mmtk: &'static MMTK, // Use raw pointer for fast pointer dereferencing, instead of using `Option<&'static mut GCWorker>`. @@ -457,20 +457,20 @@ impl ProcessEdgesBase { // Requires an MMTk reference. Each plan-specific type that uses ProcessEdgesBase can get a static plan reference // at creation. 
This avoids overhead for dynamic dispatch or downcasting plan for each object traced. pub fn new( - edges: Vec, + slots: Vec, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, ) -> Self { #[cfg(feature = "extreme_assertions")] - if crate::util::slot_logger::should_check_duplicate_edges(mmtk.get_plan()) { - for edge in &edges { - // log edge, panic if already logged - mmtk.slot_logger.log_edge(*edge); + if crate::util::slot_logger::should_check_duplicate_slots(mmtk.get_plan()) { + for slot in &slots { + // log slot, panic if already logged + mmtk.slot_logger.log_slot(*slot); } } Self { - edges, + slots, nodes: VectorObjectQueue::new(), mmtk, worker: std::ptr::null_mut(), @@ -507,7 +507,19 @@ impl ProcessEdgesBase { /// A short-hand for `::VMSlot`. pub type SlotOf = <::VM as VMBinding>::VMSlot; -/// Scan & update a list of object slots +/// Process object graph edges represented as object slots. Work packets of this trait contain a +/// list of object slots to be processed. They trace the objects pointed by the object reference in +/// each slot, and update the slots if the GC moves the target object when tracing. +/// +/// Instances of this trait can also be abused as a provider of the `trace_object` method, without +/// representing edges as slots. In that case, an object graph edge is represented as the object +/// reference to the target node, while the source node is implicit. The caller of `trace_object` +/// is responsible for updating the slot (field or root variable). This is useful for +/// node-enqueuing tracing ([`Scanning::scan_object_and_trace_edges`]) as well as weak reference +/// processing ([`Scanning::process_weak_refs`]). +/// +/// TODO: We should refactor this trait to decouple it from slots. +/// See: // // Note: be very careful when using this trait. 
process_node() will push objects // to the buffer, and it is expected that at the end of the operation, flush() @@ -524,7 +536,7 @@ pub trait ProcessEdgesWork: /// The work packet type for scanning objects when using this ProcessEdgesWork. type ScanObjectsWorkType: ScanObjectsWork; - /// The maximum number of edges that should be put to one of this work packets. + /// The maximum number of slots that should be put to one of this work packets. /// The caller who creates a work packet of this trait should be responsible to /// comply with this capacity. /// Higher capacity means the packet will take longer to finish, and may lead to @@ -540,12 +552,12 @@ pub trait ProcessEdgesWork: /// Create a [`ProcessEdgesWork`]. /// /// Arguments: - /// * `edges`: a vector of the edges. + /// * `slots`: a vector of slots. /// * `roots`: are the objects root reachable objects? /// * `mmtk`: a reference to the MMTK instance. /// * `bucket`: which work bucket this packet belongs to. Further work generated from this packet will also be put to the same bucket. fn new( - edges: Vec>, + slots: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, @@ -566,7 +578,7 @@ pub trait ProcessEdgesWork: .sanity_checker .lock() .unwrap() - .add_root_edges(self.edges.clone()); + .add_root_slots(self.slots.clone()); } /// Start the a scan work packet. If SCAN_OBJECTS_IMMEDIATELY, the work packet will be executed immediately, in this method. @@ -600,9 +612,9 @@ pub trait ProcessEdgesWork: } } - /// Process an edge, including loading the object reference from the memory slot, + /// Process a slot, including loading the object reference from the memory slot, /// trace the object and store back the new object reference if necessary. - fn process_edge(&mut self, slot: SlotOf) { + fn process_slot(&mut self, slot: SlotOf) { let Some(object) = slot.load() else { // Skip slots that are not holding an object reference. 
return; @@ -613,11 +625,11 @@ } } - /// Process all the edges in the work packet. - fn process_edges(&mut self) { - probe!(mmtk, process_edges, self.edges.len(), self.is_roots()); - for i in 0..self.edges.len() { - self.process_edge(self.edges[i]) + /// Process all the slots in the work packet. + fn process_slots(&mut self) { + probe!(mmtk, process_slots, self.slots.len(), self.is_roots()); + for i in 0..self.slots.len() { + self.process_slot(self.slots[i]) } } } @@ -625,7 +637,7 @@ impl GCWork for E { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { self.set_worker(worker); - self.process_edges(); + self.process_slots(); if !self.nodes.is_empty() { self.flush(); } @@ -637,12 +649,14 @@ impl GCWork for E { } } -/// A general process edges implementation using SFT. A plan can always implement their own process edges. However, -/// Most plans can use this work packet for tracing amd they do not need to provide a plan-specific trace object work packet. -/// If they choose to use this type, they need to provide a correct implementation for some related methods -/// (such as `Space.set_copy_for_sft_trace()`, `SFT.sft_trace_object()`). -/// Some plans are not using this type, mostly due to more complex tracing. Either it is impossible to use this type, or -/// there is performance overheads for using this general trace type. In such cases, they implement their specific process edges. +/// A general implementation of [`ProcessEdgesWork`] using SFT. A plan can always implement their +/// own [`ProcessEdgesWork`] instances. However, most plans can use this work packet for tracing and +/// they do not need to provide a plan-specific trace object work packet. If they choose to use this +/// type, they need to provide a correct implementation for some related methods (such as +/// `Space.set_copy_for_sft_trace()`, `SFT.sft_trace_object()`). 
Some plans are not using this type, +/// mostly due to more complex tracing. Either it is impossible to use this type, or there is +/// performance overheads for using this general trace type. In such cases, they implement their +/// specific [`ProcessEdgesWork`] instances. // TODO: This is not used any more. Should we remove it? pub struct SFTProcessEdges { pub base: ProcessEdgesBase, @@ -653,12 +667,12 @@ impl ProcessEdgesWork for SFTProcessEdges { type ScanObjectsWorkType = ScanObjects; fn new( - edges: Vec>, + slots: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, ) -> Self { - let base = ProcessEdgesBase::new(edges, roots, mmtk, bucket); + let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket); Self { base } } @@ -704,11 +718,11 @@ impl, PPE: ProcessEdgesWork, PPE: ProcessEdgesWork> RootsWorkFactory for ProcessEdgesWorkRootsWorkFactory { - fn create_process_slot_roots_work(&mut self, edges: Vec) { + fn create_process_slot_roots_work(&mut self, slots: Vec) { crate::memory_manager::add_work_packet( self.mmtk, WorkBucketStage::Closure, - DPE::new(edges, true, self.mmtk, WorkBucketStage::Closure), + DPE::new(slots, true, self.mmtk, WorkBucketStage::Closure), ); } @@ -757,7 +771,8 @@ impl DerefMut for SFTProcessEdges { /// Trait for a work packet that scans objects pub trait ScanObjectsWork: GCWork + Sized { - /// The associated ProcessEdgesWork for processing the edges of the objects in this packet. + /// The associated ProcessEdgesWork for processing the outgoing edges of the objects in this + /// packet. type E: ProcessEdgesWork; /// Called after each object is scanned. @@ -775,10 +790,9 @@ pub trait ScanObjectsWork: GCWork + Sized { ) { let tls = worker.tls; - // Scan the nodes in the buffer. let objects_to_scan = buffer; - // Then scan those objects for edges. + // Scan the objects in the list that supports slot-enqueuing. 
let mut scan_later = vec![]; { let mut closure = ObjectsClosure::::new(worker, self.get_bucket()); @@ -791,12 +805,12 @@ pub trait ScanObjectsWork: GCWork + Sized { .increase_live_bytes(VM::VMObjectModel::get_current_size(object)); if ::VMScanning::support_slot_enqueuing(tls, object) { - trace!("Scan object (edge) {}", object); - // If an object supports edge-enqueuing, we enqueue its edges. + trace!("Scan object (slot) {}", object); + // If an object supports slot-enqueuing, we enqueue its slots. ::VMScanning::scan_object(tls, object, &mut closure); self.post_scan_object(object); } else { - // If an object does not support edge-enqueuing, we have to use + // If an object does not support slot-enqueuing, we have to use // `Scanning::scan_object_and_trace_edges` and offload the job of updating the // reference field to the VM. // @@ -807,7 +821,7 @@ pub trait ScanObjectsWork: GCWork + Sized { } } - // If any object does not support edge-enqueuing, we process them now. + // If any object does not support slot-enqueuing, we process them now. if !scan_later.is_empty() { let object_tracer_context = ProcessEdgesWorkTracerContext:: { stage: self.get_bucket(), @@ -815,7 +829,7 @@ pub trait ScanObjectsWork: GCWork + Sized { }; object_tracer_context.with_tracer(worker, |object_tracer| { - // Scan objects and trace their edges at the same time. + // Scan objects and trace their outgoing edges at the same time. for object in scan_later.iter().copied() { trace!("Scan object (node) {}", object); ::VMScanning::scan_object_and_trace_edges( @@ -830,8 +844,8 @@ pub trait ScanObjectsWork: GCWork + Sized { } } -/// Scan objects and enqueue the edges of the objects. For objects that do not support -/// edge-enqueuing, this work packet also processes the edges. +/// Scan objects and enqueue the slots of the objects. For objects that do not support +/// slot-enqueuing, this work packet also traces their outgoing edges directly. 
/// /// This work packet does not execute policy-specific post-scanning hooks /// (it won't call `post_scan_object()` in [`policy::gc_work::PolicyTraceObject`]). @@ -899,12 +913,12 @@ impl + Plan, const KIND: TraceKin type ScanObjectsWorkType = PlanScanObjects; fn new( - edges: Vec>, + slots: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, ) -> Self { - let base = ProcessEdgesBase::new(edges, roots, mmtk, bucket); + let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket); let plan = base.plan().downcast_ref::

().unwrap(); Self { plan, base } } @@ -920,7 +934,7 @@ impl + Plan, const KIND: TraceKin .trace_object::(&mut self.base.nodes, object, worker) } - fn process_edge(&mut self, slot: SlotOf) { + fn process_slot(&mut self, slot: SlotOf) { let Some(object) = slot.load() else { // Skip slots that are not holding an object reference. return; @@ -1059,15 +1073,14 @@ impl, O2OPE: ProcessEdgesWork ProcessEdgesWork for UnsupportedProcessEdges { type ScanObjectsWorkType = ScanObjects; fn new( - _edges: Vec>, + _slots: Vec>, _roots: bool, _mmtk: &'static MMTK, _bucket: WorkBucketStage, diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index f27bc64b56..967da81b6c 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -570,7 +570,7 @@ impl GCWorkScheduler { plan_mut.end_of_gc(worker.tls); #[cfg(feature = "extreme_assertions")] - if crate::util::slot_logger::should_check_duplicate_edges(mmtk.get_plan()) { + if crate::util::slot_logger::should_check_duplicate_slots(mmtk.get_plan()) { // reset the logging info at the end of each GC mmtk.slot_logger.reset(); } diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs index eb37feb45c..150b4d67f7 100644 --- a/src/util/sanity/sanity_checker.rs +++ b/src/util/sanity/sanity_checker.rs @@ -12,8 +12,8 @@ use std::ops::{Deref, DerefMut}; pub struct SanityChecker { /// Visited objects refs: HashSet, - /// Cached root edges for sanity root scanning - root_edges: Vec>, + /// Cached root slots for sanity root scanning + root_slots: Vec>, /// Cached root nodes for sanity root scanning root_nodes: Vec>, } @@ -28,14 +28,14 @@ impl SanityChecker { pub fn new() -> Self { Self { refs: HashSet::new(), - root_edges: vec![], + root_slots: vec![], root_nodes: vec![], } } - /// Cache a list of root edges to the sanity checker. - pub fn add_root_edges(&mut self, roots: Vec) { - self.root_edges.push(roots) + /// Cache a list of root slots to the sanity checker. 
+ pub fn add_root_slots(&mut self, roots: Vec) { + self.root_slots.push(roots) } pub fn add_root_nodes(&mut self, roots: Vec) { @@ -44,7 +44,7 @@ impl SanityChecker { /// Reset roots cache at the end of the sanity gc. fn clear_roots_cache(&mut self) { - self.root_edges.clear(); + self.root_slots.clear(); self.root_nodes.clear(); } } @@ -66,7 +66,7 @@ impl GCWork for ScheduleSanityGC

{ scheduler.reset_state(); - // We are going to do sanity GC which will traverse the object graph again. Reset edge logger to clear recorded edges. + // We are going to do sanity GC which will traverse the object graph again. Reset slot logger to clear recorded slots. #[cfg(feature = "extreme_assertions")] mmtk.slot_logger.reset(); @@ -85,7 +85,7 @@ impl GCWork for ScheduleSanityGC

{ // } { let sanity_checker = mmtk.sanity_checker.lock().unwrap(); - for roots in &sanity_checker.root_edges { + for roots in &sanity_checker.root_slots { scheduler.work_buckets[WorkBucketStage::Closure].add( SanityGCProcessEdges::::new( roots.clone(), @@ -177,13 +177,13 @@ impl ProcessEdgesWork for SanityGCProcessEdges { const OVERWRITE_REFERENCE: bool = false; fn new( - edges: Vec>, + slots: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, ) -> Self { Self { - base: ProcessEdgesBase::new(edges, roots, mmtk, bucket), + base: ProcessEdgesBase::new(slots, roots, mmtk, bucket), // ..Default::default() } } diff --git a/src/util/slot_logger.rs b/src/util/slot_logger.rs index 57ec41007e..7cb231d8dc 100644 --- a/src/util/slot_logger.rs +++ b/src/util/slot_logger.rs @@ -1,6 +1,6 @@ -//! This is a simple module to log edges and check for duplicate edges. +//! This is a simple module to log slots and check for duplicate slots. //! -//! It uses a hash-set to keep track of edge, and is so very expensive. +//! It uses a hash-set to keep track of slots, and is so very expensive. //! We currently only use this as part of the `extreme_assertions` feature. //! @@ -11,8 +11,8 @@ use std::collections::HashSet; use std::sync::RwLock; pub struct SlotLogger { - // A private hash-set to keep track of edges. - edge_log: RwLock>, + // A private hash-set to keep track of slots. + slot_log: RwLock>, } unsafe impl Sync for SlotLogger {} @@ -20,38 +20,38 @@ impl SlotLogger { pub fn new() -> Self { Self { - edge_log: Default::default(), + slot_log: Default::default(), } } - /// Logs an edge. - /// Panics if the edge was already logged. + /// Logs a slot. + /// Panics if the slot was already logged. /// /// # Arguments /// - /// * `edge` - The edge to log. + /// * `slot` - The slot to log. 
/// - pub fn log_edge(&self, edge: SL) { - trace!("log_edge({:?})", edge); - let mut edge_log = self.edge_log.write().unwrap(); + pub fn log_slot(&self, slot: SL) { + trace!("log_slot({:?})", slot); + let mut slot_log = self.slot_log.write().unwrap(); assert!( - edge_log.insert(edge), - "duplicate edge ({:?}) detected", - edge + slot_log.insert(slot), + "duplicate slot ({:?}) detected", + slot ); } - /// Reset the edge logger by clearing the hash-set of edges. + /// Reset the slot logger by clearing the hash-set of slots. /// This function is called at the end of each GC iteration. /// pub fn reset(&self) { - let mut edge_log = self.edge_log.write().unwrap(); - edge_log.clear(); + let mut slot_log = self.slot_log.write().unwrap(); + slot_log.clear(); } } -/// Whether we should check duplicate edges. This depends on the actual plan. -pub fn should_check_duplicate_edges(plan: &dyn Plan) -> bool { +/// Whether we should check duplicate slots. This depends on the actual plan. +pub fn should_check_duplicate_slots(plan: &dyn Plan) -> bool { // If a plan allows tracing duplicate edges, we should not run this check. 
!plan.constraints().may_trace_duplicate_edges } From d3b6df84262a2aface0de399836efd6544be2a33 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 16:13:46 +0800 Subject: [PATCH 10/25] generational --- src/plan/generational/gc_work.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/plan/generational/gc_work.rs b/src/plan/generational/gc_work.rs index 7c7fd2debd..7b9028bf51 100644 --- a/src/plan/generational/gc_work.rs +++ b/src/plan/generational/gc_work.rs @@ -32,12 +32,12 @@ impl + PlanTraceObject, const KIND type ScanObjectsWorkType = PlanScanObjects; fn new( - edges: Vec>, + slots: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, ) -> Self { - let base = ProcessEdgesBase::new(edges, roots, mmtk, bucket); + let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket); let plan = base.plan().downcast_ref().unwrap(); Self { plan, base } } @@ -163,15 +163,15 @@ impl GCWork for ProcessRegionModBuf { .is_current_gc_nursery() { // Collect all the entries in all the slices - let mut edges = vec![]; + let mut slots = vec![]; for slice in &self.modbuf { - for edge in slice.iter_slots() { - edges.push(edge); + for slot in slice.iter_slots() { + slots.push(slot); } } // Forward entries GCWork::do_work( - &mut E::new(edges, false, mmtk, WorkBucketStage::Closure), + &mut E::new(slots, false, mmtk, WorkBucketStage::Closure), worker, mmtk, ) From a2a07e21fb24b25e0b7fa3d2224e8c6e4620796e Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 16:37:38 +0800 Subject: [PATCH 11/25] Comment on may_trace_duplicate_edges --- src/plan/plan_constraints.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index 85087d0dfc..01fe978ad7 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -21,9 +21,14 @@ pub struct PlanConstraints { pub max_non_los_copy_bytes: usize, /// Does this plan use the log bit? 
See vm::ObjectModel::GLOBAL_LOG_BIT_SPEC. pub needs_log_bit: bool, - /// Some plans may allow benign race for testing mark bit, and this will lead to trace the same edges - /// multiple times. If a plan allows tracing duplicate edges, we will not run duplicate edge check - /// in extreme_assertions. + /// Some plans may allow benign race for testing mark bit, and this will lead to trace the same + /// edge multiple times. If a plan allows tracing duplicated edges, we will not run duplicate + /// edge check in extreme_assertions. + /// + /// Note: Both [`crate::vm::Scanning::scan_object`] (which enqueues slots) and + /// [`crate::vm::Scanning::scan_object_and_trace_edges`] (which traces the targets directly) are + /// affected by such benign races. But our current duplicate edge check in extreme_assertions + /// only identifies duplicated slots. pub may_trace_duplicate_edges: bool, /// The barrier this plan uses. A binding may check this and know what kind of write barrier is in use /// if they would like to implement the barrier fast path in the binding side. From 6a87616b977d0db70d953fcc02a9dc3cedbfbfff Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 17:10:39 +0800 Subject: [PATCH 12/25] Update the comment of the ProccessEdgesWork trait --- src/scheduler/gc_work.rs | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 11765261bb..e4573b0cc1 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -507,19 +507,32 @@ impl ProcessEdgesBase { /// A short-hand for `::VMSlot`. pub type SlotOf = <::VM as VMBinding>::VMSlot; -/// Process object graph edges represented as object slots. Work packets of this trait contain a -/// list of object slots to be processed. They trace the objects pointed by the object reference in -/// each slot, and update the slots if the GC moves the target object when tracing. 
+/// An abstract trait for work packets that process object graph edges. Its method +/// [`ProcessEdgesWork::trace_object`] traces an object and, upon first visit, enqueues it into an +/// internal queue inside the `ProcessEdgesWork` instance. Each implementation of this trait +/// implements `trace_object` differently. During [`Plan::schedule_collection`], plans select +/// (usually via [`GCWorkContext`]) specialized implementations of this trait to be used during each +/// trace according to the nature of each trace, such as whether it is a nursery collection, whether it +/// is a defrag collection, whether it pins objects, etc. /// -/// Instances of this trait can also be abused as a provider of the `trace_object` method, without -/// representing edges as slots. In that case, an object graph edge is represented as the object -/// reference to the target node, while the source node is implicit. The caller of `trace_object` -/// is responsible for updating the slot (field or root variable). This is useful for -/// node-enqueuing tracing ([`Scanning::scan_object_and_trace_edges`]) as well as weak reference -/// processing ([`Scanning::process_weak_refs`]). +/// This trait was originally designed for work packets that process object graph edges represented +/// as slots. The constructor [`ProcessEdgesWork::new`] takes a vector of slots, and the created +/// work packet will trace the objects pointed by the object reference in each slot using the +/// `trace_object` method, and update the slot if the GC moves the target object when tracing. /// -/// TODO: We should refactor this trait to decouple it from slots. -/// See: +/// This trait can also be used merely as a provider of the `trace_object` method by giving it an +/// empty vector of slots. 
This is useful for node-enqueuing tracing +/// ([`Scanning::scan_object_and_trace_edges`]) as well as weak reference processing +/// ([`Scanning::process_weak_refs`] as well as [`ReferenceProcessor`] and +/// [`FinalizableProcessor`]). In those cases, the caller passes the reference to the target object +/// to `trace_object`, and the caller is responsible for updating the slots according to the return +/// value of `trace_object`. +/// +/// [`ReferenceProcessor`]: crate::util::reference_processor::ReferenceProcessor +/// [`FinalizableProcessor`]: crate::util::finalizable_processor::FinalizableProcessor +/// +/// TODO: We should refactor this trait to decouple it from slots. See: +/// // // Note: be very careful when using this trait. process_node() will push objects // to the buffer, and it is expected that at the end of the operation, flush() From f8ac1adb9b0f519e0b6413c963f793f79e5df0d2 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 17:16:19 +0800 Subject: [PATCH 13/25] Remove an outdated comment on ProcessEdgesWork --- src/scheduler/gc_work.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index e4573b0cc1..b8e4cb2544 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -533,13 +533,6 @@ pub type SlotOf = <::VM as VMBinding>::VMSlot; /// /// TODO: We should refactor this trait to decouple it from slots. See: /// -// -// Note: be very careful when using this trait. process_node() will push objects // to the buffer, and it is expected that at the end of the operation, flush() -// is called to create new scan work from the buffered objects. If flush() -// is not called, we may miss the objects in the GC and have dangling pointers. -// FIXME: We possibly want to enforce Drop on this trait, and require calling -// flush() in Drop. 
pub trait ProcessEdgesWork: Send + 'static + Sized + DerefMut + Deref> { From 729496c12a7b83ccfebc29ecfd6e4b6630e1a888 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 17:51:33 +0800 Subject: [PATCH 14/25] address, vo_bit and util --- src/util/address.rs | 2 +- src/util/metadata/vo_bit/mod.rs | 16 ++++++++-------- src/util/mod.rs | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/util/address.rs b/src/util/address.rs index 2bd9c613a9..b23ef587c7 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -480,7 +480,7 @@ use crate::vm::VMBinding; /// the opaque `ObjectReference` type, and we haven't seen a use case for now. /// /// Note that [`ObjectReference`] cannot be null. For the cases where a non-null object reference -/// may or may not exist, (such as the result of [`crate::vm::edge_shape::Edge::load`]) +/// may or may not exist, (such as the result of [`crate::vm::slot::Slot::load`]) /// `Option` should be used. [`ObjectReference`] is backed by `NonZeroUsize` /// which cannot be zero, and it has the `#[repr(transparent)]` attribute. Thanks to [null pointer /// optimization (NPO)][NPO], `Option` has the same size as `NonZeroUsize` and diff --git a/src/util/metadata/vo_bit/mod.rs b/src/util/metadata/vo_bit/mod.rs index 7f1648de8d..a64c8f7908 100644 --- a/src/util/metadata/vo_bit/mod.rs +++ b/src/util/metadata/vo_bit/mod.rs @@ -30,14 +30,14 @@ //! When the VO bits are available during tracing, if a plan uses evacuation to reclaim space, then //! both the from-space copy and the to-space copy of an object will have the VO-bit set. //! -//! *(Note: There are several reasons behind this semantics. One reason is that an edge may be -//! visited multiple times during GC. If an edge is visited twice, we will see it pointing to the -//! from-space copy during the first visit, but pointing to the to-space copy during the second -//! visit. 
We consider the edge valid if it points to either the from-space or the to-space copy. -//! If each edge is visited only once, and we see an edge happen to hold a pointer into the -//! to-space during its only visit, that must a dangling pointer, and error should be reported. -//! However, it is hard to guarantee each edge is only visited once during tracing because both the -//! VM and the GC algorithm may break this guarantee. See: +//! *(Note: There are several reasons behind this semantics. One reason is that a slot may be +//! visited multiple times during GC. If a slot is visited twice, we will see the object reference +//! in the slot pointing to the from-space copy during the first visit, but pointing to the to-space +//! copy during the second visit. We consider an object reference valid if it points to either the +//! from-space or the to-space copy. If each slot is visited only once, and we see a slot happen to +//! hold a pointer into the to-space during its only visit, that must be a dangling pointer, and +//! error should be reported. However, it is hard to guarantee each slot is only visited once +//! during tracing because both the VM and the GC algorithm may break this guarantee. See: //! [`crate::plan::PlanConstraints::may_trace_duplicate_edges`])* // FIXME: The entire vo_bit module should only be available if the "vo_bit" feature is enabled. diff --git a/src/util/mod.rs b/src/util/mod.rs index 38ad8ddaee..8b48555ff7 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -35,7 +35,7 @@ pub mod test_util; /// An analysis framework for collecting data and profiling in GC. #[cfg(feature = "analysis")] pub(crate) mod analysis; -/// Logging edges to check duplicated edges in GC. +/// Logging slots to check duplicated edges in GC. #[cfg(feature = "extreme_assertions")] pub(crate) mod slot_logger; /// Non-generic refs to generic types of ``. 
From e5030737d0ff067f5baacce77d05ed217ba4a000 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 17:57:52 +0800 Subject: [PATCH 15/25] WIP: mock tests --- src/util/test_util/mock_vm.rs | 24 ++++++++-------- ...{mock_test_edges.rs => mock_test_slots.rs} | 28 +++++++++---------- src/vm/tests/mock_tests/mod.rs | 2 +- 3 files changed, 27 insertions(+), 27 deletions(-) rename src/vm/tests/mock_tests/{mock_test_edges.rs => mock_test_slots.rs} (95%) diff --git a/src/util/test_util/mock_vm.rs b/src/util/test_util/mock_vm.rs index 30fae0f651..ee31be2269 100644 --- a/src/util/test_util/mock_vm.rs +++ b/src/util/test_util/mock_vm.rs @@ -12,7 +12,7 @@ use crate::util::heap::gc_trigger::GCTriggerPolicy; use crate::util::opaque_pointer::*; use crate::util::{Address, ObjectReference}; use crate::vm::object_model::specs::*; -use crate::vm::EdgeVisitor; +use crate::vm::SlotVisitor; use crate::vm::GCThreadContext; use crate::vm::ObjectTracer; use crate::vm::ObjectTracerContext; @@ -235,12 +235,12 @@ pub struct MockVM { pub weakref_get_referent: MockMethod>, pub weakref_enqueue_references: MockMethod<(&'static [ObjectReference], VMWorkerThread), ()>, // scanning - pub support_edge_enqueuing: MockMethod<(VMWorkerThread, ObjectReference), bool>, + pub support_slot_enqueuing: MockMethod<(VMWorkerThread, ObjectReference), bool>, pub scan_object: MockMethod< ( VMWorkerThread, ObjectReference, - &'static mut dyn EdgeVisitor<::VMEdge>, + &'static mut dyn SlotVisitor<::VMSlot>, ), (), >, @@ -312,7 +312,7 @@ impl Default for MockVM { weakref_set_referent: MockMethod::new_unimplemented(), weakref_enqueue_references: MockMethod::new_unimplemented(), - support_edge_enqueuing: MockMethod::new_fixed(Box::new(|_| true)), + support_slot_enqueuing: MockMethod::new_fixed(Box::new(|_| true)), scan_object: MockMethod::new_unimplemented(), scan_object_and_trace_edges: MockMethod::new_unimplemented(), // We instantiate a `MockMethod` with the arguments as 
ProcessEdgesWorkRootsWorkFactory<..., SFTProcessEdges, ...>, @@ -374,7 +374,7 @@ unsafe impl Sync for MockVM {} unsafe impl Send for MockVM {} impl VMBinding for MockVM { - type VMEdge = Address; + type VMSlot = Address; type VMMemorySlice = Range

; type VMActivePlan = MockVM; @@ -558,18 +558,18 @@ impl crate::vm::ReferenceGlue for MockVM { } impl crate::vm::Scanning for MockVM { - fn support_edge_enqueuing(tls: VMWorkerThread, object: ObjectReference) -> bool { - mock!(support_edge_enqueuing(tls, object)) + fn support_slot_enqueuing(tls: VMWorkerThread, object: ObjectReference) -> bool { + mock!(support_slot_enqueuing(tls, object)) } - fn scan_object::VMEdge>>( + fn scan_object::VMSlot>>( tls: VMWorkerThread, object: ObjectReference, - edge_visitor: &mut EV, + slot_visitor: &mut SV, ) { mock!(scan_object( tls, object, - lifetime!(edge_visitor as &mut dyn EdgeVisitor<::VMEdge>) + lifetime!(slot_visitor as &mut dyn SlotVisitor<::VMSlot>) )) } fn scan_object_and_trace_edges( @@ -586,7 +586,7 @@ impl crate::vm::Scanning for MockVM { fn scan_roots_in_mutator_thread( tls: VMWorkerThread, mutator: &'static mut Mutator, - factory: impl RootsWorkFactory<::VMEdge>, + factory: impl RootsWorkFactory<::VMSlot>, ) { mock_any!(scan_roots_in_mutator_thread( tls, @@ -596,7 +596,7 @@ impl crate::vm::Scanning for MockVM { } fn scan_vm_specific_roots( tls: VMWorkerThread, - factory: impl RootsWorkFactory<::VMEdge>, + factory: impl RootsWorkFactory<::VMSlot>, ) { mock_any!(scan_vm_specific_roots(tls, Box::new(factory))) } diff --git a/src/vm/tests/mock_tests/mock_test_edges.rs b/src/vm/tests/mock_tests/mock_test_slots.rs similarity index 95% rename from src/vm/tests/mock_tests/mock_test_edges.rs rename to src/vm/tests/mock_tests/mock_test_slots.rs index d068b40e4a..d8f01f2683 100644 --- a/src/vm/tests/mock_tests/mock_test_edges.rs +++ b/src/vm/tests/mock_tests/mock_test_slots.rs @@ -5,7 +5,7 @@ use super::mock_test_prelude::*; use crate::{ util::{Address, ObjectReference}, - vm::edge_shape::{Edge, SimpleEdge}, + vm::slot::{Slot, SimpleSlot}, }; use atomic::{Atomic, Ordering}; @@ -13,7 +13,7 @@ lazy_static! 
{ static ref FIXTURE: Fixture = Fixture::new(); } -mod simple_edges { +mod simple_slots { use super::*; #[test] @@ -24,8 +24,8 @@ mod simple_edges { FIXTURE.with_fixture(|fixture| { let mut slot: Atomic = Atomic::new(fixture.objref1); - let edge = SimpleEdge::from_address(Address::from_ref(&slot)); - let objref = edge.load(); + let slot = SimpleSlot::from_address(Address::from_ref(&slot)); + let objref = slot.load(); assert_eq!(objref, Some(fixture.objref1)); }); @@ -42,7 +42,7 @@ mod simple_edges { FIXTURE.with_fixture(|fixture| { let mut slot: Atomic = Atomic::new(fixture.objref1); - let edge = SimpleEdge::from_address(Address::from_ref(&slot)); + let edge = SimpleSlot::from_address(Address::from_ref(&slot)); edge.store(fixture.objref2); assert_eq!(slot.load(Ordering::SeqCst), fixture.objref2); @@ -80,7 +80,7 @@ mod compressed_oop { } } - impl Edge for CompressedOopEdge { + impl Slot for CompressedOopEdge { fn load(&self) -> Option { let compressed = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; let expanded = (compressed as usize) << 3; @@ -173,7 +173,7 @@ mod offset_edge { } } - impl Edge for OffsetEdge { + impl Slot for OffsetEdge { fn load(&self) -> Option { let middle = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; let begin = middle - self.offset; @@ -254,7 +254,7 @@ mod tagged_edge { } } - impl Edge for TaggedEdge { + impl Slot for TaggedEdge { fn load(&self) -> Option { let tagged = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; let untagged = tagged & !Self::TAG_BITS_MASK; @@ -344,12 +344,12 @@ mod mixed { use super::tagged_edge::TaggedEdge; use super::tagged_edge::TAG1; use super::*; - use crate::vm::edge_shape::SimpleEdge; + use crate::vm::slot::SimpleSlot; /// If a VM supports multiple kinds of edges, we can use tagged union to represent all of them. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum DummyVMEdge { - Simple(SimpleEdge), + Simple(SimpleSlot), #[cfg(target_pointer_width = "64")] Compressed(compressed_oop::CompressedOopEdge), Offset(OffsetEdge), @@ -358,7 +358,7 @@ mod mixed { unsafe impl Send for DummyVMEdge {} - impl Edge for DummyVMEdge { + impl Slot for DummyVMEdge { fn load(&self) -> Option { match self { DummyVMEdge::Simple(e) => e.load(), @@ -395,7 +395,7 @@ mod mixed { let mut slot3: Atomic
= Atomic::new(addr1 + OFFSET); let mut slot4: Atomic = Atomic::new(addr1.as_usize() | TAG1); - let edge1 = SimpleEdge::from_address(Address::from_ref(&slot1)); + let edge1 = SimpleSlot::from_address(Address::from_ref(&slot1)); let edge3 = OffsetEdge::new_with_offset(Address::from_ref(&slot3), OFFSET); let edge4 = TaggedEdge::new(Address::from_ref(&slot4)); @@ -409,7 +409,7 @@ mod mixed { assert_eq!( objref, Some(fixture.objref1), - "Edge {} is not properly loaded", + "Slot {} is not properly loaded", i ); } @@ -421,7 +421,7 @@ mod mixed { assert_eq!( objref, Some(fixture.objref2), - "Edge {} is not properly loaded after store", + "Slot {} is not properly loaded after store", i ); } diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs index 5bf880fbb6..11152519f3 100644 --- a/src/vm/tests/mock_tests/mod.rs +++ b/src/vm/tests/mock_tests/mod.rs @@ -29,7 +29,7 @@ mod mock_test_allocator_info; mod mock_test_barrier_slow_path_assertion; #[cfg(feature = "is_mmtk_object")] mod mock_test_conservatism; -mod mock_test_edges; +mod mock_test_slots; #[cfg(target_os = "linux")] mod mock_test_handle_mmap_conflict; mod mock_test_handle_mmap_oom; From 1d1e59aa33d61e473398aab56335c63a1c462657 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 18:06:30 +0800 Subject: [PATCH 16/25] mock_test_slots --- src/vm/tests/mock_tests/mock_test_slots.rs | 194 +++++++++++---------- 1 file changed, 98 insertions(+), 96 deletions(-) diff --git a/src/vm/tests/mock_tests/mock_test_slots.rs b/src/vm/tests/mock_tests/mock_test_slots.rs index d8f01f2683..be448429c3 100644 --- a/src/vm/tests/mock_tests/mock_test_slots.rs +++ b/src/vm/tests/mock_tests/mock_test_slots.rs @@ -22,9 +22,9 @@ mod simple_slots { default_setup, || { FIXTURE.with_fixture(|fixture| { - let mut slot: Atomic = Atomic::new(fixture.objref1); + let mut rust_slot: Atomic = Atomic::new(fixture.objref1); - let slot = SimpleSlot::from_address(Address::from_ref(&slot)); + let slot = 
SimpleSlot::from_address(Address::from_ref(&rust_slot)); let objref = slot.load(); assert_eq!(objref, Some(fixture.objref1)); @@ -40,13 +40,13 @@ mod simple_slots { default_setup, || { FIXTURE.with_fixture(|fixture| { - let mut slot: Atomic = Atomic::new(fixture.objref1); + let mut rust_slot: Atomic = Atomic::new(fixture.objref1); - let edge = SimpleSlot::from_address(Address::from_ref(&slot)); - edge.store(fixture.objref2); - assert_eq!(slot.load(Ordering::SeqCst), fixture.objref2); + let slot = SimpleSlot::from_address(Address::from_ref(&rust_slot)); + slot.store(fixture.objref2); + assert_eq!(rust_slot.load(Ordering::SeqCst), fixture.objref2); - let objref = edge.load(); + let objref = slot.load(); assert_eq!(objref, Some(fixture.objref2)); }); }, @@ -61,15 +61,15 @@ mod compressed_oop { /// This represents a location that holds a 32-bit pointer on a 64-bit machine. /// - /// OpenJDK uses this kind of edge to store compressed OOPs on 64-bit machines. + /// OpenJDK uses this kind of slot to store compressed OOPs on 64-bit machines. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] - pub struct CompressedOopEdge { + pub struct CompressedOopSlot { slot_addr: *mut Atomic, } - unsafe impl Send for CompressedOopEdge {} + unsafe impl Send for CompressedOopSlot {} - impl CompressedOopEdge { + impl CompressedOopSlot { pub fn from_address(address: Address) -> Self { Self { slot_addr: address.to_mut_ptr(), @@ -80,7 +80,7 @@ mod compressed_oop { } } - impl Slot for CompressedOopEdge { + impl Slot for CompressedOopSlot { fn load(&self) -> Option { let compressed = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; let expanded = (compressed as usize) << 3; @@ -101,15 +101,15 @@ mod compressed_oop { #[test] pub fn load_compressed() { // Note: We cannot guarantee GC will allocate an object in the low address region. - // So we make up addresses just for testing the bit operations of compressed OOP edges. 
+ // So we make up addresses just for testing the bit operations of compressed OOP slots. let compressed1 = (COMPRESSABLE_ADDR1 >> 3) as u32; let objref1 = ObjectReference::from_raw_address(unsafe { Address::from_usize(COMPRESSABLE_ADDR1) }); - let mut slot: Atomic = Atomic::new(compressed1); + let mut rust_slot: Atomic = Atomic::new(compressed1); - let edge = CompressedOopEdge::from_address(Address::from_ref(&slot)); - let objref = edge.load(); + let slot = CompressedOopSlot::from_address(Address::from_ref(&rust_slot)); + let objref = slot.load(); assert_eq!(objref, objref1); } @@ -117,39 +117,39 @@ mod compressed_oop { #[test] pub fn store_compressed() { // Note: We cannot guarantee GC will allocate an object in the low address region. - // So we make up addresses just for testing the bit operations of compressed OOP edges. + // So we make up addresses just for testing the bit operations of compressed OOP slots. let compressed1 = (COMPRESSABLE_ADDR1 >> 3) as u32; let compressed2 = (COMPRESSABLE_ADDR2 >> 3) as u32; let objref2 = ObjectReference::from_raw_address(unsafe { Address::from_usize(COMPRESSABLE_ADDR2) }) .unwrap(); - let mut slot: Atomic = Atomic::new(compressed1); + let mut rust_slot: Atomic = Atomic::new(compressed1); - let edge = CompressedOopEdge::from_address(Address::from_ref(&slot)); - edge.store(objref2); - assert_eq!(slot.load(Ordering::SeqCst), compressed2); + let slot = CompressedOopSlot::from_address(Address::from_ref(&rust_slot)); + slot.store(objref2); + assert_eq!(rust_slot.load(Ordering::SeqCst), compressed2); - let objref = edge.load(); + let objref = slot.load(); assert_eq!(objref, Some(objref2)); } } -mod offset_edge { +mod offset_slot { use super::*; - /// This represents an edge that holds a pointer to the *middle* of an object, and the offset is known. + /// This represents a slot that holds a pointer to the *middle* of an object, and the offset is known. 
/// /// Julia uses this trick to facilitate deleting array elements from the front. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] - pub struct OffsetEdge { + pub struct OffsetSlot { slot_addr: *mut Atomic
, offset: usize, } - unsafe impl Send for OffsetEdge {} + unsafe impl Send for OffsetSlot {} - impl OffsetEdge { + impl OffsetSlot { pub fn new_no_offset(address: Address) -> Self { Self { slot_addr: address.to_mut_ptr(), @@ -173,7 +173,7 @@ mod offset_edge { } } - impl Slot for OffsetEdge { + impl Slot for OffsetSlot { fn load(&self) -> Option { let middle = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; let begin = middle - self.offset; @@ -196,10 +196,10 @@ mod offset_edge { || { FIXTURE.with_fixture(|fixture| { let addr1 = fixture.objref1.to_raw_address(); - let mut slot: Atomic
= Atomic::new(addr1 + OFFSET); + let mut rust_slot: Atomic
= Atomic::new(addr1 + OFFSET); - let edge = OffsetEdge::new_with_offset(Address::from_ref(&slot), OFFSET); - let objref = edge.load(); + let slot = OffsetSlot::new_with_offset(Address::from_ref(&rust_slot), OFFSET); + let objref = slot.load(); assert_eq!(objref, Some(fixture.objref1)); }); @@ -216,13 +216,13 @@ mod offset_edge { FIXTURE.with_fixture(|fixture| { let addr1 = fixture.objref1.to_raw_address(); let addr2 = fixture.objref2.to_raw_address(); - let mut slot: Atomic
= Atomic::new(addr1 + OFFSET); + let mut rust_slot: Atomic
= Atomic::new(addr1 + OFFSET); - let edge = OffsetEdge::new_with_offset(Address::from_ref(&slot), OFFSET); - edge.store(fixture.objref2); - assert_eq!(slot.load(Ordering::SeqCst), addr2 + OFFSET); + let slot = OffsetSlot::new_with_offset(Address::from_ref(&rust_slot), OFFSET); + slot.store(fixture.objref2); + assert_eq!(rust_slot.load(Ordering::SeqCst), addr2 + OFFSET); - let objref = edge.load(); + let objref = slot.load(); assert_eq!(objref, Some(fixture.objref2)); }); }, @@ -231,18 +231,19 @@ } } -mod tagged_edge { +mod tagged_slot { use super::*; - /// This edge presents the object reference itself to mmtk-core. + /// This represents a slot that holds a tagged pointer. + /// The last two bits are tag bits and are not part of the object reference. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] - pub struct TaggedEdge { + pub struct TaggedSlot { slot_addr: *mut Atomic, } - unsafe impl Send for TaggedEdge {} + unsafe impl Send for TaggedSlot {} - impl TaggedEdge { + impl TaggedSlot { // The DummyVM has OBJECT_REF_OFFSET = 4. // Using a two-bit tag should be safe on both 32-bit and 64-bit platforms.
const TAG_BITS_MASK: usize = 0b11; @@ -254,7 +255,7 @@ mod tagged_edge { } } - impl Slot for TaggedEdge { + impl Slot for TaggedSlot { fn load(&self) -> Option { let tagged = unsafe { (*self.slot_addr).load(atomic::Ordering::Relaxed) }; let untagged = tagged & !Self::TAG_BITS_MASK; @@ -278,15 +279,15 @@ mod tagged_edge { default_setup, || { FIXTURE.with_fixture(|fixture| { - let mut slot1: Atomic = + let mut rust_slot1: Atomic = Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG1); - let mut slot2: Atomic = + let mut rust_slot2: Atomic = Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG2); - let edge1 = TaggedEdge::new(Address::from_ref(&slot1)); - let edge2 = TaggedEdge::new(Address::from_ref(&slot2)); - let objref1 = edge1.load(); - let objref2 = edge2.load(); + let slot1 = TaggedSlot::new(Address::from_ref(&rust_slot1)); + let slot2 = TaggedSlot::new(Address::from_ref(&rust_slot2)); + let objref1 = slot1.load(); + let objref2 = slot2.load(); // Tags should not affect loaded values. assert_eq!(objref1, Some(fixture.objref1)); @@ -303,28 +304,28 @@ mod tagged_edge { default_setup, || { FIXTURE.with_fixture(|fixture| { - let mut slot1: Atomic = + let mut rust_slot1: Atomic = Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG1); - let mut slot2: Atomic = + let mut rust_slot2: Atomic = Atomic::new(fixture.objref1.to_raw_address().as_usize() | TAG2); - let edge1 = TaggedEdge::new(Address::from_ref(&slot1)); - let edge2 = TaggedEdge::new(Address::from_ref(&slot2)); - edge1.store(fixture.objref2); - edge2.store(fixture.objref2); + let slot1 = TaggedSlot::new(Address::from_ref(&rust_slot1)); + let slot2 = TaggedSlot::new(Address::from_ref(&rust_slot2)); + slot1.store(fixture.objref2); + slot2.store(fixture.objref2); // Tags should be preserved. 
assert_eq!( - slot1.load(Ordering::SeqCst), + rust_slot1.load(Ordering::SeqCst), fixture.objref2.to_raw_address().as_usize() | TAG1 ); assert_eq!( - slot2.load(Ordering::SeqCst), + rust_slot2.load(Ordering::SeqCst), fixture.objref2.to_raw_address().as_usize() | TAG2 ); - let objref1 = edge1.load(); - let objref2 = edge2.load(); + let objref1 = slot1.load(); + let objref2 = slot2.load(); // Tags should not affect loaded values. assert_eq!(objref1, Some(fixture.objref2)); @@ -338,44 +339,45 @@ mod tagged_edge { mod mixed { #[cfg(target_pointer_width = "64")] - use super::compressed_oop::CompressedOopEdge; - use super::offset_edge::OffsetEdge; - use super::offset_edge::OFFSET; - use super::tagged_edge::TaggedEdge; - use super::tagged_edge::TAG1; + use super::compressed_oop::CompressedOopSlot; + use super::offset_slot::OffsetSlot; + use super::offset_slot::OFFSET; + use super::tagged_slot::TaggedSlot; + use super::tagged_slot::TAG1; use super::*; use crate::vm::slot::SimpleSlot; - /// If a VM supports multiple kinds of edges, we can use tagged union to represent all of them. + /// If a VM supports multiple kinds of slots, we can use tagged union to represent all of them. + /// This is for testing, only. A Rust `enum` may not be the most efficient representation. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] - pub enum DummyVMEdge { + pub enum DummyVMSlot { Simple(SimpleSlot), #[cfg(target_pointer_width = "64")] - Compressed(compressed_oop::CompressedOopEdge), - Offset(OffsetEdge), - Tagged(TaggedEdge), + Compressed(compressed_oop::CompressedOopSlot), + Offset(OffsetSlot), + Tagged(TaggedSlot), } - unsafe impl Send for DummyVMEdge {} + unsafe impl Send for DummyVMSlot {} - impl Slot for DummyVMEdge { + impl Slot for DummyVMSlot { fn load(&self) -> Option { match self { - DummyVMEdge::Simple(e) => e.load(), + DummyVMSlot::Simple(e) => e.load(), #[cfg(target_pointer_width = "64")] - DummyVMEdge::Compressed(e) => e.load(), - DummyVMEdge::Offset(e) => e.load(), - DummyVMEdge::Tagged(e) => e.load(), + DummyVMSlot::Compressed(e) => e.load(), + DummyVMSlot::Offset(e) => e.load(), + DummyVMSlot::Tagged(e) => e.load(), } } fn store(&self, object: ObjectReference) { match self { - DummyVMEdge::Simple(e) => e.store(object), + DummyVMSlot::Simple(e) => e.store(object), #[cfg(target_pointer_width = "64")] - DummyVMEdge::Compressed(e) => e.store(object), - DummyVMEdge::Offset(e) => e.store(object), - DummyVMEdge::Tagged(e) => e.store(object), + DummyVMSlot::Compressed(e) => e.store(object), + DummyVMSlot::Offset(e) => e.store(object), + DummyVMSlot::Tagged(e) => e.store(object), } } } @@ -391,21 +393,21 @@ mod mixed { let addr1 = fixture.objref1.to_raw_address(); let addr2 = fixture.objref2.to_raw_address(); - let mut slot1: Atomic = Atomic::new(fixture.objref1); - let mut slot3: Atomic
= Atomic::new(addr1 + OFFSET); - let mut slot4: Atomic = Atomic::new(addr1.as_usize() | TAG1); + let mut rust_slot1: Atomic = Atomic::new(fixture.objref1); + let mut rust_slot3: Atomic
= Atomic::new(addr1 + OFFSET); + let mut rust_slot4: Atomic = Atomic::new(addr1.as_usize() | TAG1); - let edge1 = SimpleSlot::from_address(Address::from_ref(&slot1)); - let edge3 = OffsetEdge::new_with_offset(Address::from_ref(&slot3), OFFSET); - let edge4 = TaggedEdge::new(Address::from_ref(&slot4)); + let slot1 = SimpleSlot::from_address(Address::from_ref(&rust_slot1)); + let slot3 = OffsetSlot::new_with_offset(Address::from_ref(&rust_slot3), OFFSET); + let slot4 = TaggedSlot::new(Address::from_ref(&rust_slot4)); - let de1 = DummyVMEdge::Simple(edge1); - let de3 = DummyVMEdge::Offset(edge3); - let de4 = DummyVMEdge::Tagged(edge4); + let ds1 = DummyVMSlot::Simple(slot1); + let ds3 = DummyVMSlot::Offset(slot3); + let ds4 = DummyVMSlot::Tagged(slot4); - let edges = [de1, de3, de4]; - for (i, edge) in edges.iter().enumerate() { - let objref = edge.load(); + let slots = [ds1, ds3, ds4]; + for (i, slot) in slots.iter().enumerate() { + let objref = slot.load(); assert_eq!( objref, Some(fixture.objref1), @@ -414,10 +416,10 @@ mod mixed { ); } - let mutable_edges = [de1, de3, de4]; - for (i, edge) in mutable_edges.iter().enumerate() { - edge.store(fixture.objref2); - let objref = edge.load(); + let mutable_slots = [ds1, ds3, ds4]; + for (i, slot) in mutable_slots.iter().enumerate() { + slot.store(fixture.objref2); + let objref = slot.load(); assert_eq!( objref, Some(fixture.objref2), @@ -426,8 +428,8 @@ mod mixed { ); } - assert_eq!(slot1.load(Ordering::SeqCst), fixture.objref2); - assert_eq!(slot3.load(Ordering::SeqCst), addr2 + OFFSET); + assert_eq!(rust_slot1.load(Ordering::SeqCst), fixture.objref2); + assert_eq!(rust_slot3.load(Ordering::SeqCst), addr2 + OFFSET); }); }, no_cleanup, From e2bb3fbc833b0159cf374196a58d5a92a07b8166 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 18:08:55 +0800 Subject: [PATCH 17/25] Other tests --- .../mock_test_barrier_slow_path_assertion.rs | 12 ++++++------ tests/test_roots_work_factory.rs | 10 +++++----- 2 files 
changed, 11 insertions(+), 11 deletions(-) diff --git a/src/vm/tests/mock_tests/mock_test_barrier_slow_path_assertion.rs b/src/vm/tests/mock_tests/mock_test_barrier_slow_path_assertion.rs index 2d22d06aea..3f8dfe4582 100644 --- a/src/vm/tests/mock_tests/mock_test_barrier_slow_path_assertion.rs +++ b/src/vm/tests/mock_tests/mock_test_barrier_slow_path_assertion.rs @@ -19,9 +19,9 @@ fn test_assertion_barrier_invalid_ref() { FIXTURE.with_fixture_mut(|fixture| { let objref = fixture.objref; - // Create an edge + // Create a slot let slot = Atomic::new(objref); - let edge = Address::from_ref(&slot); + let slot = Address::from_ref(&slot); // Create an invalid object reference (offset 8 bytes on the original object ref), and invoke barrier slowpath with it // The invalid object ref has no VO bit, and the assertion should fail. @@ -29,7 +29,7 @@ fn test_assertion_barrier_invalid_ref() { ObjectReference::from_raw_address(objref.to_raw_address() + 8usize).unwrap(); fixture.mutator_mut().barrier.object_reference_write_slow( invalid_objref, - edge, + slot, Some(objref), ); }); @@ -46,14 +46,14 @@ fn test_assertion_barrier_valid_ref() { FIXTURE.with_fixture_mut(|fixture| { let objref = fixture.objref; - // Create an edge + // Create a slot let slot = Atomic::new(objref); - let edge = Address::from_ref(&slot); + let slot = Address::from_ref(&slot); // Invoke barrier slowpath with the valid object ref fixture.mutator_mut().barrier.object_reference_write_slow( objref, - edge, + slot, Some(objref), ); }); diff --git a/tests/test_roots_work_factory.rs b/tests/test_roots_work_factory.rs index 74db5306b6..52b39a05c1 100644 --- a/tests/test_roots_work_factory.rs +++ b/tests/test_roots_work_factory.rs @@ -25,7 +25,7 @@ impl MockScanning { } } -static EDGES: [Address; 3] = [ +static SLOTS: [Address; 3] = [ unsafe { Address::from_usize(0x8) }, unsafe { Address::from_usize(0x8) }, unsafe { Address::from_usize(0x8) }, @@ -42,8 +42,8 @@ struct MockFactory { } impl RootsWorkFactory
for MockFactory { - fn create_process_slot_roots_work(&mut self, edges: Vec
) { - assert_eq!(edges, EDGES); + fn create_process_slot_roots_work(&mut self, slots: Vec
) { + assert_eq!(slots, SLOTS); match self.round { 1 => { assert_eq!(self.v, "y"); @@ -84,7 +84,7 @@ fn test_scan() { a: Arc::new(Mutex::new("a".to_string())), }; let mut scanning = MockScanning::default(); - scanning.add_roots(&EDGES); + scanning.add_roots(&SLOTS); scanning.mock_scan_roots(factory); } @@ -105,7 +105,7 @@ fn test_clone() { let mut scanning = MockScanning::default(); - scanning.add_roots(&EDGES); + scanning.add_roots(&SLOTS); scanning.mock_scan_roots(factory1); scanning.mock_scan_roots(factory2); } From c84f7d68177918ecc1663853e208786c38d1d501 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 18:26:26 +0800 Subject: [PATCH 18/25] vm --- src/vm/mod.rs | 3 +-- src/vm/slot.rs | 4 ++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/vm/mod.rs b/src/vm/mod.rs index bea40a3a7d..2a6b4c3f5a 100644 --- a/src/vm/mod.rs +++ b/src/vm/mod.rs @@ -17,7 +17,6 @@ mod active_plan; mod collection; -/// Allows MMTk to access edges in a VM-defined way. pub mod slot; pub(crate) mod object_model; mod reference_glue; @@ -59,7 +58,7 @@ where /// The binding's implementation of [`crate::vm::ReferenceGlue`]. type VMReferenceGlue: ReferenceGlue; - /// The type of edges in this VM. + /// The type of slots in this VM. type VMSlot: slot::Slot; /// The type of heap memory slice in this VM. type VMMemorySlice: slot::MemorySlice; diff --git a/src/vm/slot.rs b/src/vm/slot.rs index 3f02a564f0..2a9886b78b 100644 --- a/src/vm/slot.rs +++ b/src/vm/slot.rs @@ -1,3 +1,7 @@ +//! This module provides the trait [`Slot`] and related traits and types which allow VMs to +//! customize the layout of slots and the behavior of loading and updating object references in +//! slots. 
+ use std::hash::Hash; use std::marker::PhantomData; use std::{fmt::Debug, ops::Range}; From b93a6e96ddaacd4f7bb53eeab80079c3f4dcb8af Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 18:38:17 +0800 Subject: [PATCH 19/25] eBPF tools --- tools/tracing/README.md | 8 ++++---- tools/tracing/performance/README.md | 23 ++++++++++++----------- tools/tracing/performance/packet_size.bt | 6 +++--- tools/tracing/timeline/capture.bt | 4 ++-- 4 files changed, 21 insertions(+), 20 deletions(-) diff --git a/tools/tracing/README.md b/tools/tracing/README.md index 0dc05b3210..1f4db1a489 100644 --- a/tools/tracing/README.md +++ b/tools/tracing/README.md @@ -27,9 +27,9 @@ Currently, the core provides the following tracepoints. - `mmtk:gcworker_exit()`: a GC worker thread exits its work loop - `mmtk:gc_start()`: a collection epoch starts - `mmtk:gc_end()`: a collection epoch ends -- `mmtk:process_edges(num_edges: int, is_roots: bool)`: a invocation of the `process_edges` - method. The first argument is the number of edges to be processed, and the second argument is - whether these edges are root edges. +- `mmtk:process_slots(num_slots: int, is_roots: bool)`: an invocation of the `process_slots` + method. The first argument is the number of slots to be processed, and the second argument is + whether these slots are root slots. - `mmtk:bucket_opened(id: int)`: a work bucket opened. The first argument is the numerical representation of `enum WorkBucketStage`. - `mmtk:work_poll()`: a work packet is to be polled. @@ -44,7 +44,7 @@ Currently, the core provides the following tracepoints. Each sub-directory contains a set of scripts. - `performance`: Print various GC-related statistics, such as the distribution of time spent in - allocation slow path, the time spent in each GC stages, and the distribution of `process_edges` + allocation slow path, the time spent in each GC stages, and the distribution of `process_slots` packet sizes. 
- `timeline`: Record the start and end time of each GC and each work packet, and visualize them on a timeline in Perfetto UI. diff --git a/tools/tracing/performance/README.md b/tools/tracing/performance/README.md index fb689285a3..bb4b00011d 100644 --- a/tools/tracing/performance/README.md +++ b/tools/tracing/performance/README.md @@ -152,15 +152,16 @@ If you can't tell which lock instance is for which lock in MMTk, you can trace the allocation of the Mutex and record the stack trace (note that you might want to compile MMTk with `force-frame-pointers` to obtain better stack traces). -### Measuring the distribution of `process_edges` packet sizes (`packet_size`) -Most of the GC time is spend in the transitive closure for tracing-based GCs, -and MMTk performs transitive closure via work packets that calls the `process_edges` method. -This tool measures the distribution of the sizes of these work packets, and also -count root edges separately. +### Measuring the distribution of `process_slots` packet sizes (`packet_size`) + +Most of the GC time is spend in the transitive closure for tracing-based GCs, and MMTk performs +transitive closure via work packets that call the `ProcessEdgesWork::process_slots` method. This +tool measures the distribution of the sizes of these work packets, and also counts root slots +separately. Sample output: ``` -@process_edges_packet_size: +@process_slots_packet_size: [1] 238 |@@@@@ | [2, 4) 806 |@@@@@@@@@@@@@@@@@ | [4, 8) 1453 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ | @@ -177,7 +178,7 @@ Sample output: [8K, 16K) 58 |@ | [16K, 32K) 5 | | -@process_edges_root_packet_size: +@process_slots_root_packet_size: [1] 71 |@@@@@@@ | [2, 4) 4 | | [4, 8) 276 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@ | @@ -195,8 +196,8 @@ Sample output: [16K, 32K) 3 | | ``` -In the above output, we can see that overall, the sizes of the `process_edges` -has a unimodal distribution with a peak around 16\~32 edges per packet. 
-However, if we focus on root edges, the distribution is roughly bimodal, with a -first peak around 8\~16 and a second peak around 4096\~8192. +In the above output, we can see that overall, the number of slots processed by a invocation of +`process_slots` has a unimodal distribution with a peak around 16\~32 slots per packet. However, if +we focus on root slots, the distribution is roughly bimodal, with a first peak around 8\~16 and a +second peak around 4096\~8192. diff --git a/tools/tracing/performance/packet_size.bt b/tools/tracing/performance/packet_size.bt index c50cd98af7..b3dc59a59e 100644 --- a/tools/tracing/performance/packet_size.bt +++ b/tools/tracing/performance/packet_size.bt @@ -1,8 +1,8 @@ -usdt:$MMTK:mmtk:process_edges { +usdt:$MMTK:mmtk:process_slots { if (@stats_enabled) { - @process_edges_packet_size = hist(arg0); + @process_slots_packet_size = hist(arg0); if (arg1) { - @process_edges_root_packet_size = hist(arg0); + @process_slots_root_packet_size = hist(arg0); } } } diff --git a/tools/tracing/timeline/capture.bt b/tools/tracing/timeline/capture.bt index fa6f01d4a2..edd3057ea6 100644 --- a/tools/tracing/timeline/capture.bt +++ b/tools/tracing/timeline/capture.bt @@ -73,9 +73,9 @@ usdt:$MMTK:mmtk:work { } } -usdt:$MMTK:mmtk:process_edges { +usdt:$MMTK:mmtk:process_slots { if (@enable_print) { - printf("process_edges,meta,%d,%lu,%lu,%lu\n", tid, nsecs, arg0, arg1); + printf("process_slots,meta,%d,%lu,%lu,%lu\n", tid, nsecs, arg0, arg1); } } From 89f040de023112ade8d553f84357e70e1ecac0a0 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 18:38:56 +0800 Subject: [PATCH 20/25] Formatting --- src/mmtk.rs | 4 ++-- src/plan/barriers.rs | 2 +- src/plan/generational/gc_work.rs | 2 +- src/plan/tracing.rs | 2 +- src/util/mod.rs | 6 +++--- src/util/test_util/mock_vm.rs | 2 +- src/vm/mod.rs | 4 ++-- src/vm/tests/mock_tests/mock_test_slots.rs | 2 +- src/vm/tests/mock_tests/mod.rs | 2 +- 9 files changed, 13 insertions(+), 13 deletions(-) diff 
--git a/src/mmtk.rs b/src/mmtk.rs index ff31f28969..eea56e7e45 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -8,8 +8,6 @@ use crate::scheduler::GCWorkScheduler; #[cfg(feature = "analysis")] use crate::util::analysis::AnalysisManager; -#[cfg(feature = "extreme_assertions")] -use crate::util::slot_logger::SlotLogger; use crate::util::finalizable_processor::FinalizableProcessor; use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::layout::vm_layout::VMLayout; @@ -20,6 +18,8 @@ use crate::util::options::Options; use crate::util::reference_processor::ReferenceProcessors; #[cfg(feature = "sanity")] use crate::util::sanity::sanity_checker::SanityChecker; +#[cfg(feature = "extreme_assertions")] +use crate::util::slot_logger::SlotLogger; use crate::util::statistics::stats::Stats; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs index 3bcab4c1d5..39932fda93 100644 --- a/src/plan/barriers.rs +++ b/src/plan/barriers.rs @@ -1,6 +1,6 @@ //! Read/Write barrier implementations. -use crate::vm::slot::{Slot, MemorySlice}; +use crate::vm::slot::{MemorySlice, Slot}; use crate::vm::ObjectModel; use crate::{ util::{metadata::MetadataSpec, *}, diff --git a/src/plan/generational/gc_work.rs b/src/plan/generational/gc_work.rs index 7b9028bf51..03240b4e93 100644 --- a/src/plan/generational/gc_work.rs +++ b/src/plan/generational/gc_work.rs @@ -5,7 +5,7 @@ use crate::plan::VectorObjectQueue; use crate::policy::gc_work::TraceKind; use crate::scheduler::{gc_work::*, GCWork, GCWorker, WorkBucketStage}; use crate::util::ObjectReference; -use crate::vm::slot::{Slot, MemorySlice}; +use crate::vm::slot::{MemorySlice, Slot}; use crate::vm::*; use crate::MMTK; use std::marker::PhantomData; diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 71504c31fd..2f9ac4c3c9 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -1,7 +1,7 @@ //! This module contains code useful for tracing, //! i.e. 
visiting the reachable objects by traversing all or part of an object graph. -use crate::scheduler::gc_work::{SlotOf, ProcessEdgesWork}; +use crate::scheduler::gc_work::{ProcessEdgesWork, SlotOf}; use crate::scheduler::{GCWorker, WorkBucketStage}; use crate::util::ObjectReference; use crate::vm::SlotVisitor; diff --git a/src/util/mod.rs b/src/util/mod.rs index 8b48555ff7..da38934e63 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -35,9 +35,6 @@ pub mod test_util; /// An analysis framework for collecting data and profiling in GC. #[cfg(feature = "analysis")] pub(crate) mod analysis; -/// Logging slots to check duplicated edges in GC. -#[cfg(feature = "extreme_assertions")] -pub(crate) mod slot_logger; /// Non-generic refs to generic types of ``. pub(crate) mod erase_vm; /// Finalization implementation. @@ -62,6 +59,9 @@ pub(crate) mod rust_util; /// Sanity checker for GC. #[cfg(feature = "sanity")] pub(crate) mod sanity; +/// Logging slots to check duplicated edges in GC. +#[cfg(feature = "extreme_assertions")] +pub(crate) mod slot_logger; /// Utils for collecting statistics. pub(crate) mod statistics; /// A treadmill implementation. 
diff --git a/src/util/test_util/mock_vm.rs b/src/util/test_util/mock_vm.rs index ee31be2269..ab0cac2d1a 100644 --- a/src/util/test_util/mock_vm.rs +++ b/src/util/test_util/mock_vm.rs @@ -12,11 +12,11 @@ use crate::util::heap::gc_trigger::GCTriggerPolicy; use crate::util::opaque_pointer::*; use crate::util::{Address, ObjectReference}; use crate::vm::object_model::specs::*; -use crate::vm::SlotVisitor; use crate::vm::GCThreadContext; use crate::vm::ObjectTracer; use crate::vm::ObjectTracerContext; use crate::vm::RootsWorkFactory; +use crate::vm::SlotVisitor; use crate::vm::VMBinding; use crate::Mutator; diff --git a/src/vm/mod.rs b/src/vm/mod.rs index 2a6b4c3f5a..bdc5516f9e 100644 --- a/src/vm/mod.rs +++ b/src/vm/mod.rs @@ -17,10 +17,10 @@ mod active_plan; mod collection; -pub mod slot; pub(crate) mod object_model; mod reference_glue; mod scanning; +pub mod slot; pub use self::active_plan::ActivePlan; pub use self::collection::Collection; pub use self::collection::GCThreadContext; @@ -28,11 +28,11 @@ pub use self::object_model::specs::*; pub use self::object_model::ObjectModel; pub use self::reference_glue::Finalizable; pub use self::reference_glue::ReferenceGlue; -pub use self::scanning::SlotVisitor; pub use self::scanning::ObjectTracer; pub use self::scanning::ObjectTracerContext; pub use self::scanning::RootsWorkFactory; pub use self::scanning::Scanning; +pub use self::scanning::SlotVisitor; #[cfg(test)] mod tests; diff --git a/src/vm/tests/mock_tests/mock_test_slots.rs b/src/vm/tests/mock_tests/mock_test_slots.rs index be448429c3..397e0f1caf 100644 --- a/src/vm/tests/mock_tests/mock_test_slots.rs +++ b/src/vm/tests/mock_tests/mock_test_slots.rs @@ -5,7 +5,7 @@ use super::mock_test_prelude::*; use crate::{ util::{Address, ObjectReference}, - vm::slot::{Slot, SimpleSlot}, + vm::slot::{SimpleSlot, Slot}, }; use atomic::{Atomic, Ordering}; diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs index 11152519f3..0ccabb40d5 100644 --- 
a/src/vm/tests/mock_tests/mod.rs +++ b/src/vm/tests/mock_tests/mod.rs @@ -29,7 +29,6 @@ mod mock_test_allocator_info; mod mock_test_barrier_slow_path_assertion; #[cfg(feature = "is_mmtk_object")] mod mock_test_conservatism; -mod mock_test_slots; #[cfg(target_os = "linux")] mod mock_test_handle_mmap_conflict; mod mock_test_handle_mmap_oom; @@ -44,6 +43,7 @@ mod mock_test_malloc_ms; mod mock_test_mmtk_julia_pr_143; #[cfg(feature = "nogc_lock_free")] mod mock_test_nogc_lock_free; +mod mock_test_slots; #[cfg(target_pointer_width = "64")] mod mock_test_vm_layout_compressed_pointer; mod mock_test_vm_layout_default; From 1a23939389adc7d2f860520bd56f062cd605ae2e Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 18:41:01 +0800 Subject: [PATCH 21/25] Do not link to private items. --- src/scheduler/gc_work.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index b8e4cb2544..41c10eb10d 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -511,7 +511,7 @@ pub type SlotOf = <::VM as VMBinding>::VMSlot; /// [`ProcessEdgesWork::trace_object`] traces an object and, upon first visit, enqueues it into an /// internal queue inside the `ProcessEdgesWork` instance. Each implementation of this trait /// implement `trace_object` differently. During [`Plan::schedule_collection`], plans select -/// (usually via [`GCWorkContext`]) specialized implementations of this trait to be used during each +/// (usually via `GCWorkContext`) specialized implementations of this trait to be used during each /// trace according the nature of each trace, such as whether it is a nursery collection, whether it /// is a defrag collection, whether it pins objects, etc. /// @@ -523,13 +523,9 @@ pub type SlotOf = <::VM as VMBinding>::VMSlot; /// This trait can also be used merely as a provider of the `trace_object` method by giving it an /// empty vector of slots. 
This is useful for node-enqueuing tracing /// ([`Scanning::scan_object_and_trace_edges`]) as well as weak reference processing -/// ([`Scanning::process_weak_refs`] as well as [`ReferenceProcessor`] and -/// [`FinalizableProcessor`]). In those cases, the caller passes the reference to the target object -/// to `trace_object`, an the caller is responsible for updating the slots according the return -/// value of `trace_object`. -/// -/// [`ReferenceProcessor`]: crate::util::reference_processor::ReferenceProcessor -/// [`FinalizableProcessor`]: crate::util::finalizable_processor::FinalizableProcessor +/// ([`Scanning::process_weak_refs`] as well as `ReferenceProcessor` and `FinalizableProcessor`). +/// In those cases, the caller passes the reference to the target object to `trace_object`, and the +/// caller is responsible for updating the slots according to the return value of `trace_object`. /// /// TODO: We should refactor this trait to decouple it from slots. See: /// From e46383ddbc60b3e418999294712e3b10a2c6282a Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 14 May 2024 18:43:35 +0800 Subject: [PATCH 22/25] Example code --- docs/userguide/src/tutorial/code/mygc_semispace/gc_work.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/gc_work.rs b/docs/userguide/src/tutorial/code/mygc_semispace/gc_work.rs index bc7e2eda41..eaed17c17c 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/gc_work.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/gc_work.rs @@ -45,12 +45,12 @@ impl ProcessEdgesWork for MyGCProcessEdges { type ScanObjectsWorkType = ScanObjects; fn new( - edges: Vec>, + slots: Vec>, roots: bool, mmtk: &'static MMTK, bucket: WorkBucketStage, ) -> Self { - let base = ProcessEdgesBase::new(edges, roots, mmtk, bucket); + let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket); let plan = base.plan().downcast_ref::>().unwrap(); Self { base, plan } } From 
3df6c7fc00b37928696fc61f94593520662ec471 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 20 May 2024 13:52:13 +0800 Subject: [PATCH 23/25] Fix tracing tool README and table name Stop using "process_edges" to refer to the work packet. The work packet name is ProcessEdgesWork, and the method name is process_slots. --- tools/tracing/README.md | 4 ++-- tools/tracing/performance/README.md | 12 ++++++------ tools/tracing/performance/packet_size.bt | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/tracing/README.md b/tools/tracing/README.md index 1f4db1a489..4af4ebe0dd 100644 --- a/tools/tracing/README.md +++ b/tools/tracing/README.md @@ -44,8 +44,8 @@ Currently, the core provides the following tracepoints. Each sub-directory contains a set of scripts. - `performance`: Print various GC-related statistics, such as the distribution of time spent in - allocation slow path, the time spent in each GC stages, and the distribution of `process_slots` - packet sizes. + allocation slow path, the time spent in each GC stages, and the distribution of the + `ProcessEdgesWork` packet sizes. - `timeline`: Record the start and end time of each GC and each work packet, and visualize them on a timeline in Perfetto UI. diff --git a/tools/tracing/performance/README.md b/tools/tracing/performance/README.md index bb4b00011d..9f085f6474 100644 --- a/tools/tracing/performance/README.md +++ b/tools/tracing/performance/README.md @@ -152,16 +152,16 @@ If you can't tell which lock instance is for which lock in MMTk, you can trace the allocation of the Mutex and record the stack trace (note that you might want to compile MMTk with `force-frame-pointers` to obtain better stack traces). 
-### Measuring the distribution of `process_slots` packet sizes (`packet_size`) +### Measuring the distribution of `ProcessEdgesWork` packet sizes (`packet_size`) Most of the GC time is spend in the transitive closure for tracing-based GCs, and MMTk performs -transitive closure via work packets that call the `ProcessEdgesWork::process_slots` method. This -tool measures the distribution of the sizes of these work packets, and also counts root slots -separately. +transitive closure via work packets that implement `ProcessEdgesWork` and call the `process_slots` +method. This tool measures the distribution of the sizes (as the number of slots) of these work +packets, and also counts root slots separately. Sample output: ``` -@process_slots_packet_size: +@process_edges_work_packet_size: [1] 238 |@@@@@ | [2, 4) 806 |@@@@@@@@@@@@@@@@@ | [4, 8) 1453 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ | @@ -178,7 +178,7 @@ Sample output: [8K, 16K) 58 |@ | [16K, 32K) 5 | | -@process_slots_root_packet_size: +@process_edges_work_root_packet_size: [1] 71 |@@@@@@@ | [2, 4) 4 | | [4, 8) 276 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@ | diff --git a/tools/tracing/performance/packet_size.bt b/tools/tracing/performance/packet_size.bt index b3dc59a59e..ac65f5acae 100644 --- a/tools/tracing/performance/packet_size.bt +++ b/tools/tracing/performance/packet_size.bt @@ -1,8 +1,8 @@ usdt:$MMTK:mmtk:process_slots { if (@stats_enabled) { - @process_slots_packet_size = hist(arg0); + @process_edges_work_packet_size = hist(arg0); if (arg1) { - @process_slots_root_packet_size = hist(arg0); + @process_edges_work_root_packet_size = hist(arg0); } } } From 0e0ef693ab3b3727eb65ab7d4f4d6f30eef9ef5d Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 20 May 2024 14:10:04 +0800 Subject: [PATCH 24/25] slot roots -> root slots --- src/scheduler/gc_work.rs | 2 +- src/vm/scanning.rs | 9 ++++++++- tests/test_roots_work_factory.rs | 4 ++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/scheduler/gc_work.rs 
b/src/scheduler/gc_work.rs index 41c10eb10d..2f658240c7 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -720,7 +720,7 @@ impl, PPE: ProcessEdgesWork, PPE: ProcessEdgesWork> RootsWorkFactory for ProcessEdgesWorkRootsWorkFactory { - fn create_process_slot_roots_work(&mut self, slots: Vec) { + fn create_process_root_slots_work(&mut self, slots: Vec) { crate::memory_manager::add_work_packet( self.mmtk, WorkBucketStage::Closure, diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 9b1a918898..6ab7e6c184 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -99,13 +99,20 @@ pub trait ObjectTracerContext: Clone + Send + 'static { /// references to variables with limited lifetime (such as local variables), because /// it needs to be moved between threads. pub trait RootsWorkFactory: Clone + Send + 'static { + // TODO: + // 1. Rename the functions and remove the repeating `create_process_` and `_work`. + // 2. Rename the functions to reflect both the form (slots / nodes) and the semantics (pinning + // / transitive pinning / non-pinning) of each function. + // 3. Introduce a function to give the VM binding a way to update root edges without + // representing the roots as slots. See: https://github.com/mmtk/mmtk-core/issues/710 + /// Create work packets to handle root slots. /// /// The work packet may update the slots. /// /// Arguments: /// * `slots`: A vector of slots. - fn create_process_slot_roots_work(&mut self, slots: Vec); + fn create_process_root_slots_work(&mut self, slots: Vec); /// Create work packets to handle non-transitively pinning roots. /// diff --git a/tests/test_roots_work_factory.rs b/tests/test_roots_work_factory.rs index 52b39a05c1..13725ef0e0 100644 --- a/tests/test_roots_work_factory.rs +++ b/tests/test_roots_work_factory.rs @@ -21,7 +21,7 @@ impl MockScanning { } fn mock_scan_roots(&self, mut factory: impl mmtk::vm::RootsWorkFactory
) { - factory.create_process_slot_roots_work(self.roots.clone()); + factory.create_process_root_slots_work(self.roots.clone()); } } @@ -42,7 +42,7 @@ struct MockFactory { } impl RootsWorkFactory
for MockFactory { - fn create_process_slot_roots_work(&mut self, slots: Vec
) { + fn create_process_root_slots_work(&mut self, slots: Vec
) { assert_eq!(slots, SLOTS); match self.round { 1 => { From cf57f5e3d3b3cfa96b04cde42b3a68e5e8e69468 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 21 May 2024 21:18:12 +0800 Subject: [PATCH 25/25] Rename to create_process_roots_work --- src/scheduler/gc_work.rs | 2 +- src/vm/scanning.rs | 5 +++-- tests/test_roots_work_factory.rs | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 2f658240c7..3cb3536003 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -720,7 +720,7 @@ impl, PPE: ProcessEdgesWork, PPE: ProcessEdgesWork> RootsWorkFactory for ProcessEdgesWorkRootsWorkFactory { - fn create_process_root_slots_work(&mut self, slots: Vec) { + fn create_process_roots_work(&mut self, slots: Vec) { crate::memory_manager::add_work_packet( self.mmtk, WorkBucketStage::Closure, diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 6ab7e6c184..05b6260f32 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -106,13 +106,14 @@ pub trait RootsWorkFactory: Clone + Send + 'static { // 3. Introduce a function to give the VM binding a way to update root edges without // representing the roots as slots. See: https://github.com/mmtk/mmtk-core/issues/710 - /// Create work packets to handle root slots. + /// Create work packets to handle non-pinned roots. The roots are represented as slots so that + /// they can be updated. /// /// The work packet may update the slots. /// /// Arguments: /// * `slots`: A vector of slots. - fn create_process_root_slots_work(&mut self, slots: Vec); + fn create_process_roots_work(&mut self, slots: Vec); /// Create work packets to handle non-transitively pinning roots. 
/// diff --git a/tests/test_roots_work_factory.rs b/tests/test_roots_work_factory.rs index 13725ef0e0..baf3adbf96 100644 --- a/tests/test_roots_work_factory.rs +++ b/tests/test_roots_work_factory.rs @@ -21,7 +21,7 @@ impl MockScanning { } fn mock_scan_roots(&self, mut factory: impl mmtk::vm::RootsWorkFactory
) { - factory.create_process_root_slots_work(self.roots.clone()); + factory.create_process_roots_work(self.roots.clone()); } } @@ -42,7 +42,7 @@ struct MockFactory { } impl RootsWorkFactory
for MockFactory { - fn create_process_root_slots_work(&mut self, slots: Vec
) { + fn create_process_roots_work(&mut self, slots: Vec
) { assert_eq!(slots, SLOTS); match self.round { 1 => {