diff --git a/docs/tutorial/code/mygc_semispace/gc_work.rs b/docs/tutorial/code/mygc_semispace/gc_work.rs
index 733eabe5eb..aa338ae55e 100644
--- a/docs/tutorial/code/mygc_semispace/gc_work.rs
+++ b/docs/tutorial/code/mygc_semispace/gc_work.rs
@@ -149,4 +149,14 @@ impl<VM: VMBinding> DerefMut for MyGCProcessEdges<VM> {
         &mut self.base
     }
 }
-// ANCHOR_END: deref
\ No newline at end of file
+// ANCHOR_END: deref
+
+// ANCHOR: workcontext
+pub struct MyGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
+impl<VM: VMBinding> crate::scheduler::GCWorkContext for MyGCWorkContext<VM> {
+    type VM = VM;
+    type PlanType = MyGC<VM>;
+    type CopyContextType = MyGCCopyContext<VM>;
+    type ProcessEdgesWorkType = MyGCProcessEdges<VM>;
+}
+// ANCHOR_END: workcontext
diff --git a/docs/tutorial/code/mygc_semispace/global.rs b/docs/tutorial/code/mygc_semispace/global.rs
index 129a945bb3..dcb234d4a2 100644
--- a/docs/tutorial/code/mygc_semispace/global.rs
+++ b/docs/tutorial/code/mygc_semispace/global.rs
@@ -4,6 +4,7 @@ use crate::plan::global::BasePlan; //Modify
 use crate::plan::global::CommonPlan; // Add
 use crate::plan::global::GcStatus; // Add
 use crate::plan::mygc::mutator::ALLOCATOR_MAPPING;
+use crate::plan::mygc::gc_work::MyGCWorkContext;
 use crate::plan::AllocationSemantics;
 use crate::plan::Plan;
 use crate::plan::PlanConstraints;
@@ -96,13 +97,7 @@ impl<VM: VMBinding> Plan for MyGC<VM> {
     fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
         self.base().set_collection_kind::<Self>(self);
         self.base().set_gc_status(GcStatus::GcPrepare);
-        scheduler.work_buckets[WorkBucketStage::Unconstrained]
-            .add(StopMutators::<MyGCProcessEdges<VM>>::new());
-        scheduler.work_buckets[WorkBucketStage::Prepare]
-            .add(Prepare::<Self, MyGCCopyContext<VM>>::new(self));
-        scheduler.work_buckets[WorkBucketStage::Release]
-            .add(Release::<Self, MyGCCopyContext<VM>>::new(self));
-        scheduler.set_finalizer(Some(EndOfGC));
+        scheduler.schedule_common_work::<MyGCWorkContext<VM>>(self);
     }
     // ANCHOR_END: schedule_collection
 
diff --git a/docs/tutorial/src/mygc/ss/collection.md b/docs/tutorial/src/mygc/ss/collection.md
index d1cb56fba3..a819043624 100644
--- a/docs/tutorial/src/mygc/ss/collection.md
+++ b/docs/tutorial/src/mygc/ss/collection.md
@@ -132,6 +132,17 @@ scheduler's prepare stage and resumes the mutators. The `StopMutators` work
 will invoke code from the bindings to scan threads and other roots, and those
 scanning work will further push work for a transitive closure.
 
+Though you can add those work packets by yourself, `GCWorkScheduler` provides a
+method `schedule_common_work()` that will add common work packets for you.
+
+To use `schedule_common_work()`, first we need to create a type `MyGCWorkContext`
+and implement the trait `GCWorkContext` for it. We create this type in `gc_work.rs`.
+
+```rust
+{{#include ../../../code/mygc_semispace/gc_work.rs:workcontext}}
+```
+Then we implement `schedule_collection()` using `MyGCWorkContext` and `schedule_common_work()`.
+
 ```rust
 {{#include ../../../code/mygc_semispace/global.rs:schedule_collection}}
 ```
diff --git a/src/plan/generational/copying/gc_work.rs b/src/plan/generational/copying/gc_work.rs
index df586f05ed..c8e0b6c3e9 100644
--- a/src/plan/generational/copying/gc_work.rs
+++ b/src/plan/generational/copying/gc_work.rs
@@ -1,4 +1,5 @@
 use super::global::GenCopy;
+use crate::plan::generational::gc_work::GenNurseryProcessEdges;
 use crate::plan::CopyContext;
 use crate::plan::PlanConstraints;
 use crate::policy::space::Space;
@@ -85,6 +86,7 @@ impl<VM: VMBinding> GenCopyMatureProcessEdges<VM> {
 impl<VM: VMBinding> ProcessEdgesWork for GenCopyMatureProcessEdges<VM> {
     type VM = VM;
+
     fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<Self::VM>) -> Self {
         let base = ProcessEdgesBase::new(edges, roots, mmtk);
         let plan = base.plan().downcast_ref::<GenCopy<VM>>().unwrap();
@@ -130,3 +132,19 @@ impl<VM: VMBinding> DerefMut for GenCopyMatureProcessEdges<VM> {
         &mut self.base
     }
 }
+
+pub struct GenCopyNurseryGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
+impl<VM: VMBinding> crate::scheduler::GCWorkContext for GenCopyNurseryGCWorkContext<VM> {
+    type VM = VM;
+    type PlanType = GenCopy<VM>;
+    type CopyContextType = GenCopyCopyContext<VM>;
+    type ProcessEdgesWorkType = GenNurseryProcessEdges<VM, Self::CopyContextType>;
+}
+
+pub(super) struct GenCopyMatureGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
+impl<VM: VMBinding> crate::scheduler::GCWorkContext for GenCopyMatureGCWorkContext<VM> {
+    type VM = VM;
+    type PlanType = GenCopy<VM>;
+    type CopyContextType = GenCopyCopyContext<VM>;
+    type ProcessEdgesWorkType = GenCopyMatureProcessEdges<VM>;
+}
diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs
index 67ef671589..213e2d1efe 100644
--- a/src/plan/generational/copying/global.rs
+++ b/src/plan/generational/copying/global.rs
@@ -1,7 +1,6 @@
-use super::gc_work::{GenCopyCopyContext, GenCopyMatureProcessEdges};
+use super::gc_work::{GenCopyCopyContext, GenCopyMatureGCWorkContext, GenCopyNurseryGCWorkContext};
 use super::mutator::ALLOCATOR_MAPPING;
 use crate::mmtk::MMTK;
-use crate::plan::generational::gc_work::GenNurseryProcessEdges;
 use crate::plan::generational::global::Gen;
 use crate::plan::global::BasePlan;
 use crate::plan::global::CommonPlan;
@@ -86,20 +85,10 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
         self.base().set_gc_status(GcStatus::GcPrepare);
         if !is_full_heap {
             debug!("Nursery GC");
-            self.common()
-                .schedule_common::<Self, GenNurseryProcessEdges<VM, GenCopyCopyContext<VM>>, GenCopyCopyContext<VM>>(
-                    self,
-                    &GENCOPY_CONSTRAINTS,
-                    scheduler,
-                );
+            scheduler.schedule_common_work::<GenCopyNurseryGCWorkContext<VM>>(self);
         } else {
             debug!("Full heap GC");
-            self.common()
-                .schedule_common::<Self, GenCopyMatureProcessEdges<VM>, GenCopyCopyContext<VM>>(
-                    self,
-                    &GENCOPY_CONSTRAINTS,
-                    scheduler,
-                );
+            scheduler.schedule_common_work::<GenCopyMatureGCWorkContext<VM>>(self);
         }
     }
diff --git a/src/plan/generational/gc_work.rs b/src/plan/generational/gc_work.rs
index 6819cc22eb..b0639bda53 100644
--- a/src/plan/generational/gc_work.rs
+++ b/src/plan/generational/gc_work.rs
@@ -9,15 +9,16 @@ use crate::MMTK;
 use std::ops::{Deref, DerefMut};
 
 /// Process edges for a nursery GC. A generatinoal plan should use this type for a nursery GC.
-pub struct GenNurseryProcessEdges<VM: VMBinding, C: CopyContext + GCWorkerLocal> {
+pub struct GenNurseryProcessEdges<VM: VMBinding, C: CopyContext<VM = VM> + GCWorkerLocal> {
     gen: &'static Gen<VM>,
     base: ProcessEdgesBase<GenNurseryProcessEdges<VM, C>>,
 }
-impl<VM: VMBinding, C: CopyContext + GCWorkerLocal> ProcessEdgesWork
+impl<VM: VMBinding, C: CopyContext<VM = VM> + GCWorkerLocal> ProcessEdgesWork
     for GenNurseryProcessEdges<VM, C>
 {
     type VM = VM;
+
     fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<Self::VM>) -> Self {
         let base = ProcessEdgesBase::new(edges, roots, mmtk);
         let gen = base.plan().generational();
@@ -40,14 +41,18 @@ impl<VM: VMBinding, C: CopyContext + GCWorkerLocal> ProcessEdgesWork
     }
 }
 
-impl<VM: VMBinding, C: CopyContext + GCWorkerLocal> Deref for GenNurseryProcessEdges<VM, C> {
+impl<VM: VMBinding, C: CopyContext<VM = VM> + GCWorkerLocal> Deref
+    for GenNurseryProcessEdges<VM, C>
+{
     type Target = ProcessEdgesBase<Self>;
     fn deref(&self) -> &Self::Target {
         &self.base
     }
 }
 
-impl<VM: VMBinding, C: CopyContext + GCWorkerLocal> DerefMut for GenNurseryProcessEdges<VM, C> {
+impl<VM: VMBinding, C: CopyContext<VM = VM> + GCWorkerLocal> DerefMut
+    for GenNurseryProcessEdges<VM, C>
+{
     fn deref_mut(&mut self) -> &mut Self::Target {
         &mut self.base
     }
diff --git a/src/plan/generational/immix/gc_work.rs b/src/plan/generational/immix/gc_work.rs
index 5ec06f3431..1d08b5425f 100644
--- a/src/plan/generational/immix/gc_work.rs
+++ b/src/plan/generational/immix/gc_work.rs
@@ -1,4 +1,5 @@
 use super::global::GenImmix;
+use crate::plan::generational::gc_work::GenNurseryProcessEdges;
 use crate::plan::CopyContext;
 use crate::plan::PlanConstraints;
 use crate::policy::space::Space;
@@ -187,3 +188,23 @@ impl<VM: VMBinding, const KIND: TraceKind> DerefMut for GenImmixMatureProcessEdg
         &mut self.base
     }
 }
+
+pub struct GenImmixNurseryGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
+impl<VM: VMBinding> crate::scheduler::GCWorkContext for GenImmixNurseryGCWorkContext<VM> {
+    type VM = VM;
+    type PlanType = GenImmix<VM>;
+    type CopyContextType = GenImmixCopyContext<VM>;
+    type ProcessEdgesWorkType = GenNurseryProcessEdges<VM, Self::CopyContextType>;
+}
+
+pub(super) struct GenImmixMatureGCWorkContext<VM: VMBinding, const KIND: TraceKind>(
+    std::marker::PhantomData<VM>,
+);
+impl<VM: VMBinding, const KIND: TraceKind> crate::scheduler::GCWorkContext
+    for GenImmixMatureGCWorkContext<VM, KIND>
+{
+    type VM = VM;
+    type PlanType = GenImmix<VM>;
+    type CopyContextType = GenImmixCopyContext<VM>;
+    type ProcessEdgesWorkType = GenImmixMatureProcessEdges<VM, KIND>;
+}
diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs
index d9e2d83628..a3c10e8756 100644
--- a/src/plan/generational/immix/global.rs
+++ b/src/plan/generational/immix/global.rs
@@ -1,5 +1,6 @@
-use super::gc_work::{GenImmixCopyContext, GenImmixMatureProcessEdges};
-use crate::plan::generational::gc_work::GenNurseryProcessEdges;
+use super::gc_work::{
+    GenImmixCopyContext, GenImmixMatureGCWorkContext, GenImmixNurseryGCWorkContext,
+};
 use crate::plan::generational::global::Gen;
 use crate::plan::global::BasePlan;
 use crate::plan::global::CommonPlan;
@@ -127,28 +128,17 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
 
         if !is_full_heap {
             debug!("Nursery GC");
-            self.common()
-                .schedule_common::<Self, GenNurseryProcessEdges<VM, GenImmixCopyContext<VM>>, GenImmixCopyContext<VM>>(
-                    self,
-                    &GENIMMIX_CONSTRAINTS,
-                    scheduler,
-                );
+            scheduler.schedule_common_work::<GenImmixNurseryGCWorkContext<VM>>(self);
         } else if defrag {
             debug!("Full heap GC Defrag");
-            self.common()
-                .schedule_common::<Self, GenImmixMatureProcessEdges<VM, TRACE_KIND_DEFRAG>, GenImmixCopyContext<VM>>(
+            scheduler
+                .schedule_common_work::<GenImmixMatureGCWorkContext<VM, TRACE_KIND_DEFRAG>>(
                     self,
-                    &GENIMMIX_CONSTRAINTS,
-                    scheduler,
                 );
         } else {
             debug!("Full heap GC Fast");
-            self.common()
-                .schedule_common::<Self, GenImmixMatureProcessEdges<VM, TRACE_KIND_FAST>, GenImmixCopyContext<VM>>(
-                    self,
-                    &GENIMMIX_CONSTRAINTS,
-                    scheduler,
-                );
+            scheduler
+                .schedule_common_work::<GenImmixMatureGCWorkContext<VM, TRACE_KIND_FAST>>(self);
         }
     }
diff --git a/src/plan/global.rs b/src/plan/global.rs
index dee9b05a34..4e5c5bd8c7 100644
--- a/src/plan/global.rs
+++ b/src/plan/global.rs
@@ -9,7 +9,6 @@ use crate::plan::Mutator;
 use crate::policy::immortalspace::ImmortalSpace;
 use crate::policy::largeobjectspace::LargeObjectSpace;
 use crate::policy::space::Space;
-use crate::scheduler::gc_work::ProcessEdgesWork;
 use crate::scheduler::*;
 use crate::util::alloc::allocators::AllocatorSelector;
 #[cfg(feature = "analysis")]
@@ -952,61 +951,6 @@ impl<VM: VMBinding> CommonPlan<VM> {
         self.base.release(tls, full_heap)
     }
 
-    /// Schedule all the common work packets
-    pub fn schedule_common<
-        P: Plan<VM = VM>,
-        E: ProcessEdgesWork<VM = VM>,
-        C: CopyContext<VM = VM> + GCWorkerLocal,
-    >(
-        &self,
-        plan: &'static P,
-        constraints: &'static PlanConstraints,
-        scheduler: &GCWorkScheduler<VM>,
-    ) {
-        use crate::scheduler::gc_work::*;
-
-        // Stop & scan mutators (mutator scanning can happen before STW)
-        scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::<E>::new());
-
-        // Prepare global/collectors/mutators
-        scheduler.work_buckets[WorkBucketStage::Prepare].add(Prepare::<P, C>::new(plan));
-
-        // VM-specific weak ref processing
-        scheduler.work_buckets[WorkBucketStage::RefClosure].add(ProcessWeakRefs::<E>::new());
-
-        // Release global/collectors/mutators
-        scheduler.work_buckets[WorkBucketStage::Release].add(Release::<P, C>::new(plan));
-
-        // Analysis GC work
-        #[cfg(feature = "analysis")]
-        {
-            use crate::util::analysis::GcHookWork;
-            scheduler.work_buckets[WorkBucketStage::Unconstrained].add(GcHookWork);
-        }
-
-        // Sanity
-        #[cfg(feature = "sanity")]
-        {
-            use crate::util::sanity::sanity_checker::ScheduleSanityGC;
-            scheduler.work_buckets[WorkBucketStage::Final].add(ScheduleSanityGC::<P>::new(plan));
-        }
-
-        // Finalization
-        if !self.base.options.no_finalizer {
-            use crate::util::finalizable_processor::{Finalization, ForwardFinalization};
-            // finalization
-            scheduler.work_buckets[WorkBucketStage::RefClosure].add(Finalization::<E>::new());
-            // forward refs
-            if constraints.needs_forward_after_liveness {
-                scheduler.work_buckets[WorkBucketStage::RefForwarding]
-                    .add(ForwardFinalization::<E>::new());
-            }
-        }
-
-        // Set EndOfGC to run at the end
-        scheduler.set_finalizer(Some(EndOfGC));
-    }
-
     pub fn stacks_prepared(&self) -> bool {
         self.base.stacks_prepared()
     }
diff --git a/src/plan/immix/gc_work.rs b/src/plan/immix/gc_work.rs
index 1471aa1204..696821272e 100644
--- a/src/plan/immix/gc_work.rs
+++ b/src/plan/immix/gc_work.rs
@@ -97,6 +97,7 @@ impl<VM: VMBinding, const KIND: TraceKind> ImmixProcessEdges<VM, KIND> {
 impl<VM: VMBinding, const KIND: TraceKind> ProcessEdgesWork for ImmixProcessEdges<VM, KIND> {
     type VM = VM;
+    const OVERWRITE_REFERENCE: bool = crate::policy::immix::DEFRAG;
     fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<Self::VM>) -> Self {
@@ -166,3 +167,15 @@ impl<VM: VMBinding, const KIND: TraceKind> DerefMut for ImmixProcessEdges<VM, KIND>
         &mut self.base
     }
 }
+
+pub(super) struct ImmixGCWorkContext<VM: VMBinding, const KIND: TraceKind>(
+    std::marker::PhantomData<VM>,
+);
+impl<VM: VMBinding, const KIND: TraceKind> crate::scheduler::GCWorkContext
+    for ImmixGCWorkContext<VM, KIND>
+{
+    type VM = VM;
+    type PlanType = Immix<VM>;
+    type CopyContextType = ImmixCopyContext<VM>;
+    type ProcessEdgesWorkType = ImmixProcessEdges<VM, KIND>;
+}
diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs
index 5eb635f843..ed2b00ab5a 100644
--- a/src/plan/immix/global.rs
+++ b/src/plan/immix/global.rs
@@ -1,4 +1,4 @@
-use super::gc_work::{ImmixCopyContext, ImmixProcessEdges, TraceKind};
+use super::gc_work::{ImmixCopyContext, ImmixGCWorkContext, TraceKind};
 use super::mutator::ALLOCATOR_MAPPING;
 use crate::plan::global::BasePlan;
 use crate::plan::global::CommonPlan;
@@ -91,11 +91,9 @@ impl<VM: VMBinding> Plan for Immix<VM> {
         // The blocks are not identical, clippy is wrong. Probably it does not recognize the constant type parameter.
         #[allow(clippy::if_same_then_else)]
         if in_defrag {
-            self.common()
-                .schedule_common::<Self, ImmixProcessEdges<VM, TRACE_KIND_DEFRAG>, ImmixCopyContext<VM>>(self, &IMMIX_CONSTRAINTS, scheduler);
+            scheduler.schedule_common_work::<ImmixGCWorkContext<VM, TRACE_KIND_DEFRAG>>(self);
         } else {
-            self.common()
-                .schedule_common::<Self, ImmixProcessEdges<VM, TRACE_KIND_FAST>, ImmixCopyContext<VM>>(self, &IMMIX_CONSTRAINTS, scheduler);
+            scheduler.schedule_common_work::<ImmixGCWorkContext<VM, TRACE_KIND_FAST>>(self);
         }
     }
diff --git a/src/plan/marksweep/gc_work.rs b/src/plan/marksweep/gc_work.rs
index 4fce9cbdda..d3aed1e7c2 100644
--- a/src/plan/marksweep/gc_work.rs
+++ b/src/plan/marksweep/gc_work.rs
@@ -23,6 +23,7 @@ pub struct MSProcessEdges<VM: VMBinding> {
 impl<VM: VMBinding> ProcessEdgesWork for MSProcessEdges<VM> {
     type VM = VM;
+    const OVERWRITE_REFERENCE: bool = false;
 
     fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<Self::VM>) -> Self {
         let base = ProcessEdgesBase::new(edges, roots, mmtk);
@@ -118,3 +119,11 @@ impl<VM: VMBinding> GCWork<VM> for MSSweepChunks<VM> {
         mmtk.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(work_packets);
     }
 }
+
+pub struct MSGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
+impl<VM: VMBinding> crate::scheduler::GCWorkContext for MSGCWorkContext<VM> {
+    type VM = VM;
+    type PlanType = MarkSweep<VM>;
+    type CopyContextType = NoCopy<VM>;
+    type ProcessEdgesWorkType = MSProcessEdges<VM>;
+}
diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs
index bb4c5b7699..9791715da3 100644
--- a/src/plan/marksweep/global.rs
+++ b/src/plan/marksweep/global.rs
@@ -3,7 +3,7 @@ use crate::plan::global::BasePlan;
 use crate::plan::global::CommonPlan;
 use crate::plan::global::GcStatus;
 use crate::plan::global::NoCopy;
-use crate::plan::marksweep::gc_work::{MSProcessEdges, MSSweepChunks};
+use crate::plan::marksweep::gc_work::{MSGCWorkContext, MSSweepChunks};
 use crate::plan::marksweep::mutator::ALLOCATOR_MAPPING;
 use crate::plan::AllocationSemantics;
 use crate::plan::Plan;
@@ -56,12 +56,7 @@ impl<VM: VMBinding> Plan for MarkSweep<VM> {
     fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
         self.base().set_collection_kind::<Self>(self);
         self.base().set_gc_status(GcStatus::GcPrepare);
-        self.common()
-            .schedule_common::<Self, MSProcessEdges<VM>, NoCopy<VM>>(
-                self,
-                &MS_CONSTRAINTS,
-                scheduler,
-            );
+        scheduler.schedule_common_work::<MSGCWorkContext<VM>>(self);
         scheduler.work_buckets[WorkBucketStage::Prepare].add(MSSweepChunks::<VM>::new(self));
     }
diff --git a/src/plan/pageprotect/gc_work.rs b/src/plan/pageprotect/gc_work.rs
index 66ff15d5bf..b519893b6b 100644
--- a/src/plan/pageprotect/gc_work.rs
+++ b/src/plan/pageprotect/gc_work.rs
@@ -18,6 +18,7 @@ pub struct PPProcessEdges<VM: VMBinding> {
 impl<VM: VMBinding> ProcessEdgesWork for PPProcessEdges<VM> {
     const OVERWRITE_REFERENCE: bool = false;
     type VM = VM;
+
     fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<Self::VM>) -> Self {
         let base = ProcessEdgesBase::new(edges, roots, mmtk);
         let plan = base.plan().downcast_ref::<PageProtect<VM>>().unwrap();
@@ -56,3 +57,11 @@ impl<VM: VMBinding> DerefMut for PPProcessEdges<VM> {
         &mut self.base
     }
 }
+
+pub struct PPGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
+impl<VM: VMBinding> crate::scheduler::GCWorkContext for PPGCWorkContext<VM> {
+    type VM = VM;
+    type PlanType = PageProtect<VM>;
+    type CopyContextType = NoCopy<VM>;
+    type ProcessEdgesWorkType = PPProcessEdges<VM>;
+}
diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs
index 295c8a040a..692b83c243 100644
--- a/src/plan/pageprotect/global.rs
+++ b/src/plan/pageprotect/global.rs
@@ -1,4 +1,4 @@
-use super::gc_work::PPProcessEdges;
+use super::gc_work::PPGCWorkContext;
 use super::mutator::ALLOCATOR_MAPPING;
 use crate::mmtk::MMTK;
 use crate::plan::global::GcStatus;
@@ -75,8 +75,7 @@ impl<VM: VMBinding> Plan for PageProtect<VM> {
     fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
         self.base().set_collection_kind::<Self>(self);
         self.base().set_gc_status(GcStatus::GcPrepare);
-        self.common()
-            .schedule_common::<Self, PPProcessEdges<VM>, NoCopy<VM>>(self, &CONSTRAINTS, scheduler);
+        scheduler.schedule_common_work::<PPGCWorkContext<VM>>(self);
     }
 
     fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
diff --git a/src/plan/semispace/gc_work.rs b/src/plan/semispace/gc_work.rs
index 9c444d47ff..c57aaa90dd 100644
--- a/src/plan/semispace/gc_work.rs
+++ b/src/plan/semispace/gc_work.rs
@@ -139,3 +139,11 @@ impl<VM: VMBinding> DerefMut for SSProcessEdges<VM> {
         &mut self.base
     }
 }
+
+pub struct SSGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
+impl<VM: VMBinding> crate::scheduler::GCWorkContext for SSGCWorkContext<VM> {
+    type VM = VM;
+    type PlanType = SemiSpace<VM>;
+    type CopyContextType = SSCopyContext<VM>;
+    type ProcessEdgesWorkType = SSProcessEdges<VM>;
+}
diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs
index 6c8f464d59..f95a4ce5d0 100644
--- a/src/plan/semispace/global.rs
+++ b/src/plan/semispace/global.rs
@@ -1,4 +1,4 @@
-use super::gc_work::{SSCopyContext, SSProcessEdges};
+use super::gc_work::{SSCopyContext, SSGCWorkContext};
 use crate::mmtk::MMTK;
 use crate::plan::global::CommonPlan;
 use crate::plan::global::GcStatus;
@@ -75,12 +75,7 @@ impl<VM: VMBinding> Plan for SemiSpace<VM> {
     fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
         self.base().set_collection_kind::<Self>(self);
         self.base().set_gc_status(GcStatus::GcPrepare);
-        self.common()
-            .schedule_common::<Self, SSProcessEdges<VM>, SSCopyContext<VM>>(
-                self,
-                &SS_CONSTRAINTS,
-                scheduler,
-            );
+        scheduler.schedule_common_work::<SSGCWorkContext<VM>>(self);
     }
 
     fn get_allocator_mapping(&self) -> &'static EnumMap<AllocationSemantics, AllocatorSelector> {
diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs
index e85122355a..24fa7f09a3 100644
--- a/src/scheduler/gc_work.rs
+++ b/src/scheduler/gc_work.rs
@@ -27,34 +27,31 @@ impl<VM: VMBinding> CoordinatorWork<VM> for ScheduleCollection {}
 /// We assume this work packet is the only running work packet that accesses plan, and there should
 /// be no other concurrent work packet that accesses plan (read or write). Otherwise, there may
 /// be a race condition.
-pub struct Prepare<P: Plan, C: CopyContext> {
-    pub plan: &'static P,
-    _p: PhantomData<C>,
+pub struct Prepare<C: GCWorkContext> {
+    pub plan: &'static C::PlanType,
 }
 
-impl<P: Plan, C: CopyContext> Prepare<P, C> {
-    pub fn new(plan: &'static P) -> Self {
-        Self {
-            plan,
-            _p: PhantomData,
-        }
+impl<C: GCWorkContext> Prepare<C> {
+    pub fn new(plan: &'static C::PlanType) -> Self {
+        Self { plan }
     }
 }
 
-impl<P: Plan, C: CopyContext + GCWorkerLocal> GCWork<P::VM> for Prepare<P, C> {
-    fn do_work(&mut self, worker: &mut GCWorker<P::VM>, mmtk: &'static MMTK<P::VM>) {
+impl<C: GCWorkContext + 'static> GCWork<C::VM> for Prepare<C> {
+    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
         trace!("Prepare Global");
         // We assume this is the only running work packet that accesses plan at the point of execution
         #[allow(clippy::cast_ref_to_mut)]
-        let plan_mut: &mut P = unsafe { &mut *(self.plan as *const _ as *mut _) };
+        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
         plan_mut.prepare(worker.tls);
 
-        for mutator in <P::VM as VMBinding>::VMActivePlan::mutators() {
+        for mutator in <C::VM as VMBinding>::VMActivePlan::mutators() {
             mmtk.scheduler.work_buckets[WorkBucketStage::Prepare]
-                .add(PrepareMutator::<P::VM>::new(mutator));
+                .add(PrepareMutator::<C::VM>::new(mutator));
         }
         for w in &mmtk.scheduler.worker_group().workers {
-            w.local_work_bucket.add(PrepareCollector::<C>::new());
+            w.local_work_bucket
+                .add(PrepareCollector::<C::CopyContextType>::new());
         }
     }
 }
@@ -103,35 +100,32 @@ impl<W: CopyContext + GCWorkerLocal> GCWork<W::VM> for PrepareCollec
 /// We assume this work packet is the only running work packet that accesses plan, and there should
 /// be no other concurrent work packet that accesses plan (read or write). Otherwise, there may
 /// be a race condition.
-pub struct Release<P: Plan, C: CopyContext> {
-    pub plan: &'static P,
-    _p: PhantomData<C>,
+pub struct Release<C: GCWorkContext> {
+    pub plan: &'static C::PlanType,
 }
 
-impl<P: Plan, C: CopyContext> Release<P, C> {
-    pub fn new(plan: &'static P) -> Self {
-        Self {
-            plan,
-            _p: PhantomData,
-        }
+impl<C: GCWorkContext> Release<C> {
+    pub fn new(plan: &'static C::PlanType) -> Self {
+        Self { plan }
     }
 }
 
-impl<P: Plan, C: CopyContext + GCWorkerLocal> GCWork<P::VM> for Release<P, C> {
-    fn do_work(&mut self, worker: &mut GCWorker<P::VM>, mmtk: &'static MMTK<P::VM>) {
+impl<C: GCWorkContext + 'static> GCWork<C::VM> for Release<C> {
+    fn do_work(&mut self, worker: &mut GCWorker<C::VM>, mmtk: &'static MMTK<C::VM>) {
         trace!("Release Global");
-        <P::VM as VMBinding>::VMCollection::vm_release();
+        <C::VM as VMBinding>::VMCollection::vm_release();
         // We assume this is the only running work packet that accesses plan at the point of execution
         #[allow(clippy::cast_ref_to_mut)]
-        let plan_mut: &mut P = unsafe { &mut *(self.plan as *const _ as *mut _) };
+        let plan_mut: &mut C::PlanType = unsafe { &mut *(self.plan as *const _ as *mut _) };
         plan_mut.release(worker.tls);
 
-        for mutator in <P::VM as VMBinding>::VMActivePlan::mutators() {
+        for mutator in <C::VM as VMBinding>::VMActivePlan::mutators() {
             mmtk.scheduler.work_buckets[WorkBucketStage::Release]
-                .add(ReleaseMutator::<P::VM>::new(mutator));
+                .add(ReleaseMutator::<C::VM>::new(mutator));
         }
         for w in &mmtk.scheduler.worker_group().workers {
-            w.local_work_bucket.add(ReleaseCollector::<C>::new());
+            w.local_work_bucket
+                .add(ReleaseCollector::<C::CopyContextType>::new());
         }
         // TODO: Process weak references properly
         mmtk.reference_processors.clear();
@@ -406,6 +400,7 @@ pub trait ProcessEdgesWork:
     Send + 'static + Sized + DerefMut + Deref<Target = ProcessEdgesBase<Self>>
 {
     type VM: VMBinding;
+
     const CAPACITY: usize = 4096;
     const OVERWRITE_REFERENCE: bool = true;
     const SCAN_OBJECTS_IMMEDIATELY: bool = true;
diff --git a/src/scheduler/mod.rs b/src/scheduler/mod.rs
index 516e62383d..672f400450 100644
--- a/src/scheduler/mod.rs
+++ b/src/scheduler/mod.rs
@@ -11,6 +11,7 @@ pub(self) mod work_counter;
 mod work;
 pub use work::CoordinatorWork;
 pub use work::GCWork;
+pub(crate) use work::GCWorkContext;
 
 mod work_bucket;
 pub use work_bucket::WorkBucketStage;
diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs
index d79e20b6e5..b290099eb8 100644
--- a/src/scheduler/scheduler.rs
+++ b/src/scheduler/scheduler.rs
@@ -145,6 +145,61 @@ impl<VM: VMBinding> GCWorkScheduler<VM> {
         }
     }
 
+    /// Schedule all the common work packets
+    pub fn schedule_common_work<C: GCWorkContext<VM = VM> + 'static>(
+        &self,
+        plan: &'static C::PlanType,
+    ) {
+        use crate::plan::Plan;
+        use crate::scheduler::gc_work::*;
+        // Stop & scan mutators (mutator scanning can happen before STW)
+        self.work_buckets[WorkBucketStage::Unconstrained]
+            .add(StopMutators::<C::ProcessEdgesWorkType>::new());
+
+        // Prepare global/collectors/mutators
+        self.work_buckets[WorkBucketStage::Prepare].add(Prepare::<C>::new(plan));
+
+        // VM-specific weak ref processing
+        self.work_buckets[WorkBucketStage::RefClosure]
+            .add(ProcessWeakRefs::<C::ProcessEdgesWorkType>::new());
+
+        // Release global/collectors/mutators
+        self.work_buckets[WorkBucketStage::Release].add(Release::<C>::new(plan));
+
+        // Analysis GC work
+        #[cfg(feature = "analysis")]
+        {
+            use crate::util::analysis::GcHookWork;
+            self.work_buckets[WorkBucketStage::Unconstrained].add(GcHookWork);
+        }
+
+        // Sanity
+        #[cfg(feature = "sanity")]
+        {
+            use crate::util::sanity::sanity_checker::ScheduleSanityGC;
+            self.work_buckets[WorkBucketStage::Final]
+                .add(ScheduleSanityGC::<C::PlanType>::new(
+                    plan,
+                ));
+        }
+
+        // Finalization
+        if !plan.base().options.no_finalizer {
+            use crate::util::finalizable_processor::{Finalization, ForwardFinalization};
+            // finalization
+            self.work_buckets[WorkBucketStage::RefClosure]
+                .add(Finalization::<C::ProcessEdgesWorkType>::new());
+            // forward refs
+            if plan.constraints().needs_forward_after_liveness {
+                self.work_buckets[WorkBucketStage::RefForwarding]
+                    .add(ForwardFinalization::<C::ProcessEdgesWorkType>::new());
+            }
+        }
+
+        // Set EndOfGC to run at the end
+        self.set_finalizer(Some(EndOfGC));
+    }
+
     fn are_buckets_drained(&self, buckets: &[WorkBucketStage]) -> bool {
         buckets.iter().all(|&b| self.work_buckets[b].is_drained())
     }
diff --git a/src/scheduler/work.rs b/src/scheduler/work.rs
index 2026cfdcdf..2deeea2641 100644
--- a/src/scheduler/work.rs
+++ b/src/scheduler/work.rs
@@ -22,3 +22,19 @@ pub trait GCWork<VM: VMBinding>: 'static + Send {
         stat.end_of_work(&mut worker.stat);
     }
 }
+
+use super::gc_work::ProcessEdgesWork;
+use crate::plan::CopyContext;
+use crate::plan::Plan;
+
+/// This trait provides a group of associated types that are needed to
+/// create GC work packets for a certain plan. For example, `GCWorkScheduler.schedule_common_work()`
+/// needs this trait to schedule different work packets. For certain plans,
+/// they may need to provide several types that implement this trait, e.g. one for
+/// nursery GC, one for mature GC.
+pub trait GCWorkContext {
+    type VM: VMBinding;
+    type PlanType: Plan<VM = Self::VM>;
+    type CopyContextType: CopyContext<VM = Self::VM> + GCWorkerLocal;
+    type ProcessEdgesWorkType: ProcessEdgesWork<VM = Self::VM>;
+}
diff --git a/src/util/sanity/sanity_checker.rs b/src/util/sanity/sanity_checker.rs
index a52a8817e7..a797c90765 100644
--- a/src/util/sanity/sanity_checker.rs
+++ b/src/util/sanity/sanity_checker.rs
@@ -176,6 +176,7 @@ impl<VM: VMBinding> DerefMut for SanityGCProcessEdges<VM> {
 impl<VM: VMBinding> ProcessEdgesWork for SanityGCProcessEdges<VM> {
     type VM = VM;
+    const OVERWRITE_REFERENCE: bool = false;
     fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<Self::VM>) -> Self {
         Self {