Skip to content
Merged
Show file tree
Hide file tree
Changes from 21 commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
c9646fa
Extract common work packet to schedule_common()
qinsoon Oct 11, 2021
83428a2
Skip finalization for marksweep
qinsoon Oct 12, 2021
e6912e6
cargo fmt
qinsoon Oct 12, 2021
8f9460c
Remove commented code
qinsoon Oct 12, 2021
a68ba2f
Merge branch 'master' into extract-to-schedule-common
qinsoon Oct 26, 2021
488d8b6
Remove schedule_common_partially()
qinsoon Oct 26, 2021
7ecd5d3
Add CopyContext type as associate type to ProcessEdgesWork
qinsoon Oct 27, 2021
c3e2b89
Introduce GCWorkContext
qinsoon Oct 28, 2021
455950a
Remove CopyContext from ProcessEdgesWork
qinsoon Oct 28, 2021
dffaa5f
Fix document
qinsoon Oct 28, 2021
1ac5aa0
Add comments for GCWorkContext
qinsoon Oct 28, 2021
88fd6a3
Use GCWorkContext for Prepare and Release
qinsoon Oct 28, 2021
e365ca6
Merge branch 'master' into gc-work-context
qinsoon Oct 28, 2021
fe4bd70
cargo fmt
qinsoon Oct 28, 2021
c4a133d
Fix tutorial code
qinsoon Oct 29, 2021
2a77273
Remove the PlanConstraints parameter from schedule_common
qinsoon Nov 2, 2021
f547059
Move schedule_common() to GCScheduler
qinsoon Nov 2, 2021
b6b2569
Fix tutorial code
qinsoon Nov 2, 2021
162c8d4
Rename schedule_common_packets to schedule_common_work
qinsoon Nov 2, 2021
44522a9
Merge branch 'master' into gc-work-context
qinsoon Nov 2, 2021
38becd6
cargo fmt
qinsoon Nov 3, 2021
64f7ae2
Update some out-dated comments
qinsoon Nov 8, 2021
2c80781
Merge branch 'master' into gc-work-context
qinsoon Nov 8, 2021
280397f
Merge branch 'master' into gc-work-context
qinsoon Nov 15, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion docs/tutorial/code/mygc_semispace/gc_work.rs
Original file line number Diff line number Diff line change
Expand Up @@ -149,4 +149,14 @@ impl<VM: VMBinding> DerefMut for MyGCProcessEdges<VM> {
&mut self.base
}
}
// ANCHOR_END: deref
// ANCHOR_END: deref

// ANCHOR: workcontext
pub struct MyGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
impl<VM: VMBinding> crate::scheduler::GCWorkContext for MyGCWorkContext<VM> {
type VM = VM;
type PlanType = MyGC<VM>;
type CopyContextType = MyGCCopyContext<VM>;
type ProcessEdgesWorkType = MyGCProcessEdges<VM>;
}
// ANCHOR_END: workcontext
9 changes: 2 additions & 7 deletions docs/tutorial/code/mygc_semispace/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ use crate::plan::global::BasePlan; //Modify
use crate::plan::global::CommonPlan; // Add
use crate::plan::global::GcStatus; // Add
use crate::plan::mygc::mutator::ALLOCATOR_MAPPING;
use crate::plan::mygc::gc_work::MyGCWorkContext;
use crate::plan::AllocationSemantics;
use crate::plan::Plan;
use crate::plan::PlanConstraints;
Expand Down Expand Up @@ -96,13 +97,7 @@ impl<VM: VMBinding> Plan for MyGC<VM> {
fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
self.base().set_collection_kind::<Self>(self);
self.base().set_gc_status(GcStatus::GcPrepare);
scheduler.work_buckets[WorkBucketStage::Unconstrained]
.add(StopMutators::<MyGCProcessEdges<VM>>::new());
scheduler.work_buckets[WorkBucketStage::Prepare]
.add(Prepare::<Self, MyGCCopyContext<VM>>::new(self));
scheduler.work_buckets[WorkBucketStage::Release]
.add(Release::<Self, MyGCCopyContext<VM>>::new(self));
scheduler.set_finalizer(Some(EndOfGC));
scheduler.schedule_common_work::<MyGCWorkContext<VM>>(self);
}
// ANCHOR_END: schedule_collection

Expand Down
11 changes: 11 additions & 0 deletions docs/tutorial/src/mygc/ss/collection.md
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,17 @@ scheduler's prepare stage and resumes the mutators. The `StopMutators` work
will invoke code from the bindings to scan threads and other roots, and those
scanning work will further push work for a transitive closure.

Though you can add those work packets by yourself, the GC scheduler provides a
method `schedule_common_work()` that will add the common work packets for you.

To use `schedule_common_work()`, first we need to create a type `MyGCWorkContext`
and implement the trait `GCWorkContext` for it. We create this type in `gc_work.rs`.

```rust
{{#include ../../../code/mygc_semispace/gc_work.rs:workcontext}}
```

Then we implement `schedule_collection()` using `MyGCWorkContext` and `schedule_common_work()`.

```rust
{{#include ../../../code/mygc_semispace/global.rs:schedule_collection}}
```
Expand Down
18 changes: 18 additions & 0 deletions src/plan/generational/copying/gc_work.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use super::global::GenCopy;
use crate::plan::generational::gc_work::GenNurseryProcessEdges;
use crate::plan::CopyContext;
use crate::plan::PlanConstraints;
use crate::policy::space::Space;
Expand Down Expand Up @@ -85,6 +86,7 @@ impl<VM: VMBinding> GenCopyMatureProcessEdges<VM> {

impl<VM: VMBinding> ProcessEdgesWork for GenCopyMatureProcessEdges<VM> {
type VM = VM;

fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
let base = ProcessEdgesBase::new(edges, roots, mmtk);
let plan = base.plan().downcast_ref::<GenCopy<VM>>().unwrap();
Expand Down Expand Up @@ -130,3 +132,19 @@ impl<VM: VMBinding> DerefMut for GenCopyMatureProcessEdges<VM> {
&mut self.base
}
}

pub struct GenCopyNurseryGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
impl<VM: VMBinding> crate::scheduler::GCWorkContext for GenCopyNurseryGCWorkContext<VM> {
type VM = VM;
type PlanType = GenCopy<VM>;
type CopyContextType = GenCopyCopyContext<VM>;
type ProcessEdgesWorkType = GenNurseryProcessEdges<VM, Self::CopyContextType>;
}

pub(super) struct GenCopyMatureGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
impl<VM: VMBinding> crate::scheduler::GCWorkContext for GenCopyMatureGCWorkContext<VM> {
type VM = VM;
type PlanType = GenCopy<VM>;
type CopyContextType = GenCopyCopyContext<VM>;
type ProcessEdgesWorkType = GenCopyMatureProcessEdges<VM>;
}
17 changes: 3 additions & 14 deletions src/plan/generational/copying/global.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
use super::gc_work::{GenCopyCopyContext, GenCopyMatureProcessEdges};
use super::gc_work::{GenCopyCopyContext, GenCopyMatureGCWorkContext, GenCopyNurseryGCWorkContext};
use super::mutator::ALLOCATOR_MAPPING;
use crate::mmtk::MMTK;
use crate::plan::generational::gc_work::GenNurseryProcessEdges;
use crate::plan::generational::global::Gen;
use crate::plan::global::BasePlan;
use crate::plan::global::CommonPlan;
Expand Down Expand Up @@ -86,20 +85,10 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
self.base().set_gc_status(GcStatus::GcPrepare);
if !is_full_heap {
debug!("Nursery GC");
self.common()
.schedule_common::<Self, GenNurseryProcessEdges<VM, GenCopyCopyContext<VM>>, GenCopyCopyContext<VM>>(
self,
&GENCOPY_CONSTRAINTS,
scheduler,
);
scheduler.schedule_common_work::<GenCopyNurseryGCWorkContext<VM>>(self);
} else {
debug!("Full heap GC");
self.common()
.schedule_common::<Self, GenCopyMatureProcessEdges<VM>, GenCopyCopyContext<VM>>(
self,
&GENCOPY_CONSTRAINTS,
scheduler,
);
scheduler.schedule_common_work::<GenCopyMatureGCWorkContext<VM>>(self);
}
}

Expand Down
13 changes: 9 additions & 4 deletions src/plan/generational/gc_work.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,15 +9,16 @@ use crate::MMTK;
use std::ops::{Deref, DerefMut};

/// Process edges for a nursery GC. A generational plan should use this type for a nursery GC.
pub struct GenNurseryProcessEdges<VM: VMBinding, C: CopyContext + GCWorkerLocal> {
pub struct GenNurseryProcessEdges<VM: VMBinding, C: CopyContext<VM = VM> + GCWorkerLocal> {
gen: &'static Gen<VM>,
base: ProcessEdgesBase<GenNurseryProcessEdges<VM, C>>,
}

impl<VM: VMBinding, C: CopyContext + GCWorkerLocal> ProcessEdgesWork
impl<VM: VMBinding, C: CopyContext<VM = VM> + GCWorkerLocal> ProcessEdgesWork
for GenNurseryProcessEdges<VM, C>
{
type VM = VM;

fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
let base = ProcessEdgesBase::new(edges, roots, mmtk);
let gen = base.plan().generational();
Expand All @@ -40,14 +41,18 @@ impl<VM: VMBinding, C: CopyContext + GCWorkerLocal> ProcessEdgesWork
}
}

impl<VM: VMBinding, C: CopyContext + GCWorkerLocal> Deref for GenNurseryProcessEdges<VM, C> {
impl<VM: VMBinding, C: CopyContext<VM = VM> + GCWorkerLocal> Deref
for GenNurseryProcessEdges<VM, C>
{
type Target = ProcessEdgesBase<Self>;
fn deref(&self) -> &Self::Target {
&self.base
}
}

impl<VM: VMBinding, C: CopyContext + GCWorkerLocal> DerefMut for GenNurseryProcessEdges<VM, C> {
impl<VM: VMBinding, C: CopyContext<VM = VM> + GCWorkerLocal> DerefMut
for GenNurseryProcessEdges<VM, C>
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.base
}
Expand Down
21 changes: 21 additions & 0 deletions src/plan/generational/immix/gc_work.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use super::global::GenImmix;
use crate::plan::generational::gc_work::GenNurseryProcessEdges;
use crate::plan::CopyContext;
use crate::plan::PlanConstraints;
use crate::policy::space::Space;
Expand Down Expand Up @@ -187,3 +188,23 @@ impl<VM: VMBinding, const KIND: TraceKind> DerefMut for GenImmixMatureProcessEdg
&mut self.base
}
}

pub struct GenImmixNurseryGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
impl<VM: VMBinding> crate::scheduler::GCWorkContext for GenImmixNurseryGCWorkContext<VM> {
type VM = VM;
type PlanType = GenImmix<VM>;
type CopyContextType = GenImmixCopyContext<VM>;
type ProcessEdgesWorkType = GenNurseryProcessEdges<VM, Self::CopyContextType>;
}

pub(super) struct GenImmixMatureGCWorkContext<VM: VMBinding, const KIND: TraceKind>(
std::marker::PhantomData<VM>,
);
impl<VM: VMBinding, const KIND: TraceKind> crate::scheduler::GCWorkContext
for GenImmixMatureGCWorkContext<VM, KIND>
{
type VM = VM;
type PlanType = GenImmix<VM>;
type CopyContextType = GenImmixCopyContext<VM>;
type ProcessEdgesWorkType = GenImmixMatureProcessEdges<VM, KIND>;
}
26 changes: 8 additions & 18 deletions src/plan/generational/immix/global.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
use super::gc_work::{GenImmixCopyContext, GenImmixMatureProcessEdges};
use crate::plan::generational::gc_work::GenNurseryProcessEdges;
use super::gc_work::{
GenImmixCopyContext, GenImmixMatureGCWorkContext, GenImmixNurseryGCWorkContext,
};
use crate::plan::generational::global::Gen;
use crate::plan::global::BasePlan;
use crate::plan::global::CommonPlan;
Expand Down Expand Up @@ -127,28 +128,17 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {

if !is_full_heap {
debug!("Nursery GC");
self.common()
.schedule_common::<Self, GenNurseryProcessEdges<VM, GenImmixCopyContext<VM>>, GenImmixCopyContext<VM>>(
self,
&GENIMMIX_CONSTRAINTS,
scheduler,
);
scheduler.schedule_common_work::<GenImmixNurseryGCWorkContext<VM>>(self);
} else if defrag {
debug!("Full heap GC Defrag");
self.common()
.schedule_common::<Self, GenImmixMatureProcessEdges<VM, { TraceKind::Defrag }>, GenImmixCopyContext<VM>>(
scheduler
.schedule_common_work::<GenImmixMatureGCWorkContext<VM, { TraceKind::Defrag }>>(
self,
&GENIMMIX_CONSTRAINTS,
scheduler,
);
} else {
debug!("Full heap GC Fast");
self.common()
.schedule_common::<Self, GenImmixMatureProcessEdges<VM, { TraceKind::Fast }>, GenImmixCopyContext<VM>>(
self,
&GENIMMIX_CONSTRAINTS,
scheduler,
);
scheduler
.schedule_common_work::<GenImmixMatureGCWorkContext<VM, { TraceKind::Fast }>>(self);
}
}

Expand Down
56 changes: 0 additions & 56 deletions src/plan/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ use crate::plan::Mutator;
use crate::policy::immortalspace::ImmortalSpace;
use crate::policy::largeobjectspace::LargeObjectSpace;
use crate::policy::space::Space;
use crate::scheduler::gc_work::ProcessEdgesWork;
use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
#[cfg(feature = "analysis")]
Expand Down Expand Up @@ -918,61 +917,6 @@ impl<VM: VMBinding> CommonPlan<VM> {
self.base.release(tls, full_heap)
}

/// Schedule all the common work packets
pub fn schedule_common<
P: Plan<VM = VM>,
E: ProcessEdgesWork<VM = VM>,
C: CopyContext<VM = VM> + GCWorkerLocal,
>(
&self,
plan: &'static P,
constraints: &'static PlanConstraints,
scheduler: &GCWorkScheduler<VM>,
) {
use crate::scheduler::gc_work::*;

// Stop & scan mutators (mutator scanning can happen before STW)
scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::<E>::new());

// Prepare global/collectors/mutators
scheduler.work_buckets[WorkBucketStage::Prepare].add(Prepare::<P, C>::new(plan));

// VM-specific weak ref processing
scheduler.work_buckets[WorkBucketStage::RefClosure].add(ProcessWeakRefs::<E>::new());

// Release global/collectors/mutators
scheduler.work_buckets[WorkBucketStage::Release].add(Release::<P, C>::new(plan));

// Analysis GC work
#[cfg(feature = "analysis")]
{
use crate::util::analysis::GcHookWork;
scheduler.work_buckets[WorkBucketStage::Unconstrained].add(GcHookWork);
}

// Sanity
#[cfg(feature = "sanity")]
{
use crate::util::sanity::sanity_checker::ScheduleSanityGC;
scheduler.work_buckets[WorkBucketStage::Final].add(ScheduleSanityGC::<P, C>::new(plan));
}

// Finalization
if !self.base.options.no_finalizer {
use crate::util::finalizable_processor::{Finalization, ForwardFinalization};
// finalization
scheduler.work_buckets[WorkBucketStage::RefClosure].add(Finalization::<E>::new());
// forward refs
if constraints.needs_forward_after_liveness {
scheduler.work_buckets[WorkBucketStage::RefForwarding]
.add(ForwardFinalization::<E>::new());
}
}

// Set EndOfGC to run at the end
scheduler.set_finalizer(Some(EndOfGC));
}

pub fn stacks_prepared(&self) -> bool {
self.base.stacks_prepared()
}
Expand Down
13 changes: 13 additions & 0 deletions src/plan/immix/gc_work.rs
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ impl<VM: VMBinding, const KIND: TraceKind> ImmixProcessEdges<VM, KIND> {

impl<VM: VMBinding, const KIND: TraceKind> ProcessEdgesWork for ImmixProcessEdges<VM, KIND> {
type VM = VM;

const OVERWRITE_REFERENCE: bool = crate::policy::immix::DEFRAG;

fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
Expand Down Expand Up @@ -166,3 +167,15 @@ impl<VM: VMBinding, const KIND: TraceKind> DerefMut for ImmixProcessEdges<VM, KI
&mut self.base
}
}

pub(super) struct ImmixGCWorkContext<VM: VMBinding, const KIND: TraceKind>(
std::marker::PhantomData<VM>,
);
impl<VM: VMBinding, const KIND: TraceKind> crate::scheduler::GCWorkContext
for ImmixGCWorkContext<VM, KIND>
{
type VM = VM;
type PlanType = Immix<VM>;
type CopyContextType = ImmixCopyContext<VM>;
type ProcessEdgesWorkType = ImmixProcessEdges<VM, KIND>;
}
8 changes: 3 additions & 5 deletions src/plan/immix/global.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use super::gc_work::{ImmixCopyContext, ImmixProcessEdges, TraceKind};
use super::gc_work::{ImmixCopyContext, ImmixGCWorkContext, TraceKind};
use super::mutator::ALLOCATOR_MAPPING;
use crate::plan::global::BasePlan;
use crate::plan::global::CommonPlan;
Expand Down Expand Up @@ -91,11 +91,9 @@ impl<VM: VMBinding> Plan for Immix<VM> {
// The blocks are not identical, clippy is wrong. Probably it does not recognize the constant type parameter.
#[allow(clippy::if_same_then_else)]
if in_defrag {
self.common()
.schedule_common::<Self, ImmixProcessEdges<VM, { TraceKind::Defrag }>, ImmixCopyContext<VM>>(self, &IMMIX_CONSTRAINTS, scheduler);
scheduler.schedule_common_work::<ImmixGCWorkContext<VM, { TraceKind::Defrag }>>(self);
} else {
self.common()
.schedule_common::<Self, ImmixProcessEdges<VM, { TraceKind::Fast }>, ImmixCopyContext<VM>>(self, &IMMIX_CONSTRAINTS, scheduler);
scheduler.schedule_common_work::<ImmixGCWorkContext<VM, { TraceKind::Fast }>>(self);
}
}

Expand Down
9 changes: 9 additions & 0 deletions src/plan/marksweep/gc_work.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ pub struct MSProcessEdges<VM: VMBinding> {

impl<VM: VMBinding> ProcessEdgesWork for MSProcessEdges<VM> {
type VM = VM;

const OVERWRITE_REFERENCE: bool = false;
fn new(edges: Vec<Address>, roots: bool, mmtk: &'static MMTK<VM>) -> Self {
let base = ProcessEdgesBase::new(edges, roots, mmtk);
Expand Down Expand Up @@ -118,3 +119,11 @@ impl<VM: VMBinding> GCWork<VM> for MSSweepChunks<VM> {
mmtk.scheduler.work_buckets[WorkBucketStage::Release].bulk_add(work_packets);
}
}

pub struct MSGCWorkContext<VM: VMBinding>(std::marker::PhantomData<VM>);
impl<VM: VMBinding> crate::scheduler::GCWorkContext for MSGCWorkContext<VM> {
type VM = VM;
type PlanType = MarkSweep<VM>;
type CopyContextType = NoCopy<VM>;
type ProcessEdgesWorkType = MSProcessEdges<VM>;
}
9 changes: 2 additions & 7 deletions src/plan/marksweep/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use crate::plan::global::BasePlan;
use crate::plan::global::CommonPlan;
use crate::plan::global::GcStatus;
use crate::plan::global::NoCopy;
use crate::plan::marksweep::gc_work::{MSProcessEdges, MSSweepChunks};
use crate::plan::marksweep::gc_work::{MSGCWorkContext, MSSweepChunks};
use crate::plan::marksweep::mutator::ALLOCATOR_MAPPING;
use crate::plan::AllocationSemantics;
use crate::plan::Plan;
Expand Down Expand Up @@ -56,12 +56,7 @@ impl<VM: VMBinding> Plan for MarkSweep<VM> {
fn schedule_collection(&'static self, scheduler: &GCWorkScheduler<VM>) {
self.base().set_collection_kind::<Self>(self);
self.base().set_gc_status(GcStatus::GcPrepare);
self.common()
.schedule_common::<Self, MSProcessEdges<VM>, NoCopy<VM>>(
self,
&MS_CONSTRAINTS,
scheduler,
);
scheduler.schedule_common_work::<MSGCWorkContext<VM>>(self);
scheduler.work_buckets[WorkBucketStage::Prepare].add(MSSweepChunks::<VM>::new(self));
}

Expand Down
Loading