Skip to content
Closed
Show file tree
Hide file tree
Changes from 8 commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
d00e4c6
Support for object pinning
udesou Nov 21, 2022
00a33f2
Adding pinning metadata to immix space side metadata specs; Returning…
udesou Nov 22, 2022
a062970
Proper semantics for pin/unpin; Fixing checks in immixspace.
udesou Nov 23, 2022
dd0a28b
Applying discussed semantics for pinning/unpinning
udesou Nov 30, 2022
b56af7d
Merge branch 'mmtk:master' into feature/pinning-objects
udesou Nov 30, 2022
b574a75
Updating to latest master
udesou Nov 30, 2022
66e2709
Adding support for tracing 'red roots'
udesou Dec 1, 2022
38d2f6e
Minor fix
udesou Dec 1, 2022
b86b321
Fixing object model for DummyVM
udesou Dec 1, 2022
8e99228
Merge branch 'master' into feature/pinning-objects
udesou Dec 1, 2022
c013513
Fixing clippy issues
udesou Dec 1, 2022
2ded32d
Merge branch 'feature/pinning-objects' of https://github.com/udesou/m…
udesou Dec 1, 2022
235f83a
Converting object pinning into a feature
udesou Dec 2, 2022
13accdb
Renaming object-pinning => object_pinning
udesou Dec 2, 2022
b73d892
Merge branch 'master' into feature/pinning-objects
qinsoon Dec 2, 2022
57f5dcb
Removing assertion that was useful if pin bin happens to be the same …
udesou Dec 5, 2022
0dedd14
Revert "Minor fix"
udesou Dec 5, 2022
a915c22
Revert "Adding support for tracing 'red roots'"
udesou Dec 5, 2022
72bc39d
Merge branch 'feature/pinning-objects' into feature/object-moving
udesou Dec 5, 2022
3fe039d
Refactoring support for red roots
udesou Dec 5, 2022
c1feb58
Merge branch 'master' into feature/object-moving
udesou Dec 6, 2022
86ffdcf
Adding pinning methods to native ms space
udesou Dec 6, 2022
23090bb
Merge branch 'master' into feature/object-moving
udesou Dec 6, 2022
45209c0
Merge branch 'master' into feature/object-moving
qinsoon Jun 26, 2023
dc0582b
Merge remote-tracking branch 'upstream/master' into feature/object-mo…
udesou Aug 1, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 41 additions & 0 deletions src/memory_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -750,6 +750,47 @@ pub fn add_finalizer<VM: VMBinding>(
mmtk.finalizable_processor.lock().unwrap().add(object);
}

/// Pin an object. MMTk will make sure that the object does not move
/// during GC. Note that this action cannot happen in some plans, e.g., semispace.
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes. I recommend adding a field to PlanConstraints to indicate whether a plan supports pinning. For example: supports_pinning: bool or allows_pinning: bool. Any plan that has moves_object = false should also have supports_pinning = true. By doing this, we can add debug_assert! or assert! here and panic early.

/// It returns true if the pinning operation has been performed, i.e.,
/// the object status changed from non-pinned to pinned
///
/// Arguments:
/// * `object`: The object to be pinned
pub fn pin_object<VM: VMBinding>(object: ObjectReference) -> bool {
    use crate::mmtk::SFT_MAP;
    use crate::policy::sft_map::SFTMap;
    // Dispatch to the policy that owns the object's address via the SFT.
    let addr = object.to_address::<VM>();
    SFT_MAP.get_checked(addr).pin_object(object)
}

/// Unpin an object.
/// Returns true if the unpinning operation has been performed, i.e.,
/// the object status changed from pinned to non-pinned.
///
/// Arguments:
/// * `object`: The object to be unpinned
pub fn unpin_object<VM: VMBinding>(object: ObjectReference) -> bool {
    use crate::mmtk::SFT_MAP;
    use crate::policy::sft_map::SFTMap;
    SFT_MAP
        .get_checked(object.to_address::<VM>())
        .unpin_object(object)
}

/// Check whether an object is currently pinned.
///
/// Arguments:
/// * `object`: The object to be checked
pub fn is_pinned<VM: VMBinding>(object: ObjectReference) -> bool {
    use crate::mmtk::SFT_MAP;
    use crate::policy::sft_map::SFTMap;
    // Look up the owning policy in the SFT and query its pin bit.
    let sft = SFT_MAP.get_checked(object.to_address::<VM>());
    sft.is_object_pinned(object)
}

/// Get an object that is ready for finalization. After each GC, if any registered object is not
/// alive, this call will return one of the objects. MMTk will retain the liveness of those objects
/// until they are popped through this call. Once an object is popped, it is the responsibility of
Expand Down
153 changes: 149 additions & 4 deletions src/plan/immix/global.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,22 +3,29 @@ use super::mutator::ALLOCATOR_MAPPING;
use crate::plan::global::BasePlan;
use crate::plan::global::CommonPlan;
use crate::plan::global::GcStatus;
use crate::plan::mutator_context::MutatorContext;
use crate::plan::AllocationSemantics;
use crate::plan::Plan;
use crate::plan::PlanConstraints;
use crate::policy::immix::{TRACE_KIND_DEFRAG, TRACE_KIND_FAST};
use crate::policy::immix::{TRACE_KIND_DEFRAG, TRACE_KIND_FAST, TRACE_KIND_IMMOVABLE};
use crate::policy::space::Space;
use crate::scheduler::gc_work::PlanProcessEdges;
use crate::scheduler::gc_work::ScanVMImmovableRoots;
use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::copy::*;
use crate::util::heap::layout::heap_layout::Mmapper;
use crate::util::heap::layout::heap_layout::VMMap;
use crate::util::heap::HeapMeta;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::metadata::side_metadata::SideMetadataSanity;
use crate::util::metadata::side_metadata::{SideMetadataContext, SideMetadataSanity};
use crate::util::options::Options;
use crate::vm::ActivePlan;
use crate::vm::Collection;
use crate::vm::Scanning;
use crate::vm::VMBinding;
use crate::MMTK;
use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread};
use std::marker::PhantomData;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;

Expand Down Expand Up @@ -94,7 +101,13 @@ impl<VM: VMBinding> Plan for Immix<VM> {
// The blocks are not identical, clippy is wrong. Probably it does not recognize the constant type parameter.
#[allow(clippy::if_same_then_else)]
if in_defrag {
scheduler.schedule_common_work::<ImmixGCWorkContext<VM, TRACE_KIND_DEFRAG>>(self);
schedule_stop_mutator_scan_immobile_roots::<
VM,
ImmixGCWorkContext<VM, TRACE_KIND_IMMOVABLE>,
>(scheduler, self);
schedule_remaining_work::<VM, ImmixGCWorkContext<VM, TRACE_KIND_DEFRAG>>(
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You could have a runtime option or a feature. When it is turned on, you will schedule work for the immovable trace. In that case, you can just check the option/feature in schedule_common_work and schedule the work if enabled.

scheduler, self,
);
} else {
scheduler.schedule_common_work::<ImmixGCWorkContext<VM, TRACE_KIND_FAST>>(self);
}
Expand Down Expand Up @@ -133,6 +146,138 @@ impl<VM: VMBinding> Plan for Immix<VM> {
}
}

/// Stop all mutators and scan immovable roots.
///
/// Schedules a `ScanVMImmovableRoots` packet immediately after mutators are paused,
/// so that immovable roots are traced with the immovable trace kind.
///
/// TODO: Smaller work granularity
#[derive(Default)]
pub struct StopMutatorScanImmovable<ScanEdges: ProcessEdgesWork>(PhantomData<ScanEdges>);

impl<ScanEdges: ProcessEdgesWork> StopMutatorScanImmovable<ScanEdges> {
    /// Create the work packet. The type parameter only selects the
    /// `ProcessEdgesWork` implementation; no state is carried.
    pub fn new() -> Self {
        Self(PhantomData)
    }
}

impl<E: ProcessEdgesWork> CoordinatorWork<E::VM> for StopMutatorScanImmovable<E> {}

impl<E: ProcessEdgesWork> GCWork<E::VM> for StopMutatorScanImmovable<E> {
fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
// If the VM requires that only the coordinator thread can stop the world,
// we delegate the work to the coordinator.
if <E::VM as VMBinding>::VMCollection::COORDINATOR_ONLY_STW && !worker.is_coordinator() {
mmtk.scheduler
.add_coordinator_work(StopMutatorScanImmovable::<E>::new(), worker);
return;
}

trace!("stop_all_mutators start");
mmtk.plan.base().prepare_for_stack_scanning();
<E::VM as VMBinding>::VMCollection::stop_all_mutators(worker.tls, |mutator| {
mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanStackRoot::<E>(mutator));
});
trace!("stop_all_mutators end");
mmtk.scheduler.notify_mutators_paused(mmtk);
if <E::VM as VMBinding>::VMScanning::SCAN_MUTATORS_IN_SAFEPOINT {
// Prepare mutators if necessary
// FIXME: This test is probably redundant. JikesRVM requires to call `prepare_mutator` once after mutators are paused
if !mmtk.plan.base().stacks_prepared() {
for mutator in <E::VM as VMBinding>::VMActivePlan::mutators() {
<E::VM as VMBinding>::VMCollection::prepare_mutator(
worker.tls,
mutator.get_tls(),
mutator,
);
}
}
// Scan immovable roots with immovable trace
mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanVMImmovableRoots::<
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do you still need this? I notice that in normal StopMutators, you also scheduled ScanVMImmovableRoots. One of the two is redundant I guess.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Alternatively, you could change RootsWorkFactory::create_process_edge_roots_work() to let it take an extra boolean to say whether the roots are movable or not. In that case, the binding does not need to scan stacks twice, one for immovable roots and one for normal roots. They can just do one stack scanning, and create different work packet for different kinds of roots (I assume this is more natural and efficient for the bindings?).
This change will also help remove quite a few duplicate code in this PR, such as ScanMutators, ScanVMImmovableRoots, and ProcessEdgesWorkImmovableRootsWorkFactory.

PlanProcessEdges<E::VM, Immix<E::VM>, TRACE_KIND_IMMOVABLE>,
>::new());
}
}
}

/// Schedule a `StopMutatorScanImmovable` packet in the `Unconstrained` bucket,
/// which stops the world and scans the VM's immovable roots.
fn schedule_stop_mutator_scan_immobile_roots<VM: VMBinding, C: GCWorkContext<VM = VM> + 'static>(
    scheduler: &GCWorkScheduler<VM>,
    _plan: &'static C::PlanType,
) {
    // Stop & scan mutators (mutator scanning can happen before STW)
    scheduler.work_buckets[WorkBucketStage::Unconstrained]
        .add(StopMutatorScanImmovable::<C::ProcessEdgesWorkType>::new());
}

/// Schedule the rest of the collection work after the immovable-root scan:
/// mutator stack scanning, prepare/release, analysis and sanity hooks,
/// reference processing, and finalization.
///
/// NOTE(review): this largely mirrors `GCWorkScheduler::schedule_common_work`
/// minus the stop-mutators packet — confirm the two stay in sync.
fn schedule_remaining_work<VM: VMBinding, C: GCWorkContext<VM = VM> + 'static>(
    scheduler: &GCWorkScheduler<VM>,
    plan: &'static C::PlanType,
) {
    use crate::scheduler::gc_work::*;
    // Scan mutators (mutator scanning can happen before STW)
    scheduler.work_buckets[WorkBucketStage::Prepare]
        .add(ScanMutators::<C::ProcessEdgesWorkType>::new());

    // Prepare global/collectors/mutators
    scheduler.work_buckets[WorkBucketStage::Prepare].add(Prepare::<C>::new(plan));

    // Release global/collectors/mutators
    scheduler.work_buckets[WorkBucketStage::Release].add(Release::<C>::new(plan));

    // Analysis GC work
    #[cfg(feature = "analysis")]
    {
        use crate::util::analysis::GcHookWork;
        scheduler.work_buckets[WorkBucketStage::Unconstrained].add(GcHookWork);
    }

    // Sanity
    #[cfg(feature = "sanity")]
    {
        use crate::util::sanity::sanity_checker::ScheduleSanityGC;
        scheduler.work_buckets[WorkBucketStage::Final]
            .add(ScheduleSanityGC::<C::PlanType>::new(plan));
    }

    // Reference processing (skipped if the VM disabled reference types)
    if !*plan.base().options.no_reference_types {
        use crate::util::reference_processor::{
            PhantomRefProcessing, SoftRefProcessing, WeakRefProcessing,
        };
        scheduler.work_buckets[WorkBucketStage::SoftRefClosure]
            .add(SoftRefProcessing::<C::ProcessEdgesWorkType>::new());
        scheduler.work_buckets[WorkBucketStage::WeakRefClosure]
            .add(WeakRefProcessing::<C::ProcessEdgesWorkType>::new());
        scheduler.work_buckets[WorkBucketStage::PhantomRefClosure]
            .add(PhantomRefProcessing::<C::ProcessEdgesWorkType>::new());

        // VM-specific weak ref processing
        scheduler.work_buckets[WorkBucketStage::WeakRefClosure]
            .add(VMProcessWeakRefs::<C::ProcessEdgesWorkType>::new());

        use crate::util::reference_processor::RefForwarding;
        // Only plans that move objects after liveness is computed need to
        // forward references.
        if plan.constraints().needs_forward_after_liveness {
            scheduler.work_buckets[WorkBucketStage::RefForwarding]
                .add(RefForwarding::<C::ProcessEdgesWorkType>::new());
        }

        use crate::util::reference_processor::RefEnqueue;
        scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::<VM>::new());
    }

    // Finalization (skipped if the VM disabled finalizers)
    if !*plan.base().options.no_finalizer {
        use crate::util::finalizable_processor::{Finalization, ForwardFinalization};
        // finalization
        scheduler.work_buckets[WorkBucketStage::FinalRefClosure]
            .add(Finalization::<C::ProcessEdgesWorkType>::new());
        // forward refs
        if plan.constraints().needs_forward_after_liveness {
            scheduler.work_buckets[WorkBucketStage::FinalizableForwarding]
                .add(ForwardFinalization::<C::ProcessEdgesWorkType>::new());
        }
    }
}

impl<VM: VMBinding> Immix<VM> {
pub fn new(
vm_map: &'static VMMap,
Expand Down
4 changes: 3 additions & 1 deletion src/plan/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,9 @@ pub use plan_constraints::PlanConstraints;
pub use plan_constraints::DEFAULT_PLAN_CONSTRAINTS;

mod tracing;
pub use tracing::{ObjectQueue, ObjectsClosure, VectorObjectQueue, VectorQueue};
pub use tracing::{
ImmovableObjectsClosure, ObjectQueue, ObjectsClosure, VectorObjectQueue, VectorQueue,
};

mod generational;
mod immix;
Expand Down
42 changes: 42 additions & 0 deletions src/plan/tracing.rs
Original file line number Diff line number Diff line change
Expand Up @@ -115,3 +115,45 @@ impl<'a, E: ProcessEdgesWork> Drop for ObjectsClosure<'a, E> {
self.flush();
}
}

/// A transitive closure visitor to collect all the edges of an object during ClosureImmovable.
pub struct ImmovableObjectsClosure<'a, E: ProcessEdgesWork> {
    // Edges buffered locally until the queue fills (or the closure is dropped).
    buffer: VectorQueue<EdgeOf<E>>,
    // Worker used to submit flushed edges as new work packets.
    worker: &'a mut GCWorker<E::VM>,
}

impl<'a, E: ProcessEdgesWork> ImmovableObjectsClosure<'a, E> {
pub fn new(worker: &'a mut GCWorker<E::VM>) -> Self {
Self {
buffer: VectorQueue::new(),
worker,
}
}

fn flush(&mut self) {
let buf = self.buffer.take();
if !buf.is_empty() {
self.worker.add_work(
WorkBucketStage::ClosureImmovable,
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess the only difference of this type and a normal ObjectsClosure is which bucket new work will be added. You can just add a field closure_bucket to ObjectsClosure, and schedule work to the closure_bucket.

E::new(buf, false, self.worker.mmtk),
);
}
}
}

impl<'a, E: ProcessEdgesWork> EdgeVisitor<EdgeOf<E>> for ImmovableObjectsClosure<'a, E> {
    /// Buffer one edge; flush to a work packet once the buffer is full.
    #[inline(always)]
    fn visit_edge(&mut self, slot: EdgeOf<E>) {
        self.buffer.push(slot);
        if self.buffer.is_full() {
            self.flush();
        }
    }
}

impl<'a, E: ProcessEdgesWork> Drop for ImmovableObjectsClosure<'a, E> {
    /// Flush any remaining buffered edges so none are lost when the closure
    /// goes out of scope.
    #[inline(always)]
    fn drop(&mut self) {
        self.flush();
    }
}
12 changes: 12 additions & 0 deletions src/policy/copyspace.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,18 @@ impl<VM: VMBinding> SFT for CopySpace<VM> {
!self.is_from_space() || object_forwarding::is_forwarded::<VM>(object)
}

    // CopySpace evacuates every live object, so pinning cannot be honored.
    fn pin_object(&self, _object: ObjectReference) -> bool {
        panic!("Cannot pin/unpin objects of CopySpace.")
    }

    // Unpinning is equally unsupported: CopySpace has no pin state to clear.
    fn unpin_object(&self, _object: ObjectReference) -> bool {
        panic!("Cannot pin/unpin objects of CopySpace.")
    }

    // Objects in CopySpace can never be pinned, so this is always false.
    fn is_object_pinned(&self, _object: ObjectReference) -> bool {
        false
    }

    // CopySpace is a moving space: objects may be evacuated during GC.
    fn is_movable(&self) -> bool {
        true
    }
Expand Down
Loading