diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs
index 6ae193f919..56c069c982 100644
--- a/src/plan/barriers.rs
+++ b/src/plan/barriers.rs
@@ -171,13 +171,13 @@ impl<S: BarrierSemantics> ObjectBarrier<S> {
         Self { semantics }
     }
 
-    /// Attepmt to atomically log an object.
+    /// Attempt to atomically log an object.
     /// Returns true if the object is not logged previously.
     fn object_is_unlogged(&self, object: ObjectReference) -> bool {
         unsafe { S::UNLOG_BIT_SPEC.load::<u8>(object, None) != 0 }
     }
 
-    /// Attepmt to atomically log an object.
+    /// Attempt to atomically log an object.
     /// Returns true if the object is not logged previously.
     fn log_object(&self, object: ObjectReference) -> bool {
         #[cfg(all(feature = "vo_bit", feature = "extreme_assertions"))]
diff --git a/src/plan/generational/gc_work.rs b/src/plan/generational/gc_work.rs
index 03240b4e93..fc16ba727b 100644
--- a/src/plan/generational/gc_work.rs
+++ b/src/plan/generational/gc_work.rs
@@ -108,22 +108,24 @@ impl<E: ProcessEdgesWork> ProcessModBuf<E> {
 
 impl<E: ProcessEdgesWork> GCWork<E::VM> for ProcessModBuf<E> {
     fn do_work(&mut self, worker: &mut GCWorker<E::VM>, mmtk: &'static MMTK<E::VM>) {
-        // Flip the per-object unlogged bits to "unlogged" state.
-        for obj in &self.modbuf {
-            <E::VM as VMBinding>::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::<E::VM, u8>(
-                *obj,
-                1,
-                None,
-                Ordering::SeqCst,
-            );
-        }
-        // scan modbuf only if the current GC is a nursery GC
-        if mmtk
-            .get_plan()
-            .generational()
-            .unwrap()
-            .is_current_gc_nursery()
-        {
+        // Process and scan modbuf only if the current GC is a nursery GC
+        let gen = mmtk.get_plan().generational().unwrap();
+        if gen.is_current_gc_nursery() {
+            // Flip the per-object unlogged bits to "unlogged" state.
+            for obj in &self.modbuf {
+                debug_assert!(
+                    !gen.is_object_in_nursery(*obj),
+                    "{} was logged but is not mature. Dumping process memory maps:\n{}",
+                    *obj,
+                    crate::util::memory::get_process_memory_maps(),
+                );
+                <E::VM as VMBinding>::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::<E::VM, u8>(
+                    *obj,
+                    1,
+                    None,
+                    Ordering::SeqCst,
+                );
+            }
             // Scan objects in the modbuf and forward pointers
             let modbuf = std::mem::take(&mut self.modbuf);
             GCWork::do_work(
diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs
index 37c7249245..33d4651bcf 100644
--- a/src/plan/generational/global.rs
+++ b/src/plan/generational/global.rs
@@ -227,6 +227,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
         if self.common.get_los().in_space(object) {
             return self.common.get_los().trace_object::<Q>(queue, object);
         }
+
         object
     }
 
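Note on the ProcessModBuf change above: the reordering only makes sense alongside the object-remembering barrier in barriers.rs. The barrier logs a mature object exactly once and pushes it onto a modbuf; the collector re-unlogs whatever it actually consumes. Below is a minimal, self-contained sketch of that protocol using toy stand-ins (ToyObject and a plain Vec as the modbuf, not mmtk-core's metadata APIs):

    use std::sync::atomic::{AtomicU8, Ordering};

    const LOGGED: u8 = 0; // writes to this object are already recorded
    const UNLOGGED: u8 = 1; // mature object; its first write must be recorded

    struct ToyObject {
        log_bit: AtomicU8,
    }

    impl ToyObject {
        // Attempt to atomically log the object. Returns true if this thread
        // won the race and is responsible for remembering the object.
        fn try_log(&self) -> bool {
            self.log_bit
                .compare_exchange(UNLOGGED, LOGGED, Ordering::SeqCst, Ordering::SeqCst)
                .is_ok()
        }
    }

    // Barrier slow path: only the first write to an unlogged object pushes it
    // onto the modbuf; later writes see LOGGED and fall through.
    fn object_reference_write_slow<'a>(obj: &'a ToyObject, modbuf: &mut Vec<&'a ToyObject>) {
        if obj.try_log() {
            modbuf.push(obj);
        }
    }

    // Mirrors the reordered ProcessModBuf::do_work: unlog bits are flipped
    // back only when the modbuf is consumed, i.e. in a nursery GC. In a
    // full-heap GC the bits are bulk-cleared and live objects are re-unlogged
    // during tracing instead.
    fn process_modbuf(modbuf: Vec<&ToyObject>, is_nursery_gc: bool) {
        if is_nursery_gc {
            for obj in &modbuf {
                obj.log_bit.store(UNLOGGED, Ordering::SeqCst);
                // ... scan `obj` for edges into the nursery ...
            }
        }
    }

    fn main() {
        let mature = ToyObject { log_bit: AtomicU8::new(UNLOGGED) };
        let mut modbuf = Vec::new();
        object_reference_write_slow(&mature, &mut modbuf);
        object_reference_write_slow(&mature, &mut modbuf); // no-op: already logged
        assert_eq!(modbuf.len(), 1);
        process_modbuf(modbuf, true);
        assert_eq!(mature.log_bit.load(Ordering::SeqCst), UNLOGGED);
    }
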
diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs
index ba2850b3a4..80d2fbc2f5 100644
--- a/src/plan/generational/immix/global.rs
+++ b/src/plan/generational/immix/global.rs
@@ -249,10 +249,8 @@ impl<VM: VMBinding> GenImmix<VM> {
         let immix_space = ImmixSpace::new(
             plan_args.get_space_args("immix_mature", true, false, VMRequest::discontiguous()),
             ImmixSpaceArgs {
-                reset_log_bit_in_major_gc: false,
-                // We don't need to unlog objects at tracing. Instead, we unlog objects at copying.
-                // Any object is moved into the mature space, or is copied inside the mature space. We will unlog it.
-                unlog_object_when_traced: false,
+                // We need to unlog objects at tracing time since we currently clear all log bits during a major GC
+                unlog_object_when_traced: true,
                 // In GenImmix, young objects are not allocated in ImmixSpace directly.
                 #[cfg(feature = "vo_bit")]
                 mixed_age: false,
diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs
index 9dc6380660..2e0e665f78 100644
--- a/src/plan/immix/global.rs
+++ b/src/plan/immix/global.rs
@@ -136,7 +136,6 @@ impl<VM: VMBinding> Immix<VM> {
         Self::new_with_args(
             plan_args,
             ImmixSpaceArgs {
-                reset_log_bit_in_major_gc: false,
                 unlog_object_when_traced: false,
                 #[cfg(feature = "vo_bit")]
                 mixed_age: false,
diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs
index 4a19f77841..d413d85632 100644
--- a/src/plan/marksweep/global.rs
+++ b/src/plan/marksweep/global.rs
@@ -57,7 +57,7 @@ impl<VM: VMBinding> Plan for MarkSweep<VM> {
 
     fn prepare(&mut self, tls: VMWorkerThread) {
         self.common.prepare(tls, true);
-        self.ms.prepare();
+        self.ms.prepare(true);
     }
 
     fn release(&mut self, tls: VMWorkerThread) {
diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs
index 7b41c7b854..7e78347477 100644
--- a/src/plan/sticky/immix/global.rs
+++ b/src/plan/sticky/immix/global.rs
@@ -254,6 +254,7 @@ impl<VM: VMBinding> crate::plan::generational::global::GenerationalPlanExt<VM> f
             trace!("Immix mature object {}, skip", object);
             return object;
         } else {
+            // Nursery object
             let object = if KIND == TRACE_KIND_TRANSITIVE_PIN || KIND == TRACE_KIND_FAST {
                 trace!(
                     "Immix nursery object {} is being traced without moving",
@@ -326,9 +327,6 @@ impl<VM: VMBinding> StickyImmix<VM> {
                 // Every object we trace in full heap GC is a mature object. Thus in both cases,
                 // they should be unlogged.
                 unlog_object_when_traced: true,
-                // In full heap GC, mature objects may die, and their unlogged bit needs to be reset.
-                // Along with the option above, we unlog them again during tracing.
-                reset_log_bit_in_major_gc: true,
                 // In StickyImmix, both young and old objects are allocated in the ImmixSpace.
                 #[cfg(feature = "vo_bit")]
                 mixed_age: true,
diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs
index ddf8e5bb04..bbc66ccd4d 100644
--- a/src/policy/copyspace.rs
+++ b/src/policy/copyspace.rs
@@ -196,6 +196,12 @@ impl<VM: VMBinding> CopySpace<VM> {
             side_forwarding_status_table.bzero_metadata(start, size);
         }
 
+        if self.common.needs_log_bit {
+            if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
+                side.bzero_metadata(start, size);
+            }
+        }
+
         // Clear VO bits because all objects in the space are dead.
         #[cfg(feature = "vo_bit")]
         crate::util::metadata::vo_bit::bzero_vo_bit(start, size);
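Note on the CopySpace hunk above (and the matching prepare-time loops added to ImmixSpace and MarkSweepSpace below): bulk clearing is only possible because the log bit lives in side metadata, a dense bitmap indexed by address, so a whole heap range maps to one contiguous metadata range. A sketch of the arithmetic with illustrative constants (GRANULE and BITS_PER_BYTE here are assumptions for the example, not mmtk-core values; the real code goes through SideMetadataSpec::bzero_metadata):

    // One log bit covers one GRANULE of heap; 8 log bits per metadata byte.
    const GRANULE: usize = 16; // assumed bytes of heap per log bit (illustrative)
    const BITS_PER_BYTE: usize = 8;

    // Zero the metadata bytes covering the heap range [start, start + size).
    fn bzero_log_bits(bitmap: &mut [u8], heap_start: usize, start: usize, size: usize) {
        let first = (start - heap_start) / GRANULE / BITS_PER_BYTE;
        let last = (start - heap_start + size) / GRANULE / BITS_PER_BYTE;
        bitmap[first..last].fill(0);
    }

    fn main() {
        let heap_start = 0x1_0000;
        // 64 metadata bytes cover 64 * 8 * 16 = 8 KiB of heap; start all-unlogged.
        let mut bitmap = vec![0xffu8; 64];
        // Clearing 4 KiB of heap touches only 4096 / 16 / 8 = 32 metadata bytes.
        bzero_log_bits(&mut bitmap, heap_start, heap_start, 4096);
        assert!(bitmap[..32].iter().all(|&b| b == 0));
        assert!(bitmap[32..].iter().all(|&b| b == 0xff));
    }
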
diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index 331600bc91..b932808d8f 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -65,14 +65,6 @@ pub struct ImmixSpaceArgs {
     /// (no matter we copy an object or not). So we have to use `PromoteToMature`, and instead
     /// just set the log bit in the space when an object is traced.
     pub unlog_object_when_traced: bool,
-    /// Reset log bit at the start of a major GC.
-    /// Normally we do not need to do this. When immix is used as the mature space,
-    /// any object should be set as unlogged, and that bit does not need to be cleared
-    /// even if the object is dead. But in sticky Immix, the mature object and
-    /// the nursery object are in the same space, we will have to use the
-    /// bit to differentiate them. So we reset all the log bits in major GCs,
-    /// and unlogged the objects when they are traced (alive).
-    pub reset_log_bit_in_major_gc: bool,
     /// Whether this ImmixSpace instance contains both young and old objects.
     /// This affects the updating of valid-object bits. If some lines or blocks of this ImmixSpace
     /// instance contain young objects, their VO bits need to be updated during this GC. Currently
@@ -293,7 +285,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
             Block::LOG_BYTES
         );
-        if space_args.unlog_object_when_traced || space_args.reset_log_bit_in_major_gc {
+        if space_args.unlog_object_when_traced {
             assert!(
                 args.constraints.needs_log_bit,
                 "Invalid args when the plan does not use log bit"
             );
@@ -389,6 +381,14 @@ impl<VM: VMBinding> ImmixSpace<VM> {
             unimplemented!("cyclic mark bits is not supported at the moment");
         }
 
+        if self.common.needs_log_bit {
+            if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
+                for chunk in self.chunk_map.all_chunks() {
+                    side.bzero_metadata(chunk.start(), Chunk::BYTES);
+                }
+            }
+        }
+
         // Prepare defrag info
         if super::DEFRAG {
             self.defrag.prepare(self, plan_stats);
@@ -838,6 +838,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
 /// A work packet to prepare each block for a major GC.
 /// Performs the action on a range of chunks.
 pub struct PrepareBlockState<VM: VMBinding> {
+    #[allow(dead_code)]
     pub space: &'static ImmixSpace<VM>,
     pub chunk: Chunk,
     pub defrag_threshold: Option<usize>,
@@ -851,17 +852,6 @@ impl<VM: VMBinding> PrepareBlockState<VM> {
         if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC {
             side.bzero_metadata(self.chunk.start(), Chunk::BYTES);
         }
-        if self.space.space_args.reset_log_bit_in_major_gc {
-            if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
-                // We zero all the log bits in major GC, and for every object we trace, we will mark the log bit again.
-                side.bzero_metadata(self.chunk.start(), Chunk::BYTES);
-            } else {
-                // If the log bit is not in side metadata, we cannot bulk zero. We can either
-                // clear the bit for dead objects in major GC, or clear the log bit for new
-                // objects. In either cases, we do not need to set log bit at tracing.
-                unimplemented!("We cannot bulk zero unlogged bit.")
-            }
-        }
     }
 }
 
diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs
index cc4dd75bfc..c08ffeeaaa 100644
--- a/src/policy/immortalspace.rs
+++ b/src/policy/immortalspace.rs
@@ -213,6 +213,15 @@ impl<VM: VMBinding> ImmortalSpace<VM> {
             object
         );
         if self.mark_state.test_and_mark::<VM>(object) {
+            // Set the unlog bit if required
+            if self.common.needs_log_bit {
+                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::<VM, u8>(
+                    object,
+                    1,
+                    None,
+                    Ordering::SeqCst,
+                );
+            }
             queue.enqueue(object);
         }
         object
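Note on the ImmixSpace and ImmortalSpace hunks above: together they implement one protocol, namely bulk-clear every log bit when a major GC starts, then set the bit back to 1 (unlogged) for each object that the trace proves live and mature. A toy model of that invariant (the types are stand-ins for the example, not mmtk-core APIs):

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum LogState {
        Logged,   // 0: the next write to this object will be remembered
        Unlogged, // 1: mature object, already subject to the barrier
    }

    struct ToySpace {
        log_bits: Vec<LogState>,
    }

    impl ToySpace {
        // prepare() of a major GC: bulk-clear, nothing is considered mature.
        fn prepare_major_gc(&mut self) {
            self.log_bits.fill(LogState::Logged);
        }

        // trace_object(): every object reached by a full-heap trace survives
        // as a mature object, so its unlog bit is re-established here.
        fn trace_object(&mut self, idx: usize) {
            self.log_bits[idx] = LogState::Unlogged;
        }
    }

    fn main() {
        let mut space = ToySpace { log_bits: vec![LogState::Unlogged; 4] };
        space.prepare_major_gc();
        space.trace_object(0);
        space.trace_object(2);
        // Objects 1 and 3 died: their bits stay Logged, so memory reused for
        // new nursery objects cannot inherit a stale unlog bit.
        assert_eq!(space.log_bits[1], LogState::Logged);
        assert_eq!(space.log_bits[3], LogState::Logged);
    }
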
diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs
index cdeb87a9c0..effb39febd 100644
--- a/src/policy/largeobjectspace.rs
+++ b/src/policy/largeobjectspace.rs
@@ -269,7 +269,9 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
             trace!("LOS object {} is being marked now", object);
             self.treadmill.copy(object, nursery_object);
             // We just moved the object out of the logical nursery, mark it as unlogged.
-            if nursery_object && self.common.needs_log_bit {
+            // We also unlog mature objects as their unlog bit may have been unset before the
+            // full-heap GC
+            if self.common.needs_log_bit {
                 VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC
                     .mark_as_unlogged::<VM>(object, Ordering::SeqCst);
             }
@@ -288,6 +290,10 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
         let sweep = |object: ObjectReference| {
             #[cfg(feature = "vo_bit")]
             crate::util::metadata::vo_bit::unset_vo_bit(object);
+            // Clear log bits for dead objects to prevent a new nursery object having the unlog bit set
+            if self.common.needs_log_bit {
+                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::<VM>(object, Ordering::SeqCst);
+            }
             self.pr
                 .release_pages(get_super_page(object.to_object_start::<VM>()));
         };
diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs
index 83ec1e369b..3b7b082770 100644
--- a/src/policy/marksweepspace/malloc_ms/global.rs
+++ b/src/policy/marksweepspace/malloc_ms/global.rs
@@ -488,7 +488,7 @@ impl<VM: VMBinding> MallocSpace<VM> {
         }
     }
 
-    pub fn prepare(&mut self) {}
+    pub fn prepare(&mut self, _full_heap: bool) {}
 
     pub fn release(&mut self) {
         use crate::scheduler::WorkBucketStage;
diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index f39b051156..0b349c4508 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -405,7 +405,15 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
         self.chunk_map.set(block.chunk(), ChunkState::Allocated);
     }
 
-    pub fn prepare(&mut self) {
+    pub fn prepare(&mut self, full_heap: bool) {
+        if self.common.needs_log_bit && full_heap {
+            if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
+                for chunk in self.chunk_map.all_chunks() {
+                    side.bzero_metadata(chunk.start(), Chunk::BYTES);
+                }
+            }
+        }
+
         #[cfg(debug_assertions)]
         self.abandoned_in_gc.lock().unwrap().assert_empty();
 
diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs
index c19e3a516b..95fdfe7369 100644
--- a/src/policy/vmspace.rs
+++ b/src/policy/vmspace.rs
@@ -283,6 +283,17 @@ impl<VM: VMBinding> VMSpace<VM> {
         );
         debug_assert!(self.in_space(object));
         if self.mark_state.test_and_mark::<VM>(object) {
+            // Flip the per-object unlogged bits to "unlogged" state for objects inside the
+            // bootimage
+            #[cfg(feature = "set_unlog_bits_vm_space")]
+            if self.common.needs_log_bit {
+                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::<VM, u8>(
+                    object,
+                    1,
+                    None,
+                    Ordering::SeqCst,
+                );
+            }
             queue.enqueue(object);
         }
         object
diff --git a/src/util/metadata/log_bit.rs b/src/util/metadata/log_bit.rs
index 78e198ea35..a5a9b8644f 100644
--- a/src/util/metadata/log_bit.rs
+++ b/src/util/metadata/log_bit.rs
@@ -6,6 +6,11 @@ use std::sync::atomic::Ordering;
 use super::MetadataSpec;
 
 impl VMGlobalLogBitSpec {
+    /// Clear the unlog bit to log object (0 means logged)
+    pub fn clear<VM: VMBinding>(&self, object: ObjectReference, order: Ordering) {
+        self.store_atomic::<VM, u8>(object, 0, None, order)
+    }
+
     /// Mark the log bit as unlogged (1 means unlogged)
     pub fn mark_as_unlogged<VM: VMBinding>(&self, object: ObjectReference, order: Ordering) {
         self.store_atomic::<VM, u8>(object, 1, None, order)
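Note on the new helper pair in log_bit.rs: 0 means logged, 1 means unlogged, and both helpers store through store_atomic::<VM, u8>. A toy mirror of how the LargeObjectSpace paths above use them, with an AtomicU8 standing in for the real metadata:

    use std::sync::atomic::{AtomicU8, Ordering};

    struct ToyLogBit(AtomicU8);

    impl ToyLogBit {
        // Clear the unlog bit to log the object (0 means logged).
        fn clear(&self, order: Ordering) {
            self.0.store(0, order);
        }

        // Mark the object as unlogged (1 means unlogged).
        fn mark_as_unlogged(&self, order: Ordering) {
            self.0.store(1, order);
        }
    }

    fn main() {
        let bit = ToyLogBit(AtomicU8::new(1));
        // sweep(): the object died, so clear the bit; whatever reuses this
        // memory starts out logged.
        bit.clear(Ordering::SeqCst);
        assert_eq!(bit.0.load(Ordering::SeqCst), 0);
        // trace/copy path: the object is provably mature again, so unlog it
        // and let the write barrier remember the first subsequent write.
        bit.mark_as_unlogged(Ordering::SeqCst);
        assert_eq!(bit.0.load(Ordering::SeqCst), 1);
    }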