Merged
17 changes: 12 additions & 5 deletions benches/main.rs
@@ -19,18 +19,25 @@ use criterion::Criterion;
// However, I will just keep these benchmarks here. If we find them not useful and we do not plan to improve MockVM, we can delete
// them.

mod alloc;
mod sft;
#[cfg(feature = "mock_test")]
mod mock_bench;

fn bench_main(c: &mut Criterion) {
pub fn bench_main(_c: &mut Criterion) {
#[cfg(feature = "mock_test")]
match std::env::var("MMTK_BENCH") {
Ok(bench) => match bench.as_str() {
"alloc" => alloc::bench(c),
"sft" => sft::bench(c),
"alloc" => mock_bench::alloc::bench(_c),
"sft" => mock_bench::sft::bench(_c),
_ => panic!("Unknown benchmark {:?}", bench),
},
Err(_) => panic!("Need to name a benchmark by the env var MMTK_BENCH"),
}

#[cfg(not(feature = "mock_test"))]
{
eprintln!("ERROR: Currently there are no benchmarks when the \"mock_test\" feature is not enabled.");
std::process::exit(1);
}
}

criterion_group!(benches, bench_main);
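With this change the benchmark to run is chosen at run time via the MMTK_BENCH environment variable rather than by a compile-time module list, e.g. `MMTK_BENCH=alloc cargo bench --features mock_test` (assuming the crate's usual Criterion bench setup).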
File renamed without changes.
2 changes: 2 additions & 0 deletions benches/mock_bench/mod.rs
@@ -0,0 +1,2 @@
pub mod alloc;
pub mod sft;
File renamed without changes.
1 change: 1 addition & 0 deletions src/plan/generational/immix/global.rs
@@ -251,6 +251,7 @@ impl<VM: VMBinding> GenImmix<VM> {
// Any object is moved into the mature space, or is copied inside the mature space. We will unlog it.
unlog_object_when_traced: false,
// In GenImmix, young objects are not allocated in ImmixSpace directly.
#[cfg(feature = "vo_bit")]
mixed_age: false,
},
);
1 change: 1 addition & 0 deletions src/plan/immix/global.rs
@@ -138,6 +138,7 @@ impl<VM: VMBinding> Immix<VM> {
ImmixSpaceArgs {
reset_log_bit_in_major_gc: false,
unlog_object_when_traced: false,
#[cfg(feature = "vo_bit")]
mixed_age: false,
},
)
12 changes: 0 additions & 12 deletions src/plan/mutator_context.rs
@@ -153,13 +153,6 @@ impl<VM: VMBinding> MutatorContext<VM> for Mutator<VM> {
self.mutator_tls
}

/// Used by specialized barrier slow-path calls to avoid dynamic dispatches.
unsafe fn barrier_impl<B: Barrier<VM>>(&mut self) -> &mut B {
debug_assert!(self.barrier().is::<B>());
let (payload, _vptr) = std::mem::transmute::<_, (*mut B, *mut ())>(self.barrier());
&mut *payload
}

fn barrier(&mut self) -> &mut dyn Barrier<VM> {
&mut *self.barrier
}
@@ -316,11 +309,6 @@ pub trait MutatorContext<VM: VMBinding>: Send + 'static {
fn get_tls(&self) -> VMMutatorThread;
/// Get active barrier trait object
fn barrier(&mut self) -> &mut dyn Barrier<VM>;
/// Force cast the barrier trait object to a concrete implementation.
///
/// # Safety
/// The safety of this function is ensured by a down-cast check.
unsafe fn barrier_impl<B: Barrier<VM>>(&mut self) -> &mut B;
Member:
I agree a binding can implement this method entirely on the binding side if it needs to. We can remove it.

The comments for the write barrier methods mention using a specialized slow path, but they do not specifically mention this method. The comments seem to make sense even if we remove this method from our API. No change is required -- I just mention it in case you would like to improve the comments related to the function.

/// * Implement fast-path on the VM side, and do a specialized slow-path call.

Collaborator (author):
Yes. The comment mentions the slow path, but it does not explain how to make the "specialized slow-path call". Calling methods of the Barrier trait is only one step of it. I think it is obvious to use a downcast to eliminate a dynamic call for a trait that supports it.
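
For illustration, a minimal sketch of such a binding-side downcast, using a safe `std::any::Any` check in place of the removed transmute. The trait and method names below are hypothetical stand-ins, not the actual mmtk-core API:

```rust
use std::any::Any;

// Hypothetical stand-ins for the mmtk-core types; not the actual API.
trait Barrier: Any {
    // Expose the trait object as `&mut dyn Any` so it can be downcast.
    fn as_any_mut(&mut self) -> &mut dyn Any;
}

struct ObjectBarrier;

impl Barrier for ObjectBarrier {
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
}

impl ObjectBarrier {
    // The concrete slow path the binding wants to call without a vtable lookup.
    fn object_reference_write_slow(&mut self) {
        println!("specialized slow path");
    }
}

// A binding-side specialized slow-path call: one checked downcast, then static dispatch.
fn binding_slow_call(barrier: &mut dyn Barrier) {
    let concrete = barrier
        .as_any_mut()
        .downcast_mut::<ObjectBarrier>()
        .expect("the active plan's barrier is known to be ObjectBarrier");
    concrete.object_reference_write_slow();
}

fn main() {
    binding_slow_call(&mut ObjectBarrier);
}
```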

}

/// This is used for plans to indicate the number of allocators reserved for the plan.
1 change: 1 addition & 0 deletions src/plan/sticky/immix/global.rs
@@ -329,6 +329,7 @@ impl<VM: VMBinding> StickyImmix<VM> {
// Along with the option above, we unlog them again during tracing.
reset_log_bit_in_major_gc: true,
// In StickyImmix, both young and old objects are allocated in the ImmixSpace.
#[cfg(feature = "vo_bit")]
mixed_age: true,
},
);
2 changes: 2 additions & 0 deletions src/policy/immix/immixspace.rs
@@ -77,6 +77,8 @@ pub struct ImmixSpaceArgs {
/// instance contain young objects, their VO bits need to be updated during this GC. Currently
/// only StickyImmix is affected. GenImmix allocates young objects in a separate CopySpace
/// nursery and its VO bits can be cleared in bulk.
// Currently only used when "vo_bit" is enabled. Using #[cfg(...)] to eliminate the dead code warning.
#[cfg(feature = "vo_bit")]
pub mixed_age: bool,
}

8 changes: 6 additions & 2 deletions src/policy/sft_map.rs
@@ -103,8 +103,12 @@ pub(crate) type SFTRawPointer = *const (dyn SFT + Sync + 'static);
/// see if 128-bit atomic instructions are available.
#[cfg(target_pointer_width = "64")]
type AtomicDoubleWord = portable_atomic::AtomicU128;
#[cfg(target_pointer_width = "64")]
type DoubleWord = u128;
#[cfg(target_pointer_width = "32")]
type AtomicDoubleWord = portable_atomic::AtomicU64;
#[cfg(target_pointer_width = "32")]
type DoubleWord = u64;

/// The type we store the SFT raw pointer as. It is basically just a double-word-sized atomic integer.
/// This type provides an abstraction so we can access the SFT easily.
@@ -128,7 +132,7 @@ impl SFTRefStorage {
}

pub fn new(sft: SFTRawPointer) -> Self {
let val = unsafe { std::mem::transmute(sft) };
let val: DoubleWord = unsafe { std::mem::transmute(sft) };
Self(AtomicDoubleWord::new(val))
}

@@ -140,7 +144,7 @@ impl SFTRefStorage {

// Store a raw SFT pointer with the release ordering.
pub fn store(&self, sft: SFTRawPointer) {
let val = unsafe { std::mem::transmute(sft) };
let val: DoubleWord = unsafe { std::mem::transmute(sft) };
self.0.store(val, Ordering::Release)
}
}
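
For context on why a double word is needed at all: a trait-object pointer like `SFTRawPointer` is a fat pointer, one word of data pointer plus one word of vtable pointer. Below is a self-contained sketch of the round trip on a 64-bit target, using the same `portable_atomic` crate as the diff, with a toy trait standing in for `SFT`:

```rust
use portable_atomic::AtomicU128;
use std::sync::atomic::Ordering;

// Toy stand-in for the SFT trait; not the actual mmtk-core type.
trait Space {
    fn name(&self) -> &'static str;
}

struct EmptySpace;
impl Space for EmptySpace {
    fn name(&self) -> &'static str {
        "empty"
    }
}

static EMPTY: EmptySpace = EmptySpace;

fn main() {
    // A trait-object pointer is two words: data pointer + vtable pointer.
    // On a 64-bit target that is 128 bits, so it fits in one AtomicU128.
    let fat: *const (dyn Space + Sync + 'static) = &EMPTY;
    let bits: u128 = unsafe { std::mem::transmute(fat) };
    let slot = AtomicU128::new(bits);

    // Readers load both words in a single atomic operation and rebuild the pointer.
    let loaded: *const (dyn Space + Sync + 'static) =
        unsafe { std::mem::transmute(slot.load(Ordering::Acquire)) };
    assert_eq!(unsafe { (*loaded).name() }, "empty");
}
```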
3 changes: 1 addition & 2 deletions src/util/address.rs
@@ -134,7 +134,7 @@ impl Address {
/// The lowest possible address.
pub const ZERO: Self = Address(0);
/// The highest possible address.
pub const MAX: Self = Address(usize::max_value());
pub const MAX: Self = Address(usize::MAX);

/// Creates an Address from a pointer
pub fn from_ptr<T>(ptr: *const T) -> Address {
@@ -164,7 +164,6 @@ impl Address {
/// It is unsafe and the user needs to be aware that they are creating an invalid address.
/// The max address should only be used as uninitialized or sentinel values in performance critical code (where you don't want to use `Option<Address>`).
pub unsafe fn max() -> Address {
use std::usize;
Address(usize::MAX)
}

4 changes: 2 additions & 2 deletions src/util/constants.rs
@@ -78,8 +78,8 @@ mod java_specific_constants {
pub const LOG_BITS_IN_LONG: u8 = LOG_BITS_IN_BYTE + LOG_BYTES_IN_LONG;
pub const BITS_IN_LONG: usize = 1 << LOG_BITS_IN_LONG;

pub const MAX_INT: usize = i32::max_value() as usize; // 0x7fff_ffff
pub const MIN_INT: usize = i32::min_value() as u32 as usize; // 0x8000_0000
pub const MAX_INT: usize = i32::MAX as usize; // 0x7fff_ffff
pub const MIN_INT: usize = i32::MIN as u32 as usize; // 0x8000_0000
}
pub(crate) use java_specific_constants::*;

3 changes: 2 additions & 1 deletion src/util/erase_vm.rs
@@ -17,7 +17,8 @@ macro_rules! define_erased_vm_mut_ref {
pub struct $new_type<'a>(usize, PhantomData<&'a ()>);
impl<'a> $new_type<'a> {
pub fn new<VM: VMBinding>(r: &'a mut $orig_type) -> Self {
Self(unsafe { std::mem::transmute(r) }, PhantomData)
let worker_as_usize: usize = unsafe { std::mem::transmute(r) };
Self(worker_as_usize, PhantomData)
}
pub fn into_mut<VM: VMBinding>(self) -> &'a mut $orig_type {
unsafe { std::mem::transmute(self.0) }
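As a standalone illustration of the pattern this macro generates, here is a sketch with a hypothetical `ErasedMutRef` type (not the generated code itself). Note that an `as` pointer cast is enough here, avoiding `transmute` altogether:

```rust
use std::marker::PhantomData;

// Hypothetical erased handle mirroring the shape of define_erased_vm_mut_ref!:
// the borrow is stored as a plain usize, and the lifetime 'a is kept alive
// only through PhantomData.
struct ErasedMutRef<'a>(usize, PhantomData<&'a ()>);

impl<'a> ErasedMutRef<'a> {
    fn new<T>(r: &'a mut T) -> Self {
        // Erase the pointee type by storing the address as a word.
        let addr: usize = r as *mut T as usize;
        ErasedMutRef(addr, PhantomData)
    }

    /// # Safety
    /// The caller must pass the same T the handle was created with.
    unsafe fn into_mut<T>(self) -> &'a mut T {
        unsafe { &mut *(self.0 as *mut T) }
    }
}

fn main() {
    let mut x = 42u32;
    let erased = ErasedMutRef::new(&mut x);
    let r: &mut u32 = unsafe { erased.into_mut::<u32>() };
    *r += 1;
    assert_eq!(x, 43);
}
```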
18 changes: 14 additions & 4 deletions src/util/heap/layout/byte_map_mmapper.rs
@@ -12,7 +12,6 @@ use std::sync::Mutex;

use atomic::Atomic;
use std::io::Result;
use std::mem::transmute;

const MMAP_NUM_CHUNKS: usize = if LOG_BYTES_IN_ADDRESS_SPACE == 32 {
1 << (LOG_BYTES_IN_ADDRESS_SPACE as usize - LOG_MMAP_CHUNK_BYTES)
@@ -134,11 +133,22 @@ impl Mmapper for ByteMapMmapper {

impl ByteMapMmapper {
pub fn new() -> Self {
// Hacky because AtomicU8 does not implement Copy
// Should be fiiine because AtomicXXX has the same bit representation as XXX
// Because AtomicU8 does not implement Copy, it is a compilation error to use the
// expression `[Atomic::new(MapState::Unmapped); MMAP_NUM_CHUNKS]` because that involves
// copying. We must define a constant for it.
//
// TODO: Use the inline const expression `const { Atomic::new(MapState::Unmapped) }` after
// we bump MSRV to 1.79.

// If we declare a const Atomic, Clippy will warn about const items being interior mutable.
// Using an inline const expression will eliminate this warning, but that is experimental until
// 1.79. Fix it after we bump MSRV.
#[allow(clippy::declare_interior_mutable_const)]
const INITIAL_ENTRY: Atomic<MapState> = Atomic::new(MapState::Unmapped);

ByteMapMmapper {
lock: Mutex::new(()),
mapped: unsafe { transmute([MapState::Unmapped; MMAP_NUM_CHUNKS]) },
mapped: [INITIAL_ENTRY; MMAP_NUM_CHUNKS],
strategy: Atomic::new(MmapStrategy::Normal),
}
}
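
A minimal, self-contained sketch of the `[CONST; N]` initialization pattern described in the comment above, using std's `AtomicU8` in place of `Atomic<MapState>` and a made-up `NUM_CHUNKS`:

```rust
use std::sync::atomic::{AtomicU8, Ordering};

const NUM_CHUNKS: usize = 4; // stand-in for MMAP_NUM_CHUNKS

fn main() {
    // `[expr; N]` normally requires `expr: Copy`, but a named constant is
    // allowed: it is evaluated once per element instead of being copied.
    #[allow(clippy::declare_interior_mutable_const)]
    const INITIAL: AtomicU8 = AtomicU8::new(0);
    let mapped: [AtomicU8; NUM_CHUNKS] = [INITIAL; NUM_CHUNKS];

    // From Rust 1.79, the inline-const form avoids both the named constant
    // and the Clippy warning:
    // let mapped = [const { AtomicU8::new(0) }; NUM_CHUNKS];

    mapped[0].store(1, Ordering::Relaxed);
    assert_eq!(mapped[0].load(Ordering::Relaxed), 1);
}
```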
17 changes: 14 additions & 3 deletions src/util/heap/layout/fragmented_mapper.rs
@@ -9,7 +9,6 @@ use atomic::{Atomic, Ordering};
use std::cell::UnsafeCell;
use std::fmt;
use std::io::Result;
use std::mem::transmute;
use std::sync::Mutex;

const MMAP_NUM_CHUNKS: usize = 1 << (33 - LOG_MMAP_CHUNK_BYTES);
@@ -242,8 +241,20 @@ impl FragmentedMapper {
}

fn new_slab() -> Box<Slab> {
let mapped: Box<Slab> =
Box::new(unsafe { transmute([MapState::Unmapped; MMAP_NUM_CHUNKS]) });
// Because AtomicU8 does not implement Copy, it is a compilation error to use the
// expression `[Atomic::new(MapState::Unmapped); MMAP_NUM_CHUNKS]` because that involves
// copying. We must define a constant for it.
//
// TODO: Use the inline const expression `const { Atomic::new(MapState::Unmapped) }` after
// we bump MSRV to 1.79.

// If we declare a const Atomic, Clippy will warn about const items being interior mutable.
// Using an inline const expression will eliminate this warning, but that is experimental until
// 1.79. Fix it after we bump MSRV.
#[allow(clippy::declare_interior_mutable_const)]
const INITIAL_ENTRY: Atomic<MapState> = Atomic::new(MapState::Unmapped);

let mapped: Box<Slab> = Box::new([INITIAL_ENTRY; MMAP_NUM_CHUNKS]);
mapped
}

2 changes: 1 addition & 1 deletion src/util/heap/space_descriptor.rs
@@ -35,7 +35,7 @@ impl SpaceDescriptor {
let top = end == vm_layout().heap_end;
if vm_layout().force_use_contiguous_spaces {
let space_index = if start > vm_layout().heap_end {
::std::usize::MAX
usize::MAX
} else {
start >> vm_layout().space_shift_64()
};
5 changes: 3 additions & 2 deletions src/util/int_array_freelist.rs
@@ -1,5 +1,5 @@
use super::freelist::*;
use std::{mem, ptr::NonNull};
use std::ptr::NonNull;

#[derive(Debug)]
pub struct IntArrayFreeList {
@@ -42,11 +42,12 @@ impl IntArrayFreeList {
iafl
}
pub fn from_parent(parent: &IntArrayFreeList, ordinal: i32) -> Self {
let parent_ptr = std::ptr::NonNull::from(parent);
let iafl = IntArrayFreeList {
head: -(1 + ordinal),
heads: parent.heads,
table: None,
parent: Some(unsafe { mem::transmute(parent) }),
parent: Some(parent_ptr),
};
debug_assert!(-iafl.head <= iafl.heads);
iafl
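The same idiom in isolation (illustrative names only): `NonNull::from` is the safe replacement for transmuting a reference, since a reference is guaranteed non-null by construction:

```rust
use std::ptr::NonNull;

fn main() {
    let parent = 5i32;
    // Safe replacement for `transmute(&parent)`: no unsafe block needed,
    // and the non-null invariant holds by construction.
    let ptr: NonNull<i32> = NonNull::from(&parent);
    assert_eq!(unsafe { *ptr.as_ref() }, 5);
}
```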
2 changes: 1 addition & 1 deletion src/util/options.rs
@@ -8,7 +8,7 @@ use strum_macros::EnumString;

/// The default stress factor. This is set to the max usize,
/// which means we will never trigger a stress GC for the default value.
pub const DEFAULT_STRESS_FACTOR: usize = usize::max_value();
pub const DEFAULT_STRESS_FACTOR: usize = usize::MAX;

/// The zeroing approach to use for new object allocations.
/// Affects each plan differently.
7 changes: 6 additions & 1 deletion src/util/test_util/mock_vm.rs
@@ -44,7 +44,12 @@ lazy_static! {
// we do not store them for access after the mock method returns.
macro_rules! lifetime {
($e: expr) => {
unsafe { std::mem::transmute($e) }
unsafe {
// The dynamic nature of this lifetime-removal macro makes it impossible to reason
// about the source and destination types of the `transmute`.
#[allow(clippy::missing_transmute_annotations)]
std::mem::transmute($e)
}
};
}
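
For a single concrete instantiation, the macro boils down to a lifetime-extending transmute like the hypothetical helper below (not part of mock_vm.rs). It is sound only under the invariant stated above, i.e. the reference is never kept after the mocked method returns:

```rust
/// Pretend a borrow lives for an arbitrary caller-chosen lifetime.
///
/// # Safety
/// The returned reference must not be used after the original borrow ends.
unsafe fn extend_lifetime<'a, 'b, T>(r: &'b T) -> &'a T {
    // Unlike in the general-purpose macro, the source and destination types
    // are known here, so the transmute can be fully annotated.
    unsafe { std::mem::transmute::<&'b T, &'a T>(r) }
}

fn main() {
    let x = 42;
    let r: &'static i32 = unsafe { extend_lifetime(&x) };
    // Only sound because we stop using `r` before `x` goes out of scope.
    assert_eq!(*r, 42);
}
```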

Expand Down