diff --git a/jikesrvm/rvm/src/org/jikesrvm/mm/mminterface/MMTkMutatorContext.java b/jikesrvm/rvm/src/org/jikesrvm/mm/mminterface/MMTkMutatorContext.java
index ea7a1ebb..ae3f3da6 100644
--- a/jikesrvm/rvm/src/org/jikesrvm/mm/mminterface/MMTkMutatorContext.java
+++ b/jikesrvm/rvm/src/org/jikesrvm/mm/mminterface/MMTkMutatorContext.java
@@ -148,6 +148,22 @@ public abstract class MMTkMutatorContext extends MutatorContext {
   @Entrypoint
   Address immixAllocator0OptionLineData;
 
+  // 1 x MarkCompactAllocator (same layout as bump allocator)
+  @Entrypoint
+  Address markCompactAllocator0Tls;
+  @Entrypoint
+  Address markCompactAllocator0Cursor;
+  @Entrypoint
+  Address markCompactAllocator0Limit;
+  @Entrypoint
+  Address markCompactAllocator0Space;
+  @Entrypoint
+  Address markCompactAllocator0SpaceFat;
+  @Entrypoint
+  Address markCompactAllocator0Plan;
+  @Entrypoint
+  Address markCompactAllocator0PlanFat;
+
   // barrier
   @Entrypoint
   Address barrier;
@@ -186,6 +202,8 @@ public abstract class MMTkMutatorContext extends MutatorContext {
   static final int MAX_BUMP_ALLOCATORS = 5;
   static final int MAX_LARGE_OBJECT_ALLOCATORS = 1;
   static final int MAX_MALLOC_ALLOCATORS = 1;
+  static final int MAX_IMMIX_ALLOCATORS = 1;
+  static final int MAX_MARK_COMPACT_ALLOCATORS = 1;
   // Bump allocator size
   static final int BUMP_ALLOCATOR_SIZE = 7 * BYTES_IN_WORD;
   // Bump allocator field offsets
@@ -200,6 +218,10 @@ public abstract class MMTkMutatorContext extends MutatorContext {
   static final int LARGE_OBJECT_ALLOCATOR_SIZE = 4 * BYTES_IN_WORD;
   // Malloc allocator size. We do not need offsets for each field, as we don't need to implement fastpath for large object allocator.
   static final int MALLOC_ALLOCATOR_SIZE = 4 * BYTES_IN_WORD;
+  // Immix allocator size
+  static final int IMMIX_ALLOCATOR_SIZE = 13 * BYTES_IN_WORD;
+  // Mark compact allocator size (the same as bump allocator)
+  static final int MARK_COMPACT_ALLOCATOR_SIZE = BUMP_ALLOCATOR_SIZE;
   // The base offset of this mutator section
   static final Offset MUTATOR_BASE_OFFSET = EntrypointHelper.getField(MMTkMutatorContext.class, "bumpAllocator0Tls", Address.class).getOffset();
@@ -215,6 +237,8 @@ public abstract class MMTkMutatorContext extends MutatorContext {
   public static final int TAG_BUMP_POINTER = 0;
   public static final int TAG_LARGE_OBJECT = 1;
   public static final int TAG_MALLOC = 2;
+  public static final int TAG_IMMIX = 3;
+  public static final int TAG_MARK_COMPACT = 4;
 
   // tag for space type
   public static final int IMMORTAL_SPACE = 0;
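A quick sanity check on the layout above, separate from the patch itself: the mark-compact slot reuses the bump-pointer layout (Tls, Cursor, Limit, Space, SpaceFat, Plan, PlanFat), so its cursor and limit sit one and two words past its base. The sketch below mirrors the Java constants in plain Rust to show where that slot would land relative to MUTATOR_BASE_OFFSET. It assumes the allocator sections are packed in field-declaration order (bump, large object, malloc, immix, mark compact) and a 4-byte word (32-bit JikesRVM), so the concrete numbers are illustrative only.

// Illustrative only: constants mirrored from MMTkMutatorContext.java above.
const BYTES_IN_WORD: usize = 4; // assumption: 32-bit JikesRVM build

const MAX_BUMP_ALLOCATORS: usize = 5;
const MAX_LARGE_OBJECT_ALLOCATORS: usize = 1;
const MAX_MALLOC_ALLOCATORS: usize = 1;
const MAX_IMMIX_ALLOCATORS: usize = 1;

const BUMP_ALLOCATOR_SIZE: usize = 7 * BYTES_IN_WORD;
const LARGE_OBJECT_ALLOCATOR_SIZE: usize = 4 * BYTES_IN_WORD;
const MALLOC_ALLOCATOR_SIZE: usize = 4 * BYTES_IN_WORD;
const IMMIX_ALLOCATOR_SIZE: usize = 13 * BYTES_IN_WORD;

fn main() {
    // Assumed section order: bump, large object, malloc, immix, then mark compact.
    let mark_compact_base = MAX_BUMP_ALLOCATORS * BUMP_ALLOCATOR_SIZE
        + MAX_LARGE_OBJECT_ALLOCATORS * LARGE_OBJECT_ALLOCATOR_SIZE
        + MAX_MALLOC_ALLOCATORS * MALLOC_ALLOCATOR_SIZE
        + MAX_IMMIX_ALLOCATORS * IMMIX_ALLOCATOR_SIZE;

    // Same layout as the bump allocator: word 0 = Tls, word 1 = Cursor, word 2 = Limit.
    let cursor = mark_compact_base + BYTES_IN_WORD;
    let limit = mark_compact_base + 2 * BYTES_IN_WORD;

    println!("mark-compact base   = MUTATOR_BASE_OFFSET + {} bytes", mark_compact_base);
    println!("mark-compact cursor = MUTATOR_BASE_OFFSET + {} bytes", cursor);
    println!("mark-compact limit  = MUTATOR_BASE_OFFSET + {} bytes", limit);
}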
diff --git a/mmtk/Cargo.toml b/mmtk/Cargo.toml
index 44c19d84..35f3d2bb 100644
--- a/mmtk/Cargo.toml
+++ b/mmtk/Cargo.toml
@@ -21,7 +21,7 @@ log = {version = "0.4", features = ["max_level_trace", "release_max_level_off"]
 # - change branch/rev
 # - change repo name
 # But other changes including adding/removing whitespaces in commented lines may break the CI.
-mmtk = { git = "https://github.com/mmtk/mmtk-core.git", rev = "8e50e2c3fa8b45f7e4cd4b9e87e986fb5c17b07e" }
+mmtk = { git = "https://github.com/mmtk/mmtk-core.git", rev = "4bd6ca82ad037dac97920bf3a1408e4912a6bf04" }
 # Uncomment the following to build locally - if you change the path locally, do not commit the change in a PR
 # mmtk = { path = "../repos/mmtk-core" }
 
diff --git a/mmtk/src/object_model.rs b/mmtk/src/object_model.rs
index cb5a6131..3cf4f80f 100644
--- a/mmtk/src/object_model.rs
+++ b/mmtk/src/object_model.rs
@@ -178,7 +178,7 @@ impl ObjectModel for VMObjectModel {
         let rvm_type = Self::load_rvm_type(from);
 
         trace!("Is it a class?");
-        if unsafe { (rvm_type + IS_CLASS_TYPE_FIELD_OFFSET).load::<bool>() } {
+        if Self::is_class(rvm_type) {
             trace!("... yes");
             Self::copy_scalar(from, tib, rvm_type, allocator, copy_context)
         } else {
@@ -231,6 +231,32 @@ impl ObjectModel for VMObjectModel {
         Self::bytes_used(object, rvm_type)
     }
 
+    fn get_size_when_copied(object: ObjectReference) -> usize {
+        let rvm_type = Self::load_rvm_type(object);
+
+        Self::bytes_required_when_copied(object, rvm_type)
+    }
+
+    fn get_align_when_copied(object: ObjectReference) -> usize {
+        let rvm_type = Self::load_rvm_type(object);
+
+        if Self::is_class(rvm_type) {
+            Self::get_alignment_class(rvm_type)
+        } else {
+            Self::get_alignment_array(rvm_type)
+        }
+    }
+
+    fn get_align_offset_when_copied(object: ObjectReference) -> isize {
+        let rvm_type = Self::load_rvm_type(object);
+
+        if Self::is_class(rvm_type) {
+            Self::get_offset_for_alignment_class(object, rvm_type)
+        } else {
+            Self::get_offset_for_alignment_array(object, rvm_type)
+        }
+    }
+
     fn get_type_descriptor(_reference: ObjectReference) -> &'static [i8] {
         unimplemented!()
     }
@@ -263,6 +289,10 @@ impl ObjectModel for VMObjectModel {
 }
 
 impl VMObjectModel {
+    fn is_class(rvm_type: Address) -> bool {
+        unsafe { (rvm_type + IS_CLASS_TYPE_FIELD_OFFSET).load::<bool>() }
+    }
+
     #[inline(always)]
     fn copy_scalar(
         from: ObjectReference,
@@ -319,12 +349,10 @@ impl VMObjectModel {
     #[inline(always)]
     fn bytes_required_when_copied(object: ObjectReference, rvm_type: Address) -> usize {
         trace!("VMObjectModel.bytes_required_when_copied");
-        unsafe {
-            if (rvm_type + IS_CLASS_TYPE_FIELD_OFFSET).load::<bool>() {
-                Self::bytes_required_when_copied_class(object, rvm_type)
-            } else {
-                Self::bytes_required_when_copied_array(object, rvm_type)
-            }
+        if Self::is_class(rvm_type) {
+            Self::bytes_required_when_copied_class(object, rvm_type)
+        } else {
+            Self::bytes_required_when_copied_array(object, rvm_type)
         }
     }
 
@@ -375,7 +403,7 @@ impl VMObjectModel {
     fn bytes_used(object: ObjectReference, rvm_type: Address) -> usize {
         trace!("VMObjectModel.bytes_used");
         unsafe {
-            let is_class = (rvm_type + IS_CLASS_TYPE_FIELD_OFFSET).load::<bool>();
+            let is_class = Self::is_class(rvm_type);
             let mut size = if is_class {
                 (rvm_type + INSTANCE_SIZE_FIELD_OFFSET).load::<usize>()
             } else {
diff --git a/mmtk/src/scanning.rs b/mmtk/src/scanning.rs
index 6162ec29..98c1c5eb 100644
--- a/mmtk/src/scanning.rs
+++ b/mmtk/src/scanning.rs
@@ -172,6 +172,11 @@ impl Scanning for VMScanning {
         // FIXME: Really?
         cfg!(target_arch = "x86")
     }
+
+    fn prepare_for_roots_re_scanning() {
+        // We probably do not need to do anything special here, but leave this unimplemented for now.
+        unimplemented!()
+    }
 }
 
 struct ObjectsClosure<'a, E: ProcessEdgesWork>(
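On the object_model.rs side, get_align_when_copied and get_align_offset_when_copied come as a pair because JikesRVM aligns a point inside the object (for example, the first array element) rather than the object start. The sketch below shows how such a size/alignment/alignment-offset triple is typically consumed when reserving room for a copy; align_copy_start is a hypothetical helper written only for illustration (it is not an mmtk-core or binding API), and the numeric inputs are made up.

// Illustrative only. `align_copy_start` is a hypothetical helper: it bumps
// `region` forward until (region + offset) is a multiple of `align`, which is
// the usual meaning of an alignment offset in this kind of object model.
fn align_copy_start(region: usize, align: usize, offset: isize) -> usize {
    debug_assert!(align.is_power_of_two());
    let mask = (align - 1) as isize;
    let aligned = ((region as isize + offset + mask) & !mask) - offset;
    aligned as usize
}

fn main() {
    // Made-up per-object queries: 20 bytes, align an interior point 4 bytes in
    // (roughly what an array wanting an 8-byte-aligned element 0 would ask for).
    let (size, align, align_offset) = (20usize, 8usize, 4isize);

    let cursor = 0x1000_0004usize; // pretend copy-space cursor
    let start = align_copy_start(cursor, align, align_offset);
    let new_cursor = start + size;

    // The interior point (start + offset) ends up aligned, even though the
    // object start itself may not be.
    assert_eq!((start as isize + align_offset) % align as isize, 0);
    println!("copy placed at {:#x}, cursor advances to {:#x}", start, new_cursor);
}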