diff --git a/src/policy/sft_map.rs b/src/policy/sft_map.rs
index c84aba3e19..b71f7ab586 100644
--- a/src/policy/sft_map.rs
+++ b/src/policy/sft_map.rs
@@ -67,17 +67,23 @@ pub trait SFTMap {
 pub(crate) fn create_sft_map() -> Box<dyn SFTMap> {
     cfg_if::cfg_if! {
-        if #[cfg(all(feature = "malloc_mark_sweep", target_pointer_width = "64"))] {
-            // 64-bit malloc mark sweep needs a chunk-based SFT map, but the sparse map is not suitable for 64bits.
-            Box::new(dense_chunk_map::SFTDenseChunkMap::new())
-        } else if #[cfg(target_pointer_width = "64")] {
+        if #[cfg(target_pointer_width = "64")] {
+            // For 64 bits, we generally want to use the space map, which requires contiguous spaces and no off-heap memory.
+            // If these requirements are not met, we have to choose a different SFT map implementation.
             use crate::util::heap::layout::vm_layout::vm_layout;
-            if vm_layout().force_use_contiguous_spaces {
-                Box::new(space_map::SFTSpaceMap::new())
-            } else {
+            if !vm_layout().force_use_contiguous_spaces {
+                // This is usually the case for compressed pointers. Use the 32-bit implementation.
                 Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
+            } else if cfg!(any(feature = "malloc_mark_sweep", feature = "vm_space")) {
+                // We have off-heap memory (malloc'd objects, or the VM space). We have to use a chunk-based map.
+                Box::new(dense_chunk_map::SFTDenseChunkMap::new())
+            } else {
+                // We can use the space map.
+                Box::new(space_map::SFTSpaceMap::new())
             }
         } else if #[cfg(target_pointer_width = "32")] {
+            // Use the sparse chunk map. As we have a limited virtual address range on 32 bits,
+            // it is okay for the sparse chunk map to map every chunk to an index in the array.
             Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
         } else {
             compile_err!("Cannot figure out which SFT map to use.");
@@ -154,15 +160,17 @@ mod space_map {
     /// Space map is a small table, and it has one entry for each MMTk space.
     pub struct SFTSpaceMap {
         sft: Vec<SFTRefStorage>,
+        space_address_start: Address,
+        space_address_end: Address,
     }
 
     unsafe impl Sync for SFTSpaceMap {}
 
     impl SFTMap for SFTSpaceMap {
-        fn has_sft_entry(&self, _addr: Address) -> bool {
-            // Address::ZERO is mapped to index 0, and Address::MAX is mapped to index 31 (TABLE_SIZE-1)
-            // So any address has an SFT entry.
-            true
+        fn has_sft_entry(&self, addr: Address) -> bool {
+            // An arbitrary address from Address::ZERO to Address::MAX will be cyclically mapped to an index between 0 and 31.
+            // Only addresses within the virtual address range we use have valid entries.
+            addr >= self.space_address_start && addr < self.space_address_end
        }
 
         fn get_side_metadata(&self) -> Option<&SideMetadataSpec> {
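The new bounds check above exists because the space map's index function is cyclic. A minimal, self-contained sketch (with an assumed per-space extent of 2^41 bytes and a 32-entry table; the real constants come from `vm_layout()`) of how distant addresses alias to the same slot:

```rust
const LOG_SPACE_EXTENT: usize = 41; // assumed; MMTk takes this from vm_layout()
const TABLE_SIZE: usize = 32;

// Take the high bits of the address, wrapping around the 32-entry table.
fn addr_to_index(addr: usize) -> usize {
    (addr >> LOG_SPACE_EXTENT) % TABLE_SIZE
}

fn main() {
    let inside = 1usize << LOG_SPACE_EXTENT; // start of the first space (slot 1)
    let outside = inside + (TABLE_SIZE << LOG_SPACE_EXTENT); // 2^46 bytes away
    // Both addresses land in slot 1, even though `outside` is in no MMTk space.
    assert_eq!(addr_to_index(inside), addr_to_index(outside));
}
```

Without the `space_address_start..space_address_end` range check, `has_sft_entry` would report an entry for `outside` as well, which is exactly the aliasing problem of the old return-`true`-for-everything implementation.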
@@ -186,7 +194,6 @@ mod space_map {
             start: Address,
             bytes: usize,
         ) {
-            let table_size = Self::addr_to_index(Address::MAX) + 1;
             let index = Self::addr_to_index(start);
             if cfg!(debug_assertions) {
                 // Make sure we only update from empty to a valid space, or overwrite the space
                 assert!((*old).name() == EMPTY_SFT_NAME || (*old).name() == (*space).name());
                 // Make sure the range is in the space
                 let space_start = Self::index_to_space_start(index);
-                // FIXME: Curerntly skip the check for the last space. The following works fine for MMTk internal spaces,
-                // but the VM space is an exception. Any address after the last space is considered as the last space,
-                // based on our indexing function. In that case, we cannot assume the end of the region is within the last space (with MAX_SPACE_EXTENT).
-                if index != table_size - 1 {
-                    assert!(start >= space_start);
-                    assert!(start + bytes <= space_start + vm_layout().max_space_extent());
-                }
+                assert!(start >= space_start);
+                assert!(
+                    start + bytes <= space_start + vm_layout().max_space_extent(),
+                    "The range of {} + {} bytes does not fall into the space range between {} and {}, \
+                     and it is probably outside the address range we use.",
+                    start,
+                    bytes,
+                    space_start,
+                    space_start + vm_layout().max_space_extent()
+                );
             }
             self.sft.get_unchecked(index).store(space);
@@ -216,12 +226,15 @@ mod space_map {
         /// Create a new space map.
         #[allow(clippy::assertions_on_constants)] // We assert to make sure the constants are correct.
         pub fn new() -> Self {
+            use crate::util::heap::layout::heap_parameters::MAX_SPACES;
             let table_size = Self::addr_to_index(Address::MAX) + 1;
-            debug_assert!(table_size >= crate::util::heap::layout::heap_parameters::MAX_SPACES);
+            debug_assert!(table_size >= MAX_SPACES);
             Self {
                 sft: std::iter::repeat_with(SFTRefStorage::default)
                     .take(table_size)
                     .collect(),
+                space_address_start: Self::index_to_space_range(1).0, // the start of the first space
+                space_address_end: Self::index_to_space_range(MAX_SPACES - 1).1, // the end of the last space
             }
         }
@@ -261,7 +274,7 @@ mod space_map {
         let assert_for_index = |i: usize| {
             let (start, end) = SFTSpaceMap::index_to_space_range(i);
-            debug!("Space: Index#{} = [{}, {})", i, start, end);
+            println!("Space: Index#{} = [{}, {})", i, start, end);
             assert_eq!(SFTSpaceMap::addr_to_index(start), i);
             assert_eq!(SFTSpaceMap::addr_to_index(end - 1), i);
         };
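To make the `new()` hunk concrete: slot 0 is left for addresses below the first space, so the valid range spans slot 1 through slot `MAX_SPACES - 1`. A sketch under the same assumed layout as above (`index_to_space_range` here is a hypothetical stand-in for the real method, and the constants are illustrative):

```rust
const LOG_SPACE_EXTENT: usize = 41; // assumed, as in the previous sketch
const MAX_SPACES: usize = 16; // mirrors heap_parameters::MAX_SPACES

// Hypothetical stand-in for SFTSpaceMap::index_to_space_range.
fn index_to_space_range(i: usize) -> (usize, usize) {
    (i << LOG_SPACE_EXTENT, (i + 1) << LOG_SPACE_EXTENT)
}

fn main() {
    let space_address_start = index_to_space_range(1).0; // start of the first space
    let space_address_end = index_to_space_range(MAX_SPACES - 1).1; // end of the last space
    assert_eq!(space_address_start, 1 << 41);
    assert_eq!(space_address_end, 16 << 41);
}
```

This is also what the unit test's `[start, end)` output checks: the closed-open ranges must tile the table exactly, with `addr_to_index` mapping both `start` and `end - 1` back to the same slot.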
diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs
index 7aa1eb9903..63b42104b0 100644
--- a/src/policy/vmspace.rs
+++ b/src/policy/vmspace.rs
@@ -127,20 +127,6 @@ impl<VM: VMBinding> Space<VM> for VMSpace<VM> {
         // The default implementation checks with vm map. But vm map has some assumptions about
         // the address range for spaces and the VM space may break those assumptions (as the space is
         // mmapped by the runtime rather than us). So we use SFT here.
-
-        // However, SFT map may not be an ideal solution either for 64 bits. The default
-        // implementation of SFT map on 64 bits is `SFTSpaceMap`, which maps the entire address
-        // space into an index between 0 and 31, and assumes any address with the same index
-        // is in the same space (with the same SFT). MMTk spaces uses 1-16. We guarantee that
-        // VM space does not overlap with the address range that MMTk spaces may use. So
-        // any region used as VM space will have an index of 0, or 17-31, and all the addresses
-        // that are mapped to the same index will be considered as in the VM space. That means,
-        // after we map a region as VM space, the nearby addresses will also be considered
-        // as in the VM space if we use the default `SFTSpaceMap`. We can guarantee the nearby
-        // addresses are not MMTk spaces, but we cannot tell whether they really in the VM space
-        // or not.
-        // A solution to this is to use `SFTDenseChunkMap` if `vm_space` is enabled on 64 bits.
-        // `SFTDenseChunkMap` has an overhead of a few percentages (~3%) compared to `SFTSpaceMap`.
         SFT_MAP.get_checked(start).name() == self.name()
     }
 }
diff --git a/src/util/test_util/fixtures.rs b/src/util/test_util/fixtures.rs
index 1867330fad..d40ea302aa 100644
--- a/src/util/test_util/fixtures.rs
+++ b/src/util/test_util/fixtures.rs
@@ -115,7 +115,7 @@ impl<T: FixtureContent> Default for SerialFixture<T> {
 }
 
 pub struct MMTKFixture {
-    pub mmtk: &'static MMTK<MockVM>,
+    mmtk: *mut MMTK<MockVM>,
 }
 
 impl FixtureContent for MMTKFixture {
@@ -143,13 +143,21 @@ impl MMTKFixture {
         let mmtk = memory_manager::mmtk_init(&builder);
         let mmtk_ptr = Box::into_raw(mmtk);
-        let mmtk_static: &'static MMTK<MockVM> = unsafe { &*mmtk_ptr };
 
         if initialize_collection {
+            let mmtk_static: &'static MMTK<MockVM> = unsafe { &*mmtk_ptr };
             memory_manager::initialize_collection(mmtk_static, VMThread::UNINITIALIZED);
         }
 
-        MMTKFixture { mmtk: mmtk_static }
+        MMTKFixture { mmtk: mmtk_ptr }
+    }
+
+    pub fn get_mmtk(&self) -> &'static MMTK<MockVM> {
+        unsafe { &*self.mmtk }
+    }
+
+    pub fn get_mmtk_mut(&mut self) -> &'static mut MMTK<MockVM> {
+        unsafe { &mut *self.mmtk }
+    }
 }
@@ -186,7 +194,7 @@ impl MutatorFixture {
             true,
         );
         let mutator =
-            memory_manager::bind_mutator(mmtk.mmtk, VMMutatorThread(VMThread::UNINITIALIZED));
+            memory_manager::bind_mutator(mmtk.get_mmtk(), VMMutatorThread(VMThread::UNINITIALIZED));
         Self { mmtk, mutator }
     }
 
@@ -196,12 +204,12 @@ impl MutatorFixture {
     {
         let mmtk = MMTKFixture::create_with_builder(with_builder, true);
         let mutator =
-            memory_manager::bind_mutator(mmtk.mmtk, VMMutatorThread(VMThread::UNINITIALIZED));
+            memory_manager::bind_mutator(mmtk.get_mmtk(), VMMutatorThread(VMThread::UNINITIALIZED));
         Self { mmtk, mutator }
     }
 
     pub fn mmtk(&self) -> &'static MMTK<MockVM> {
-        self.mmtk.mmtk
+        self.mmtk.get_mmtk()
     }
 }
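The fixture rework swaps a stored `&'static MMTK<MockVM>` for a raw pointer plus a pair of accessors. The motivation: a shared `&'static` reference held for the fixture's lifetime can never soundly yield the `&'static mut MMTK` that `set_vm_space` needs. A self-contained sketch of the pattern (names are ours, with `String` standing in for `MMTK<MockVM>`):

```rust
struct Fixture {
    // Raw pointer to a leaked box; borrows are minted on demand instead of
    // holding one shared reference forever.
    inner: *mut String,
}

impl Fixture {
    fn create() -> Self {
        Fixture { inner: Box::into_raw(Box::new(String::from("mmtk"))) }
    }

    fn get(&self) -> &'static String {
        unsafe { &*self.inner } // shared access, like get_mmtk()
    }

    fn get_mut(&mut self) -> &'static mut String {
        unsafe { &mut *self.inner } // exclusive access, like get_mmtk_mut()
    }
}

fn main() {
    let mut f = Fixture::create();
    assert_eq!(f.get(), "mmtk");
    f.get_mut().push_str("-core"); // the mutable path that set_vm_space relies on
    assert_eq!(f.get(), "mmtk-core");
}
```

Taking `&mut self` in `get_mut` keeps the borrow checker involved at the fixture level, though the returned `'static` lifetimes mean callers must still avoid overlapping shared and exclusive uses themselves.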
diff --git a/src/vm/tests/mock_tests/mock_test_allocate_without_initialize_collection.rs b/src/vm/tests/mock_tests/mock_test_allocate_without_initialize_collection.rs
index 423c3a299c..1a4a22b422 100644
--- a/src/vm/tests/mock_tests/mock_test_allocate_without_initialize_collection.rs
+++ b/src/vm/tests/mock_tests/mock_test_allocate_without_initialize_collection.rs
@@ -25,7 +25,7 @@ pub fn allocate_without_initialize_collection() {
 
     // Build mutator
     let mut mutator = memory_manager::bind_mutator(
-        fixture.mmtk,
+        fixture.get_mmtk(),
         VMMutatorThread(VMThread::UNINITIALIZED),
     );
diff --git a/src/vm/tests/mock_tests/mock_test_allocator_info.rs b/src/vm/tests/mock_tests/mock_test_allocator_info.rs
index 9b03f7c9c1..3761b2e42b 100644
--- a/src/vm/tests/mock_tests/mock_test_allocator_info.rs
+++ b/src/vm/tests/mock_tests/mock_test_allocator_info.rs
@@ -14,12 +14,14 @@ pub fn test_allocator_info() {
         || {
             let fixture = MMTKFixture::create();
 
-            let selector =
-                memory_manager::get_allocator_mapping(fixture.mmtk, AllocationSemantics::Default);
+            let selector = memory_manager::get_allocator_mapping(
+                fixture.get_mmtk(),
+                AllocationSemantics::Default,
+            );
             let base_offset = crate::plan::Mutator::<MockVM>::get_allocator_base_offset(selector);
             let allocator_info = AllocatorInfo::new::<MockVM>(selector);
 
-            match *fixture.mmtk.get_options().plan {
+            match *fixture.get_mmtk().get_options().plan {
                 PlanSelector::NoGC
                 | PlanSelector::Immix
                 | PlanSelector::SemiSpace
diff --git a/src/vm/tests/mock_tests/mock_test_doc_avoid_resolving_allocator.rs b/src/vm/tests/mock_tests/mock_test_doc_avoid_resolving_allocator.rs
index 789b06c99e..b7c50ddee0 100644
--- a/src/vm/tests/mock_tests/mock_test_doc_avoid_resolving_allocator.rs
+++ b/src/vm/tests/mock_tests/mock_test_doc_avoid_resolving_allocator.rs
@@ -22,13 +22,15 @@ pub fn acquire_typed_allocator() {
 
     // ANCHOR: avoid_resolving_allocator
     // At boot time
-    let selector =
-        memory_manager::get_allocator_mapping(fixture.mmtk, AllocationSemantics::Default);
+    let selector = memory_manager::get_allocator_mapping(
+        fixture.get_mmtk(),
+        AllocationSemantics::Default,
+    );
     unsafe {
         DEFAULT_ALLOCATOR_OFFSET =
             crate::plan::Mutator::<MockVM>::get_allocator_base_offset(selector);
     }
-    let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
+    let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
 
     // At run time: allocate with the default semantics without resolving allocator
     let default_allocator: &mut BumpAllocator<MockVM> = {
diff --git a/src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs b/src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs
index 6d367c875d..a79ef25a55 100644
--- a/src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs
+++ b/src/vm/tests/mock_tests/mock_test_doc_mutator_storage.rs
@@ -28,7 +28,7 @@ pub fn boxed_pointer() {
             }
 
             // Bind an MMTk mutator
-            let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
+            let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
             // Store the pointer in TLS
             let mut storage = MutatorInTLS { ptr: mutator };
 
@@ -58,7 +58,7 @@ pub fn embed_mutator_struct() {
             }
 
             // Bind an MMTk mutator
-            let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
+            let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
             // Store the struct (or use memcpy for non-Rust code)
             let mut storage = MutatorInTLS { embed: *mutator };
             // Allocate
@@ -94,7 +94,7 @@ pub fn embed_fastpath_struct() {
             }
 
             // Bind an MMTk mutator
-            let mutator = memory_manager::bind_mutator(fixture.mmtk, tls_opaque_pointer);
+            let mutator = memory_manager::bind_mutator(fixture.get_mmtk(), tls_opaque_pointer);
             // Create a fastpath BumpPointer with default(). The BumpPointer from default() is guaranteed to fail on the first allocation,
             // so the allocation goes to the slowpath and we will get an allocation buffer from MMTk.
             let default_bump_pointer = BumpPointer::default();
@@ -116,7 +116,7 @@ pub fn embed_fastpath_struct() {
             } else {
                 use crate::util::alloc::Allocator;
                 let selector = memory_manager::get_allocator_mapping(
-                    fixture.mmtk,
+                    fixture.get_mmtk(),
                     AllocationSemantics::Default,
                 );
                 let default_allocator = unsafe {
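The comment about `BumpPointer::default()` in the hunk above is worth unpacking. A simplified stand-in (not MMTk's actual `BumpPointer` definition) showing the mechanism: with `cursor == limit == 0`, any non-zero allocation overflows the empty buffer, so the very first allocation is forced onto the slowpath, where MMTk hands out a real buffer.

```rust
#[derive(Default)]
struct BumpPointer {
    cursor: usize, // next free byte
    limit: usize,  // end of the current allocation buffer
}

impl BumpPointer {
    // Fastpath: bump the cursor if the request fits, otherwise signal slowpath.
    fn alloc(&mut self, bytes: usize) -> Option<usize> {
        let new_cursor = self.cursor.checked_add(bytes)?;
        if new_cursor > self.limit {
            None // slowpath: ask MMTk for a fresh buffer, then retry
        } else {
            let result = self.cursor;
            self.cursor = new_cursor;
            Some(result)
        }
    }
}

fn main() {
    let mut bp = BumpPointer::default();
    // cursor == limit == 0, so the first allocation always takes the slowpath.
    assert!(bp.alloc(16).is_none());
}
```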
diff --git a/src/vm/tests/mock_tests/mock_test_malloc_counted.rs b/src/vm/tests/mock_tests/mock_test_malloc_counted.rs
index 8dd50b6c60..a591b6eab1 100644
--- a/src/vm/tests/mock_tests/mock_test_malloc_counted.rs
+++ b/src/vm/tests/mock_tests/mock_test_malloc_counted.rs
@@ -12,15 +12,15 @@ pub fn malloc_free() {
         default_setup,
         || {
             MMTK.with_fixture(|fixture| {
-                let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());
 
-                let res = memory_manager::counted_malloc(&fixture.mmtk, 8);
+                let res = memory_manager::counted_malloc(fixture.get_mmtk(), 8);
                 assert!(!res.is_zero());
-                let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
                 assert_eq!(bytes_before + 8, bytes_after_alloc);
 
-                memory_manager::free_with_size(&fixture.mmtk, res, 8);
-                let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                memory_manager::free_with_size(fixture.get_mmtk(), res, 8);
+                let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
                 assert_eq!(bytes_before, bytes_after_free);
             });
         },
@@ -34,15 +34,15 @@ pub fn calloc_free() {
         default_setup,
         || {
             MMTK.with_fixture(|fixture| {
-                let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());
 
-                let res = memory_manager::counted_calloc(&fixture.mmtk, 1, 8);
+                let res = memory_manager::counted_calloc(fixture.get_mmtk(), 1, 8);
                 assert!(!res.is_zero());
-                let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
                 assert_eq!(bytes_before + 8, bytes_after_alloc);
 
-                memory_manager::free_with_size(&fixture.mmtk, res, 8);
-                let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                memory_manager::free_with_size(fixture.get_mmtk(), res, 8);
+                let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
                 assert_eq!(bytes_before, bytes_after_free);
             });
         },
@@ -56,21 +56,21 @@ pub fn realloc_grow() {
         default_setup,
         || {
             MMTK.with_fixture(|fixture| {
-                let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());
 
-                let res1 = memory_manager::counted_malloc(&fixture.mmtk, 8);
+                let res1 = memory_manager::counted_malloc(fixture.get_mmtk(), 8);
                 assert!(!res1.is_zero());
-                let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
                 assert_eq!(bytes_before + 8, bytes_after_alloc);
 
                 // grow to 16 bytes
-                let res2 = memory_manager::realloc_with_old_size(&fixture.mmtk, res1, 16, 8);
+                let res2 = memory_manager::realloc_with_old_size(fixture.get_mmtk(), res1, 16, 8);
                 assert!(!res2.is_zero());
-                let bytes_after_realloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                let bytes_after_realloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
                 assert_eq!(bytes_before + 16, bytes_after_realloc);
 
-                memory_manager::free_with_size(&fixture.mmtk, res2, 16);
-                let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                memory_manager::free_with_size(fixture.get_mmtk(), res2, 16);
+                let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
                 assert_eq!(bytes_before, bytes_after_free);
             });
         },
@@ -84,21 +84,21 @@ pub fn realloc_shrink() {
         default_setup,
         || {
             MMTK.with_fixture(|fixture| {
-                let bytes_before = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                let bytes_before = memory_manager::get_malloc_bytes(fixture.get_mmtk());
 
-                let res1 = memory_manager::counted_malloc(&fixture.mmtk, 16);
+                let res1 = memory_manager::counted_malloc(fixture.get_mmtk(), 16);
                 assert!(!res1.is_zero());
-                let bytes_after_alloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                let bytes_after_alloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
                 assert_eq!(bytes_before + 16, bytes_after_alloc);
 
                 // shrink to 8 bytes
-                let res2 = memory_manager::realloc_with_old_size(&fixture.mmtk, res1, 8, 16);
+                let res2 = memory_manager::realloc_with_old_size(fixture.get_mmtk(), res1, 8, 16);
                 assert!(!res2.is_zero());
-                let bytes_after_realloc = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                let bytes_after_realloc = memory_manager::get_malloc_bytes(fixture.get_mmtk());
                 assert_eq!(bytes_before + 8, bytes_after_realloc);
 
-                memory_manager::free_with_size(&fixture.mmtk, res2, 8);
-                let bytes_after_free = memory_manager::get_malloc_bytes(&fixture.mmtk);
+                memory_manager::free_with_size(fixture.get_mmtk(), res2, 8);
+                let bytes_after_free = memory_manager::get_malloc_bytes(fixture.get_mmtk());
                 assert_eq!(bytes_before, bytes_after_free);
             });
         },
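The new test below pins down the scenario from mmtk-julia: a runtime-mmapped range that lies outside MMTk's own space range. A back-of-the-envelope check, reusing the assumed constants from the earlier sketches, shows why `SFTSpaceMap` cannot serve it (consistent with the deleted `vmspace.rs` comment that VM space regions land in slot 0 or slots 17-31):

```rust
fn main() {
    const LOG_SPACE_EXTENT: usize = 41; // assumed, as before
    const TABLE_SIZE: usize = 32;
    const MAX_SPACES: usize = 16;

    let vm_space_start = 0x78624D_C00000usize; // start address used by the test
    let slot = (vm_space_start >> LOG_SPACE_EXTENT) % TABLE_SIZE;
    // Under these assumptions the range maps to slot 28, outside the
    // 1..MAX_SPACES band that MMTk spaces occupy, so the space map has no
    // valid entry for it and a chunk-granularity map is required.
    assert!(slot == 0 || slot >= MAX_SPACES);
    println!("VM space start falls in slot {slot}");
}
```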
diff --git a/src/vm/tests/mock_tests/mock_test_mmtk_julia_pr_143.rs b/src/vm/tests/mock_tests/mock_test_mmtk_julia_pr_143.rs
new file mode 100644
index 0000000000..fc9373bfe2
--- /dev/null
+++ b/src/vm/tests/mock_tests/mock_test_mmtk_julia_pr_143.rs
@@ -0,0 +1,29 @@
+// GITHUB-CI: MMTK_PLAN=Immix
+// GITHUB-CI: FEATURES=vm_space
+
+// This test only runs for 64 bits.
+// It tries to set a certain range as the VM space. The range does not conflict with the virtual
+// address range we use for spaces, so we cannot use SFTSpaceMap as the SFT map implementation.
+
+use lazy_static::lazy_static;
+
+use super::mock_test_prelude::*;
+use crate::memory_manager;
+use crate::util::Address;
+
+#[test]
+fn test_set_vm_space() {
+    with_mockvm(
+        default_setup,
+        || {
+            let mut fixture = MMTKFixture::create();
+
+            let start_addr = unsafe { Address::from_usize(0x78624DC00000) };
+            let end_addr = unsafe { Address::from_usize(0x786258000000) };
+            let size = end_addr - start_addr;
+
+            memory_manager::set_vm_space::<MockVM>(fixture.get_mmtk_mut(), start_addr, size);
+        },
+        no_cleanup,
+    )
+}
diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs
index 6255b1fb8a..51a085cec8 100644
--- a/src/vm/tests/mock_tests/mod.rs
+++ b/src/vm/tests/mock_tests/mod.rs
@@ -39,6 +39,8 @@ mod mock_test_issue867_allocate_unrealistically_large_object;
 #[cfg(feature = "malloc_counted_size")]
 mod mock_test_malloc_counted;
 mod mock_test_malloc_ms;
+#[cfg(all(target_pointer_width = "64", feature = "vm_space"))]
+mod mock_test_mmtk_julia_pr_143;
 #[cfg(feature = "nogc_lock_free")]
 mod mock_test_nogc_lock_free;
 #[cfg(target_pointer_width = "64")]