Skip to content

Commit ec0551e

Browse files
committed
Add BlockPageResource for ImmixSpace
1 parent 339640f commit ec0551e

File tree

7 files changed

+176
-18
lines changed

7 files changed

+176
-18
lines changed

Cargo.toml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,11 +26,12 @@ hoard-sys = {version = "0.1.1", optional = true }
2626
lazy_static = "1.1"
2727
log = {version = "0.4", features = ["max_level_trace", "release_max_level_off"] }
2828
crossbeam-deque = "0.6"
29+
crossbeam-queue = "0.3.2"
2930
num_cpus = "1.8"
3031
enum-map = "0.6.2"
3132
downcast-rs = "1.1.1"
3233
atomic-traits = "0.2.0"
33-
atomic = "0.4.6"
34+
atomic = "0.5"
3435
spin = "0.5.2"
3536
env_logger = "0.8.2"
3637
pfm = {version = "0.1.0-beta.1", optional = true}

src/policy/immix/block.rs

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -235,8 +235,7 @@ impl Block {
235235
match self.get_state() {
236236
BlockState::Unallocated => false,
237237
BlockState::Unmarked => {
238-
// Release the block if it is allocated but not marked by the current GC.
239-
space.release_block(*self);
238+
// Notify the caller to release the block if it is allocated but not marked by the current GC.
240239
true
241240
}
242241
BlockState::Marked => {
@@ -265,8 +264,7 @@ impl Block {
265264
}
266265

267266
if marked_lines == 0 {
268-
// Release the block if non of its lines are marked.
269-
space.release_block(*self);
267+
// Notify the caller to release the block if none of its lines are marked.
270268
true
271269
} else {
272270
// There are some marked lines. Keep the block live.

src/policy/immix/chunk.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ use crate::{
88
vm::*,
99
MMTK,
1010
};
11+
use crossbeam_queue::ArrayQueue;
1112
use spin::Mutex;
1213
use std::{iter::Step, ops::Range, sync::atomic::Ordering};
1314

@@ -61,6 +62,7 @@ impl Chunk {
6162
} else {
6263
Some(space.line_mark_state.load(Ordering::Acquire))
6364
};
65+
let dead_blocks = ArrayQueue::new(Chunk::BLOCKS);
6466
// number of allocated blocks.
6567
let mut allocated_blocks = 0;
6668
// Iterate over all allocated blocks in this chunk.
@@ -71,8 +73,14 @@ impl Chunk {
7173
if !block.sweep(space, mark_histogram, line_mark_state) {
7274
// Block is live. Increment the allocated block count.
7375
allocated_blocks += 1;
76+
} else {
77+
block.deinit();
78+
dead_blocks.push(block.start()).unwrap();
7479
}
7580
}
81+
if !dead_blocks.is_empty() {
82+
space.pr.release_bulk(dead_blocks)
83+
}
7684
// Set this chunk as free if there are no live blocks.
7785
if allocated_blocks == 0 {
7886
space.chunk_map.set(*self, ChunkState::Free)

src/policy/immix/immixspace.rs

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ use crate::{
2020
plan::TransitiveClosure,
2121
scheduler::{gc_work::ProcessEdgesWork, GCWork, GCWorkScheduler, GCWorker, WorkBucketStage},
2222
util::{
23-
heap::FreeListPageResource,
23+
heap::BlockPageResource,
2424
opaque_pointer::{VMThread, VMWorkerThread},
2525
},
2626
AllocationSemantics, CopyContext, MMTK,
@@ -34,7 +34,7 @@ use std::{
3434

3535
pub struct ImmixSpace<VM: VMBinding> {
3636
common: CommonSpace<VM>,
37-
pr: FreeListPageResource<VM>,
37+
pub(super) pr: BlockPageResource<VM>,
3838
/// Allocation status for all chunks in immix space
3939
pub chunk_map: ChunkMap,
4040
/// Current line mark state
@@ -151,9 +151,14 @@ impl<VM: VMBinding> ImmixSpace<VM> {
151151
);
152152
ImmixSpace {
153153
pr: if common.vmrequest.is_discontiguous() {
154-
FreeListPageResource::new_discontiguous(0, vm_map)
154+
unimplemented!()
155155
} else {
156-
FreeListPageResource::new_contiguous(common.start, common.extent, 0, vm_map)
156+
BlockPageResource::new_contiguous(
157+
Block::LOG_PAGES,
158+
common.start,
159+
common.extent,
160+
vm_map,
161+
)
157162
},
158163
common,
159164
chunk_map: ChunkMap::new(),
@@ -271,12 +276,6 @@ impl<VM: VMBinding> ImmixSpace<VM> {
271276
did_defrag
272277
}
273278

274-
/// Release a block.
275-
pub fn release_block(&self, block: Block) {
276-
block.deinit();
277-
self.pr.release_pages(block.start());
278-
}
279-
280279
/// Allocate a clean block.
281280
pub fn get_clean_block(&self, tls: VMThread, copy: bool) -> Option<Block> {
282281
let block_address = self.acquire(tls, Block::PAGES);

src/util/heap/blockpageresource.rs

Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
use super::pageresource::{PRAllocFail, PRAllocResult};
2+
use super::PageResource;
3+
use crate::util::address::Address;
4+
use crate::util::constants::*;
5+
use crate::util::heap::layout::heap_layout::VMMap;
6+
use crate::util::heap::layout::vm_layout_constants::*;
7+
use crate::util::heap::pageresource::CommonPageResource;
8+
use crate::util::heap::space_descriptor::SpaceDescriptor;
9+
use crate::util::opaque_pointer::*;
10+
use crate::vm::*;
11+
use atomic::{Atomic, Ordering};
12+
use crossbeam_queue::ArrayQueue;
13+
use crossbeam_queue::SegQueue;
14+
use std::marker::PhantomData;
15+
use std::sync::Mutex;
16+
17+
// NOTE(review): this constant is not referenced anywhere in this file —
// presumably kept for parity with other page resources; confirm before removing.
const UNINITIALIZED_WATER_MARK: i32 = -1;

/// A page resource that hands out memory in fixed-size blocks of
/// `1 << log_pages_in_block` pages, carved from a single contiguous region.
/// Freed blocks are recycled in bulk via queues before the bump pointer
/// (`highwater`) is advanced to grow the space.
pub struct BlockPageResource<VM: VMBinding> {
    /// Shared page-resource state (page accounting, contiguity, VM map).
    common: CommonPageResource,
    /// log2 of the number of pages in each allocation unit (block).
    log_pages_in_block: usize,
    /// Lock intended to serialize allocation; see `alloc_pages_no_lock`,
    /// which requires single-threaded callers.
    sync: Mutex<()>,
    /// The queue of freed blocks currently being drained by the allocator;
    /// `None` until a freed-block queue has been promoted to the head.
    head_locally_freed_blocks: spin::RwLock<Option<ArrayQueue<Address>>>,
    /// Queues of freed blocks handed over wholesale by `release_bulk`,
    /// awaiting promotion to `head_locally_freed_blocks`.
    locally_freed_blocks: SegQueue<ArrayQueue<Address>>,
    /// Bump pointer: start address of the next never-yet-allocated block.
    highwater: Atomic<Address>,
    /// Exclusive upper bound of the region (aligned up to a chunk boundary).
    limit: Address,
    _p: PhantomData<VM>,
}
29+
30+
impl<VM: VMBinding> PageResource<VM> for BlockPageResource<VM> {
31+
#[inline(always)]
32+
fn common(&self) -> &CommonPageResource {
33+
&self.common
34+
}
35+
36+
#[inline(always)]
37+
fn common_mut(&mut self) -> &mut CommonPageResource {
38+
&mut self.common
39+
}
40+
41+
fn alloc_pages(
42+
&self,
43+
space_descriptor: SpaceDescriptor,
44+
reserved_pages: usize,
45+
required_pages: usize,
46+
tls: VMThread,
47+
) -> Result<PRAllocResult, PRAllocFail> {
48+
// let _sync = self.sync.lock().unwrap();
49+
unsafe { self.alloc_pages_no_lock(space_descriptor, reserved_pages, required_pages, tls) }
50+
}
51+
52+
fn adjust_for_metadata(&self, pages: usize) -> usize {
53+
pages
54+
}
55+
}
56+
57+
impl<VM: VMBinding> BlockPageResource<VM> {
    /// Create a block page resource over the contiguous region
    /// `[start, start + bytes)`, allocating in units of
    /// `1 << log_pages_in_block` pages.
    ///
    /// The region limit is aligned up to a chunk boundary; on 64-bit targets
    /// the resource is marked growable.
    pub fn new_contiguous(
        log_pages_in_block: usize,
        start: Address,
        bytes: usize,
        vm_map: &'static VMMap,
    ) -> Self {
        // Only 64-bit address spaces are treated as growable.
        let growable = cfg!(target_pointer_width = "64");
        Self {
            log_pages_in_block,
            // `true` = contiguous space.
            common: CommonPageResource::new(true, growable, vm_map),
            sync: Mutex::new(()),
            head_locally_freed_blocks: Default::default(),
            locally_freed_blocks: Default::default(),
            // Bump allocation starts at the beginning of the region.
            highwater: Atomic::new(start),
            limit: (start + bytes).align_up(BYTES_IN_CHUNK),
            _p: PhantomData,
        }
    }

    /// Allocate one block, preferring recycled freed blocks over growing the
    /// space with the bump pointer.
    ///
    /// Order of attempts:
    /// 1. Pop a block from the head freed-block queue (read lock only).
    /// 2. Otherwise, take a whole queue from `locally_freed_blocks`, pop one
    ///    block from it, and promote the remainder to the head (upgrades the
    ///    read lock to a write lock).
    /// 3. Otherwise, atomically bump `highwater` by one block; fails with
    ///    `PRAllocFail` once `limit` is reached.
    ///
    /// # Safety
    /// The caller needs to ensure this is called by only one thread.
    /// NOTE(review): with concurrent callers, two threads could both take
    /// queues in step 2 and one promotion would overwrite the other,
    /// leaking the overwritten queue's blocks — confirm callers serialize.
    pub unsafe fn alloc_pages_no_lock(
        &self,
        _space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        // This resource only serves whole blocks.
        debug_assert_eq!(reserved_pages, required_pages);
        debug_assert_eq!(reserved_pages, 1 << self.log_pages_in_block);
        // Fast allocate from the locally-released-blocks list
        let head_locally_freed_blocks = self.head_locally_freed_blocks.upgradeable_read();
        if let Some(block) = head_locally_freed_blocks
            .as_ref()
            .map(|q| q.pop())
            .flatten()
        {
            self.commit_pages(reserved_pages, required_pages, tls);
            // Recycled block: no new chunk can have been crossed.
            return Result::Ok(PRAllocResult {
                start: block,
                pages: required_pages,
                new_chunk: false,
            });
        } else if let Some(blocks) = self.locally_freed_blocks.pop() {
            // Queues are only pushed by `release_bulk`, which rejects empty
            // queues, so this pop cannot fail under the single-caller contract.
            let block = blocks.pop().unwrap();
            // Promote the rest of this queue to be the new head.
            let mut head_locally_freed_blocks = head_locally_freed_blocks.upgrade();
            *head_locally_freed_blocks = Some(blocks);
            self.commit_pages(reserved_pages, required_pages, tls);
            return Result::Ok(PRAllocResult {
                start: block,
                pages: required_pages,
                new_chunk: false,
            });
        }
        // Grow space
        // Atomically claim the current highwater mark and advance it by one
        // block; `None` from the closure (at `limit`) makes fetch_update fail.
        let start: Address =
            match self
                .highwater
                .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
                    if x >= self.limit {
                        None
                    } else {
                        Some(x + (1usize << (self.log_pages_in_block + LOG_BYTES_IN_PAGE as usize)))
                    }
                }) {
                Ok(a) => a,
                _ => return Result::Err(PRAllocFail),
            };
        // A chunk-aligned block is the first block of a fresh chunk; the
        // caller presumably uses this to map per-chunk metadata — confirm.
        let new_chunk = start.is_aligned_to(BYTES_IN_CHUNK);

        self.commit_pages(reserved_pages, required_pages, tls);
        Result::Ok(PRAllocResult {
            start: start,
            pages: required_pages,
            new_chunk: new_chunk,
        })
    }

    /// Return a whole queue of freed block start addresses to this resource
    /// in one operation, updating page accounting and parking the queue for
    /// later recycling by `alloc_pages_no_lock`.
    ///
    /// Empty queues are ignored (and must be: the allocator assumes every
    /// parked queue holds at least one block).
    pub fn release_bulk(&self, queue: ArrayQueue<Address>) {
        if queue.is_empty() {
            return;
        }
        let blocks = queue.len();
        // Each freed block releases `1 << log_pages_in_block` pages.
        let pages = blocks << self.log_pages_in_block;
        self.common.accounting.release(pages as _);
        self.locally_freed_blocks.push(queue);
    }
}

src/util/heap/mod.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
mod accounting;
22
#[macro_use]
33
pub mod layout;
4+
pub mod blockpageresource;
45
pub mod freelistpageresource;
56
mod heap_meta;
67
pub mod monotonepageresource;
@@ -9,6 +10,7 @@ pub mod space_descriptor;
910
mod vmrequest;
1011

1112
pub use self::accounting::PageAccounting;
13+
pub use self::blockpageresource::BlockPageResource;
1214
pub use self::freelistpageresource::FreeListPageResource;
1315
pub use self::heap_meta::HeapMeta;
1416
pub use self::monotonepageresource::MonotonePageResource;

src/util/metadata/side_metadata/helpers.rs

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -72,9 +72,15 @@ pub(crate) fn try_mmap_contiguous_metadata_space(
7272
// nearest page-aligned starting address
7373
let mmap_start = address_to_meta_address(spec, start).align_down(BYTES_IN_PAGE);
7474
// nearest page-aligned ending address
75-
let mmap_size =
76-
address_to_meta_address(spec, start + size).align_up(BYTES_IN_PAGE) - mmap_start;
77-
if mmap_size > 0 {
75+
let mmap_size = {
76+
let meta_address = address_to_meta_address(spec, start + size);
77+
if meta_address == mmap_start {
78+
BYTES_IN_PAGE
79+
} else {
80+
meta_address.align_up(BYTES_IN_PAGE) - mmap_start
81+
}
82+
};
83+
if size != 0 && mmap_size > 0 {
7884
if !no_reserve {
7985
MMAPPER.ensure_mapped(mmap_start, mmap_size >> LOG_BYTES_IN_PAGE)
8086
} else {

0 commit comments

Comments
 (0)