use super::pageresource::{PRAllocFail, PRAllocResult};
use super::PageResource;
use crate::util::address::Address;
use crate::util::constants::*;
use crate::util::heap::layout::heap_layout::VMMap;
use crate::util::heap::layout::vm_layout_constants::*;
use crate::util::heap::pageresource::CommonPageResource;
use crate::util::heap::space_descriptor::SpaceDescriptor;
use crate::util::opaque_pointer::*;
use crate::vm::*;
use atomic::{Atomic, Ordering};
use crossbeam_queue::ArrayQueue;
use crossbeam_queue::SegQueue;
use std::marker::PhantomData;
use std::sync::Mutex;

const UNINITIALIZED_WATER_MARK: i32 = -1;

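/// A page resource that hands out and takes back fixed-size blocks of
/// `1 << log_pages_in_block` pages. Freed blocks are recycled through
/// lock-free queues; fresh blocks are carved off a bump pointer
/// (`highwater`) bounded by `limit`.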
pub struct BlockPageResource<VM: VMBinding> {
    common: CommonPageResource,
    /// Block size: every allocation request is exactly `1 << log_pages_in_block` pages.
    log_pages_in_block: usize,
    sync: Mutex<()>,
    /// The queue of freed blocks currently being drained by allocation requests.
    head_locally_freed_blocks: spin::RwLock<Option<ArrayQueue<Address>>>,
    /// Batches of freed blocks handed back via `release_bulk`, waiting to be drained.
    locally_freed_blocks: SegQueue<ArrayQueue<Address>>,
    /// Bump pointer for growing the space with fresh blocks.
    highwater: Atomic<Address>,
    /// Upper bound (chunk-aligned) of the contiguous region this resource may use.
    limit: Address,
    _p: PhantomData<VM>,
}

impl<VM: VMBinding> PageResource<VM> for BlockPageResource<VM> {
    #[inline(always)]
    fn common(&self) -> &CommonPageResource {
        &self.common
    }

    #[inline(always)]
    fn common_mut(&mut self) -> &mut CommonPageResource {
        &mut self.common
    }

    fn alloc_pages(
        &self,
        space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        // Delegate to the lock-free path; no global lock is taken here.
        // let _sync = self.sync.lock().unwrap();
        unsafe { self.alloc_pages_no_lock(space_descriptor, reserved_pages, required_pages, tls) }
    }

    fn adjust_for_metadata(&self, pages: usize) -> usize {
        pages
    }
}

impl<VM: VMBinding> BlockPageResource<VM> {
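    /// Create a page resource over a contiguous region that starts at `start`
    /// and spans `bytes` bytes, handing out blocks of
    /// `1 << log_pages_in_block` pages each.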
    pub fn new_contiguous(
        log_pages_in_block: usize,
        start: Address,
        bytes: usize,
        vm_map: &'static VMMap,
    ) -> Self {
        let growable = cfg!(target_pointer_width = "64");
        Self {
            log_pages_in_block,
            common: CommonPageResource::new(true, growable, vm_map),
            sync: Mutex::new(()),
            head_locally_freed_blocks: Default::default(),
            locally_freed_blocks: Default::default(),
            highwater: Atomic::new(start),
            limit: (start + bytes).align_up(BYTES_IN_CHUNK),
            _p: PhantomData,
        }
    }

    /// The caller needs to ensure this is called by only one thread.
    pub unsafe fn alloc_pages_no_lock(
        &self,
        _space_descriptor: SpaceDescriptor,
        reserved_pages: usize,
        required_pages: usize,
        tls: VMThread,
    ) -> Result<PRAllocResult, PRAllocFail> {
        debug_assert_eq!(reserved_pages, required_pages);
        debug_assert_eq!(reserved_pages, 1 << self.log_pages_in_block);
        // Fast path: reuse a block from the queue of locally-freed blocks.
        let head_locally_freed_blocks = self.head_locally_freed_blocks.upgradeable_read();
        if let Some(block) = head_locally_freed_blocks.as_ref().and_then(|q| q.pop()) {
            self.commit_pages(reserved_pages, required_pages, tls);
            return Ok(PRAllocResult {
                start: block,
                pages: required_pages,
                new_chunk: false,
            });
        } else if let Some(blocks) = self.locally_freed_blocks.pop() {
            // The head queue is exhausted (or not yet installed): install the
            // next batch of freed blocks as the new head queue and allocate from it.
            let block = blocks.pop().unwrap();
            let mut head_locally_freed_blocks = head_locally_freed_blocks.upgrade();
            *head_locally_freed_blocks = Some(blocks);
            self.commit_pages(reserved_pages, required_pages, tls);
            return Ok(PRAllocResult {
                start: block,
                pages: required_pages,
                new_chunk: false,
            });
        }
        // Slow path: grow the space by bumping the high-water mark by one block,
        // failing if that would exceed the limit.
        let start: Address =
            match self
                .highwater
                .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
                    if x >= self.limit {
                        None
                    } else {
                        Some(x + (1usize << (self.log_pages_in_block + LOG_BYTES_IN_PAGE as usize)))
                    }
                }) {
                Ok(a) => a,
                _ => return Err(PRAllocFail),
            };
        // The block starts a new chunk iff it is chunk-aligned.
        let new_chunk = start.is_aligned_to(BYTES_IN_CHUNK);

        self.commit_pages(reserved_pages, required_pages, tls);
        Ok(PRAllocResult {
            start,
            pages: required_pages,
            new_chunk,
        })
    }

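    /// Return a whole batch of freed blocks to this page resource in one call,
    /// updating the page accounting and queueing the batch for reuse by
    /// subsequent allocations. Empty batches are ignored.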
    pub fn release_bulk(&self, queue: ArrayQueue<Address>) {
        if queue.is_empty() {
            return;
        }
        let blocks = queue.len();
        let pages = blocks << self.log_pages_in_block;
        self.common.accounting.release(pages as _);
        self.locally_freed_blocks.push(queue);
    }
}
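
// A minimal usage sketch, not taken from this change: one way a sweeping GC
// worker might batch the blocks it frees and hand them back via `release_bulk`.
// The function name and the idea of first collecting blocks into a `Vec` are
// assumptions for illustration only.
#[allow(dead_code)]
fn release_blocks_sketch<VM: VMBinding>(pr: &BlockPageResource<VM>, freed: Vec<Address>) {
    if freed.is_empty() {
        return;
    }
    // Move the worker-local freed blocks into a bounded lock-free queue...
    let queue = ArrayQueue::new(freed.len());
    for block in freed {
        // The queue capacity equals the number of blocks, so this cannot fail.
        if queue.push(block).is_err() {
            unreachable!();
        }
    }
    // ...and return the whole batch to the page resource in one call.
    pr.release_bulk(queue);
}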