11use std:: alloc:: { self , Layout } ;
22use std:: sync;
33
4- use crate :: helpers:: ToU64 ;
5-
/// The single, process-global instance of the machine allocator. Guarded by a
/// `Mutex` because interpreter memory may be touched from multiple threads.
static ALLOCATOR: sync::Mutex<MachineAlloc> = sync::Mutex::new(MachineAlloc::empty());
75
/// A distinct allocator for interpreter memory contents, allowing us to manage its
/// memory separately from that of Miri itself. This is very useful for native-lib mode.
#[derive(Debug)]
pub struct MachineAlloc {
    /// Pointers to page-aligned memory that has been claimed by the allocator.
    /// Every pointer here must point to a page-sized allocation claimed via
    /// the global allocator.
    pages: Vec<*mut u8>,
    /// Pointers to multi-page-sized allocations. These must also be page-aligned,
    /// with a size of `page_size * count` (where `count` is the second element
    /// of the tuple).
    huge_allocs: Vec<(*mut u8, usize)>,
    /// Metadata about which bytes have been allocated on each page. The length
    /// of this vector must be the same as that of `pages`, and the length of the
    /// boxed slice must be exactly `page_size / 8`.
    ///
    /// Conceptually, each bit of the `u8` represents the allocation status of one
    /// byte on the corresponding element of `pages`; in practice, we only allocate
    /// in 8-byte chunks currently, so the `u8`s are only ever 0 (fully free) or
    /// 255 (fully allocated).
    allocated: Vec<Box<[u8]>>,
    /// The host (not emulated) page size.
    page_size: usize,
    /// If false, calls to `alloc()` and `alloc_zeroed()` just wrap the corresponding
    /// function in the global allocator. Otherwise, uses the pages tracked
    /// internally.
    enabled: bool,
}
1834
// SAFETY: The raw pointers in `pages` and `huge_allocs` all refer to
// heap allocations obtained from the global allocator and owned by this
// struct, so sending the struct to another thread is sound.
unsafe impl Send for MachineAlloc {}
2137
2238impl MachineAlloc {
23- // Allocation-related methods
24-
25- /// Initializes the allocator with placeholder 4k pages .
39+ /// Initializes the allocator. `page_size` is set to 4k as a placeholder to
40+ /// allow this function to be `const`; it is updated to its real value when
41+ /// `enable()` is called .
2642 const fn empty ( ) -> Self {
2743 Self {
2844 pages : Vec :: new ( ) ,
@@ -33,62 +49,70 @@ impl MachineAlloc {
3349 }
3450 }
3551
36- /// SAFETY: There must be no existing `MiriAllocBytes`
37- pub unsafe fn enable ( ) {
52+ /// Enables the allocator. From this point onwards, calls to `alloc()` and
53+ /// `alloc_zeroed()` will return `(ptr, false)`.
54+ pub fn enable ( ) {
3855 let mut alloc = ALLOCATOR . lock ( ) . unwrap ( ) ;
3956 alloc. enabled = true ;
4057 // This needs to specifically be the system pagesize!
4158 alloc. page_size = unsafe {
42- let ret = libc:: sysconf ( libc:: _SC_PAGE_SIZE) ;
43- if ret > 0 {
44- ret. try_into ( ) . unwrap ( )
45- } else {
46- 4096 // fallback
47- }
59+ // If sysconf errors, better to just panic
60+ libc:: sysconf ( libc:: _SC_PAGE_SIZE) . try_into ( ) . unwrap ( )
4861 }
4962 }
5063
51- /// Returns a vector of page addresses managed by the allocator.
52- #[ expect( dead_code) ]
53- pub fn pages ( ) -> Vec < u64 > {
54- let alloc = ALLOCATOR . lock ( ) . unwrap ( ) ;
55- alloc. pages . clone ( ) . into_iter ( ) . map ( |p| p. addr ( ) . to_u64 ( ) ) . collect ( )
56- }
57-
64+ /// Expands the available memory pool by adding one page.
5865 fn add_page ( & mut self ) {
5966 let page_layout =
6067 unsafe { Layout :: from_size_align_unchecked ( self . page_size , self . page_size ) } ;
6168 let page_ptr = unsafe { alloc:: alloc ( page_layout) } ;
62- if page_ptr. is_null ( ) {
63- panic ! ( "aligned_alloc failed!!!" )
64- }
6569 self . allocated . push ( vec ! [ 0u8 ; self . page_size / 8 ] . into_boxed_slice ( ) ) ;
6670 self . pages . push ( page_ptr) ;
6771 }
6872
73+ /// For simplicity, we allocate in multiples of 8 bytes with at least that
74+ /// alignment.
6975 #[ inline]
7076 fn normalized_layout ( layout : Layout ) -> ( usize , usize ) {
7177 let align = if layout. align ( ) < 8 { 8 } else { layout. align ( ) } ;
7278 let size = layout. size ( ) . next_multiple_of ( 8 ) ;
7379 ( size, align)
7480 }
7581
82+ /// If a requested allocation is greater than one page, we simply allocate
83+ /// a fixed number of pages for it.
7684 #[ inline]
7785 fn huge_normalized_layout ( & self , layout : Layout ) -> ( usize , usize ) {
7886 let size = layout. size ( ) . next_multiple_of ( self . page_size ) ;
7987 let align = std:: cmp:: max ( layout. align ( ) , self . page_size ) ;
8088 ( size, align)
8189 }
8290
91+ /// Allocates memory as described in `Layout`. If `MachineAlloc::enable()`
92+ /// has *not* been called yet, this is just a wrapper for `(alloc::alloc(),
93+ /// true)`. Otherwise, it will allocate from its own memory pool and
94+ /// return `(ptr, false)`. The latter field is meant to correspond with the
95+ /// field `alloc_is_global` for `MiriAllocBytes`.
96+ ///
8397 /// SAFETY: See alloc::alloc()
8498 #[ inline]
85- pub unsafe fn alloc ( layout : Layout ) -> * mut u8 {
99+ pub unsafe fn alloc ( layout : Layout ) -> ( * mut u8 , bool ) {
86100 let mut alloc = ALLOCATOR . lock ( ) . unwrap ( ) ;
87- unsafe { if alloc. enabled { alloc. alloc_inner ( layout) } else { alloc:: alloc ( layout) } }
101+ unsafe {
102+ if alloc. enabled {
103+ ( alloc. alloc_inner ( layout) , false )
104+ } else {
105+ ( alloc:: alloc ( layout) , true )
106+ }
107+ }
88108 }
89109
110+ /// Same as `alloc()`, but zeroes out data before allocating. Instead
111+ /// wraps `alloc::alloc_zeroed()` if `MachineAlloc::enable()` has not been
112+ /// called yet.
113+ ///
90114 /// SAFETY: See alloc::alloc_zeroed()
91- pub unsafe fn alloc_zeroed ( layout : Layout ) -> * mut u8 {
115+ pub unsafe fn alloc_zeroed ( layout : Layout ) -> ( * mut u8 , bool ) {
92116 let mut alloc = ALLOCATOR . lock ( ) . unwrap ( ) ;
93117 if alloc. enabled {
94118 let ptr = unsafe { alloc. alloc_inner ( layout) } ;
@@ -97,13 +121,14 @@ impl MachineAlloc {
97121 ptr. write_bytes ( 0 , layout. size ( ) ) ;
98122 }
99123 }
100- ptr
124+ ( ptr, false )
101125 } else {
102- unsafe { alloc:: alloc_zeroed ( layout) }
126+ unsafe { ( alloc:: alloc_zeroed ( layout) , true ) }
103127 }
104128 }
105129
106- /// SAFETY: See alloc::alloc()
130+ /// SAFETY: The allocator must have been `enable()`d already and
131+ /// the `layout` must be valid.
107132 unsafe fn alloc_inner ( & mut self , layout : Layout ) -> * mut u8 {
108133 let ( size, align) = MachineAlloc :: normalized_layout ( layout) ;
109134
@@ -136,7 +161,8 @@ impl MachineAlloc {
136161 }
137162 }
138163
139- /// SAFETY: See alloc::alloc()
164+ /// SAFETY: Same as `alloc_inner()` with the added requirement that `layout`
165+ /// must ask for a size larger than the host pagesize.
140166 unsafe fn alloc_multi_page ( & mut self , layout : Layout ) -> * mut u8 {
141167 let ( size, align) = self . huge_normalized_layout ( layout) ;
142168
@@ -146,38 +172,36 @@ impl MachineAlloc {
146172 ret
147173 }
148174
149- /// Safety: see alloc::dealloc()
175+ /// Deallocates a pointer from the machine allocator. While not unsound,
176+ /// attempting to deallocate a pointer if `MachineAlloc` has not been enabled
177+ /// will likely result in a panic.
178+ ///
179+ /// SAFETY: This pointer must have been allocated with `MachineAlloc::alloc()`
180+ /// (or `alloc_zeroed()`) which must have returned `(ptr, false)` specifically!
181+ /// If it returned `(ptr, true)`, then deallocate it with `alloc::dealloc()` instead.
150182 pub unsafe fn dealloc ( ptr : * mut u8 , layout : Layout ) {
151- let mut alloc = ALLOCATOR . lock ( ) . unwrap ( ) ;
152- unsafe {
153- if alloc. enabled {
154- alloc. dealloc_inner ( ptr, layout) ;
155- } else {
156- alloc:: dealloc ( ptr, layout) ;
157- }
158- }
159- }
183+ let mut alloc_guard = ALLOCATOR . lock ( ) . unwrap ( ) ;
184+ // Doing it this way lets us grab 2 mutable references to different fields at once
185+ let alloc: & mut MachineAlloc = & mut alloc_guard;
160186
161- /// SAFETY: See alloc::dealloc()
162- unsafe fn dealloc_inner ( & mut self , ptr : * mut u8 , layout : Layout ) {
163187 let ( size, align) = MachineAlloc :: normalized_layout ( layout) ;
164188
165189 if size == 0 || ptr. is_null ( ) {
166190 return ;
167191 }
168192
169- let ptr_idx = ptr. addr ( ) % self . page_size ;
193+ let ptr_idx = ptr. addr ( ) % alloc . page_size ;
170194 let page_addr = ptr. addr ( ) - ptr_idx;
171195
172- if align > self . page_size || size > self . page_size {
196+ if align > alloc . page_size || size > alloc . page_size {
173197 unsafe {
174- self . dealloc_multi_page ( ptr, layout) ;
198+ alloc . dealloc_multi_page ( ptr, layout) ;
175199 }
176200 } else {
177- let pinfo = std:: iter:: zip ( & mut self . pages , & mut self . allocated )
201+ let pinfo = std:: iter:: zip ( & mut alloc . pages , & mut alloc . allocated )
178202 . find ( |( page, _) | page. addr ( ) == page_addr) ;
179203 let Some ( ( _, pinfo) ) = pinfo else {
180- panic ! ( "Freeing in an unallocated page: {ptr:?}\n Holding pages {:?}" , self . pages)
204+ panic ! ( "Freeing in an unallocated page: {ptr:?}\n Holding pages {:?}" , alloc . pages)
181205 } ;
182206 let ptr_idx_pinfo = ptr_idx / 8 ;
183207 let size_pinfo = size / 8 ;
@@ -187,22 +211,23 @@ impl MachineAlloc {
187211
188212 let mut free = vec ! [ ] ;
189213 let page_layout =
190- unsafe { Layout :: from_size_align_unchecked ( self . page_size , self . page_size ) } ;
191- for ( idx, pinfo) in self . allocated . iter ( ) . enumerate ( ) {
214+ unsafe { Layout :: from_size_align_unchecked ( alloc . page_size , alloc . page_size ) } ;
215+ for ( idx, pinfo) in alloc . allocated . iter ( ) . enumerate ( ) {
192216 if pinfo. iter ( ) . all ( |p| * p == 0 ) {
193217 free. push ( idx) ;
194218 }
195219 }
196220 free. reverse ( ) ;
197221 for idx in free {
198- let _ = self . allocated . remove ( idx) ;
222+ let _ = alloc . allocated . remove ( idx) ;
199223 unsafe {
200- alloc:: dealloc ( self . pages . remove ( idx) , page_layout) ;
224+ alloc:: dealloc ( alloc . pages . remove ( idx) , page_layout) ;
201225 }
202226 }
203227 }
204228
205- /// SAFETY: See alloc::dealloc()
229+ /// SAFETY: Same as `dealloc()` with the added requirement that `layout`
230+ /// must ask for a size larger than the host pagesize.
206231 unsafe fn dealloc_multi_page ( & mut self , ptr : * mut u8 , layout : Layout ) {
207232 let ( idx, _) = self
208233 . huge_allocs
0 commit comments