@@ -39,7 +39,9 @@ pub struct JuliaGCTrigger {
     interval_all_threads: AtomicUsize,
     actual_allocd: AtomicUsize,
     prev_sweep_full: AtomicBool,
-
+    /// The number of pending allocation pages. The allocation requests for these pages have failed, and a GC has been triggered.
+    /// We need to take them into account so that the new heap size can accommodate those allocations.
+    pending_pages: AtomicUsize,
     last_recorded_reserved_pages: AtomicUsize,
 }
 
@@ -79,14 +81,18 @@ impl JuliaGCTrigger {
             actual_allocd: AtomicUsize::new(0),
             prev_sweep_full: AtomicBool::new(true),
             last_recorded_reserved_pages: AtomicUsize::new(0),
+            pending_pages: AtomicUsize::new(0),
         }
     }
 }
 
 impl GCTriggerPolicy<JuliaVM> for JuliaGCTrigger {
     fn on_gc_start(&self, mmtk: &'static MMTK<JuliaVM>) {
         let reserved_pages_in_last_gc = self.last_recorded_reserved_pages.load(Ordering::Relaxed);
-        let reserved_pages_now = mmtk.get_plan().get_reserved_pages();
+        // reserved pages now should include pending allocations
+        let reserved_pages_now =
+            mmtk.get_plan().get_reserved_pages() + self.pending_pages.load(Ordering::SeqCst);
+
         self.last_recorded_reserved_pages
             .store(reserved_pages_now, Ordering::Relaxed);
         self.actual_allocd.store(
@@ -110,7 +116,8 @@ impl GCTriggerPolicy<JuliaVM> for JuliaGCTrigger {
         let n_mutators = crate::active_plan::VMActivePlan::number_of_mutators();
 
         let reserved_pages_before_gc = self.last_recorded_reserved_pages.load(Ordering::Relaxed);
-        let reserved_pages_now = mmtk.get_plan().get_reserved_pages();
+        let reserved_pages_now =
+            mmtk.get_plan().get_reserved_pages() + self.pending_pages.load(Ordering::SeqCst);
         let freed = conversions::pages_to_bytes(
             reserved_pages_before_gc.saturating_sub(reserved_pages_now),
         );
@@ -122,7 +129,7 @@ impl GCTriggerPolicy<JuliaVM> for JuliaGCTrigger {
         // ignore large frontier (large frontier means the bytes of pointers reachable from the remset is larger than the default collect interval)
         let gc_auto = !mmtk.is_user_triggered_collection();
         let not_freed_enough = gc_auto
-            && (freed as f64) < (self.actual_allocd.load(Ordering::Relaxed) as f64 * 0.7f64);
+            && ((freed as f64) < (self.actual_allocd.load(Ordering::Relaxed) as f64 * 0.7f64));
         let mut sweep_full = false;
         if gc_auto {
             if not_freed_enough {
@@ -240,6 +247,13 @@ impl GCTriggerPolicy<JuliaVM> for JuliaGCTrigger {
             self.interval.load(Ordering::Relaxed) * n_mutators / 2,
             Ordering::Relaxed,
         );
+
+        // Clear the pending allocation pages at the end of GC, whether we used them or not.
+        self.pending_pages.store(0, Ordering::SeqCst);
+    }
+
+    fn on_pending_allocation(&self, pages: usize) {
+        self.pending_pages.fetch_add(pages, Ordering::SeqCst);
     }
 
     /// Is a GC required now?
@@ -249,9 +263,19 @@ impl GCTriggerPolicy<JuliaVM> for JuliaGCTrigger {
         space: Option<SpaceStats<JuliaVM>>,
         plan: &dyn Plan<VM = JuliaVM>,
     ) -> bool {
+        let reserved_pages_now = plan.get_reserved_pages();
+        let reserved_pages_before_gc = self.last_recorded_reserved_pages.load(Ordering::Relaxed);
+
         let allocd_so_far = conversions::pages_to_bytes(
-            plan.get_reserved_pages() - self.last_recorded_reserved_pages.load(Ordering::Relaxed),
+            reserved_pages_now.saturating_sub(reserved_pages_before_gc),
         );
+
+        trace!(
+            "Reserved now = {}, last recorded reserved = {}, Allocd so far: {}. interval_all_threads = {}",
+            plan.get_reserved_pages(), self.last_recorded_reserved_pages.load(Ordering::Relaxed), allocd_so_far,
+            self.interval_all_threads.load(Ordering::Relaxed)
+        );
+
         // Check against interval_all_threads, as we count allocation from all threads.
         if allocd_so_far > self.interval_all_threads.load(Ordering::Relaxed) {
            return true;
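For context, below is a minimal standalone sketch of the accounting this change affects, kept separate from the MMTk types in the diff: pending pages (recorded via `on_pending_allocation`) are folded into the reserved-page count, freed bytes are computed against the pre-GC reservation with a saturating subtraction, and an automatic GC that reclaims less than 70% of what was allocated since the last GC counts as "not freed enough". The 4 KiB page size and all sample numbers are assumptions for illustration only, not values taken from MMTk.

```rust
/// Illustrative sketch only -- not MMTk code. Assumes a 4 KiB page size.
const BYTES_PER_PAGE: usize = 4096;

/// Reserved pages plus the pages of allocation requests that failed and triggered this GC.
fn reserved_including_pending(reserved_pages: usize, pending_pages: usize) -> usize {
    reserved_pages + pending_pages
}

/// Bytes freed by the GC, clamped at zero if the reservation grew (saturating_sub, as in the patch).
fn freed_bytes(reserved_before_gc: usize, reserved_now: usize) -> usize {
    reserved_before_gc.saturating_sub(reserved_now) * BYTES_PER_PAGE
}

/// The heuristic from the patch: an automatic GC that frees less than 70% of what was
/// allocated since the last GC is "not freed enough" and may escalate to a full sweep.
fn not_freed_enough(freed: usize, actual_allocd: usize) -> bool {
    (freed as f64) < (actual_allocd as f64 * 0.7f64)
}

fn main() {
    // Hypothetical numbers: 10_000 pages reserved before the GC, 7_000 live afterwards,
    // plus 500 pages of pending (failed) allocations that must still be satisfied.
    let reserved_now = reserved_including_pending(7_000, 500);
    let freed = freed_bytes(10_000, reserved_now);
    let allocd = 20 * 1024 * 1024; // 20 MiB allocated since the last GC (hypothetical)
    println!(
        "freed = {} bytes, not_freed_enough = {}",
        freed,
        not_freed_enough(freed, allocd)
    );
}
```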