@@ -1301,7 +1301,7 @@ static NOINLINE jl_taggedvalue_t *gc_add_page(jl_gc_pool_t *p) JL_NOTSAFEPOINT
     // Do not pass in `ptls` as argument. This slows down the fast path
     // in pool_alloc significantly
     jl_ptls_t ptls = jl_current_task->ptls;
-    jl_gc_pagemeta_t *pg = pop_lf_back(&ptls->page_metadata_lazily_freed);
+    jl_gc_pagemeta_t *pg = pop_lf_back(&ptls->page_metadata_buffered);
     if (pg != NULL) {
         gc_alloc_map_set(pg->data, GC_PAGE_ALLOCATED);
     }
@@ -1410,10 +1410,10 @@ int jl_gc_classify_pools(size_t sz, int *osize)
 
 // sweep phase
 
-int64_t lazy_freed_pages = 0;
+int64_t buffered_pages = 0;
 
 // Returns pointer to terminal pointer of list rooted at *pfl.
-static void gc_sweep_page(jl_gc_pool_t *p, jl_gc_page_stack_t *allocd, jl_gc_page_stack_t *lazily_freed,
+static void gc_sweep_page(jl_gc_pool_t *p, jl_gc_page_stack_t *allocd, jl_gc_page_stack_t *buffered,
                           jl_gc_pagemeta_t *pg, int osize) JL_NOTSAFEPOINT
 {
     char *data = pg->data;
@@ -1427,7 +1427,7 @@ static void gc_sweep_page(jl_gc_pool_t *p, jl_gc_page_stack_t *allocd, jl_gc_pag
     size_t nfree;
 
     int re_use_page = 1;
-    int freed_lazily = 0;
+    int keep_as_local_buffer = 0;
     int freedall = 1;
     int pg_skpd = 1;
     if (!pg->has_marked) {
@@ -1438,9 +1438,9 @@ static void gc_sweep_page(jl_gc_pool_t *p, jl_gc_page_stack_t *allocd, jl_gc_pag
         // the eager one uses less memory.
         // FIXME - need to do accounting on a per-thread basis
         // on quick sweeps, keep a few pages empty but allocated for performance
-        if (!current_sweep_full && lazy_freed_pages <= default_collect_interval / GC_PAGE_SZ) {
-            lazy_freed_pages++;
-            freed_lazily = 1;
+        if (!current_sweep_full && buffered_pages <= default_collect_interval / GC_PAGE_SZ) {
+            buffered_pages++;
+            keep_as_local_buffer = 1;
         }
 #endif
         nfree = (GC_PAGE_SZ - GC_PAGE_OFFSET) / osize;
@@ -1509,26 +1509,15 @@ static void gc_sweep_page(jl_gc_pool_t *p, jl_gc_page_stack_t *allocd, jl_gc_pag
     if (re_use_page) {
         push_lf_back(allocd, pg);
     }
-    else if (freed_lazily) {
-        gc_alloc_map_set(pg->data, GC_PAGE_LAZILY_FREED);
-        push_lf_back(lazily_freed, pg);
-        jl_atomic_fetch_add_relaxed(&gc_heap_stats.heap_size, -GC_PAGE_SZ);
-    }
     else {
+        gc_alloc_map_set(pg->data, GC_PAGE_LAZILY_FREED);
         jl_atomic_fetch_add_relaxed(&gc_heap_stats.heap_size, -GC_PAGE_SZ);
-#ifdef _P64 // only enable concurrent sweeping on 64bit
-        if (jl_n_sweepthreads == 0) {
-            jl_gc_free_page(pg);
-            push_lf_back(&global_page_pool_freed, pg);
+        if (keep_as_local_buffer) {
+            push_lf_back(buffered, pg);
         }
         else {
-            gc_alloc_map_set(pg->data, GC_PAGE_LAZILY_FREED);
             push_lf_back(&global_page_pool_lazily_freed, pg);
         }
-#else
-        jl_gc_free_page(pg);
-        push_lf_back(&global_page_pool_freed, pg);
-#endif
     }
     gc_time_count_page(freedall, pg_skpd);
     jl_atomic_fetch_add((_Atomic(int64_t) *)&gc_num.freed, (nfree - old_nfree) * osize);
@@ -1598,7 +1587,7 @@ void gc_sweep_pool_parallel(void)
             if (pg == NULL) {
                 continue;
             }
-            gc_sweep_pool_page(allocd, &ptls2->page_metadata_lazily_freed, pg);
+            gc_sweep_pool_page(allocd, &ptls2->page_metadata_buffered, pg);
             found_pg = 1;
         }
         if (!found_pg) {
@@ -1617,11 +1606,23 @@ void gc_sweep_wait_for_all(void)
     }
 }
 
+void gc_free_pages(void)
+{
+    while (1) {
+        jl_gc_pagemeta_t *pg = pop_lf_back(&global_page_pool_lazily_freed);
+        if (pg == NULL) {
+            break;
+        }
+        jl_gc_free_page(pg);
+        push_lf_back(&global_page_pool_freed, pg);
+    }
+}
+
 // setup the data-structures for a sweep over all memory pools
 static void gc_sweep_pool(void)
 {
     gc_time_pool_start();
-    lazy_freed_pages = 0;
+    buffered_pages = 0;
 
     // For the benefit of the analyzer, which doesn't know that gc_n_threads
     // doesn't change over the course of this function
@@ -1661,10 +1662,10 @@ static void gc_sweep_pool(void)
                 pg->has_young = 1;
             }
         }
-        jl_gc_pagemeta_t *pg = jl_atomic_load_relaxed(&ptls2->page_metadata_lazily_freed.bottom);
+        jl_gc_pagemeta_t *pg = jl_atomic_load_relaxed(&ptls2->page_metadata_buffered.bottom);
         while (pg != NULL) {
             jl_gc_pagemeta_t *pg2 = pg->next;
-            lazy_freed_pages++;
+            buffered_pages++;
             pg = pg2;
         }
     }
@@ -1723,6 +1724,11 @@ static void gc_sweep_pool(void)
     if (jl_n_sweepthreads > 0) {
         uv_sem_post(&gc_sweep_assists_needed);
     }
+    else {
+        gc_free_pages();
+    }
+#else
+    gc_free_pages();
 #endif
 
     gc_time_pool_end(current_sweep_full);
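
The structural change in this diff is the split between a per-thread buffer of empty-but-retained pages (`page_metadata_buffered`) and the global `global_page_pool_lazily_freed` stack, which the new `gc_free_pages()` drains either from a dedicated sweep thread or, when `jl_n_sweepthreads == 0`, directly at the end of `gc_sweep_pool()`. Below is a minimal, standalone sketch of that drain pattern; the linked-stack type, `stack_push`/`stack_pop`, and `drain_lazily_freed` are simplified single-threaded stand-ins for Julia's lock-free `push_lf_back`/`pop_lf_back` and for `jl_gc_free_page`, not the runtime's actual implementation.

/*
 * Standalone sketch (not Julia runtime code): models the drain loop that the
 * new gc_free_pages() performs. The real pop_lf_back/push_lf_back are Julia's
 * lock-free page-stack primitives and jl_gc_free_page returns the page data
 * to the OS; here they are replaced by a plain singly linked stack and a
 * placeholder so the control flow can be compiled and run on its own.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct page_meta {
    struct page_meta *next;
    int id;                        /* stand-in for the real page metadata */
} page_meta_t;

typedef struct {
    page_meta_t *bottom;           /* top of the stack */
} page_stack_t;

static void stack_push(page_stack_t *s, page_meta_t *pg)
{
    pg->next = s->bottom;
    s->bottom = pg;
}

static page_meta_t *stack_pop(page_stack_t *s)
{
    page_meta_t *pg = s->bottom;
    if (pg != NULL)
        s->bottom = pg->next;
    return pg;
}

static page_stack_t lazily_freed;  /* pages swept but not yet returned */
static page_stack_t freed;         /* metadata of pages already returned */

/* Mirrors gc_free_pages(): drain one stack, "free" each page, park the
 * metadata on the freed stack so it can be reused for future pages. */
static void drain_lazily_freed(void)
{
    while (1) {
        page_meta_t *pg = stack_pop(&lazily_freed);
        if (pg == NULL)
            break;
        /* jl_gc_free_page(pg) would madvise/unmap the page data here */
        stack_push(&freed, pg);
    }
}

int main(void)
{
    for (int i = 0; i < 4; i++) {
        page_meta_t *pg = malloc(sizeof(*pg));
        pg->id = i;
        stack_push(&lazily_freed, pg);
    }
    drain_lazily_freed();
    for (page_meta_t *pg = freed.bottom; pg != NULL; pg = pg->next)
        printf("page %d moved to freed pool\n", pg->id);
    return 0;
}

The point of the two-stack handoff is that sweeping only moves metadata between lock-free stacks; the expensive call that actually returns memory to the OS can then run later, off the critical path, by whichever thread calls the drain.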