mm: stop zeroing memory during allocation in vmemmap
vmemmap_alloc_block() will no longer zero the block, so zero memory at
its call sites for everything except struct pages.  Struct page memory
is zeroed during struct page initialization.
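
Concretely, a caller that still needs zeroed memory now does the
memset() itself.  This is the helper the diff below adds to
mm/sparse-vmemmap.c, shown here with explanatory comments added:

        static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
        {
                void *p = vmemmap_alloc_block(size, node);      /* no longer zeroes */

                if (!p)
                        return NULL;
                memset(p, 0, size);     /* the caller zeroes explicitly */

                return p;
        }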

Replace the allocators in sparse-vmemmap with the non-zeroing versions.
This way we gain performance by zeroing the memory in parallel, at the
time the struct pages themselves are zeroed.
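
For reference, the only difference between the two memblock allocators
is the final memset().  A simplified sketch, not the verbatim
mm/memblock.c source (the real _raw variant may also poison the block
under debug configs):

        void * __init memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                                                  phys_addr_t min_addr, phys_addr_t max_addr,
                                                  int nid)
        {
                void *ptr = memblock_virt_alloc_internal(size, align,
                                                         min_addr, max_addr, nid);
                if (ptr)
                        memset(ptr, 0, size);   /* zeroing happens here... */
                return ptr;
        }

        void * __init memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                                      phys_addr_t min_addr, phys_addr_t max_addr,
                                                      int nid)
        {
                /* ...and is deliberately skipped here */
                return memblock_virt_alloc_internal(size, align,
                                                    min_addr, max_addr, nid);
        }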

Add struct page zeroing as part of the initialization of the other
fields in __init_single_page().
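
mm_zero_struct_page() itself is not part of this diff; to my
understanding, its generic fallback in include/linux/mm.h is a plain
memset(), which architectures are free to override with an optimized
(e.g. unrolled) version:

        /* Generic fallback; architectures may provide a faster override. */
        #ifndef mm_zero_struct_page
        #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
        #endif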

Single-thread performance, collected on an Intel(R) Xeon(R) CPU E7-8895
v3 @ 2.60GHz with 1T of memory (268400646 pages across 8 nodes):

                         BASE            FIX
sparse_init     11.244671836s   0.007199623s
zone_sizes_init  4.879775891s   8.355182299s
                  --------------------------
Total           16.124447727s   8.362381922s

sparse_init is where memory for struct pages is zeroed; this patch
moves that zeroing into __init_single_page(), which is called later,
from zone_sizes_init().

[[email protected]: make vmemmap_alloc_block_zero() private to sparse-vmemmap.c]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Pavel Tatashin <[email protected]>
Reviewed-by: Steven Sistare <[email protected]>
Reviewed-by: Daniel Jordan <[email protected]>
Reviewed-by: Bob Picco <[email protected]>
Tested-by: Bob Picco <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Sam Ravnborg <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Pavel Tatashin authored and torvalds committed Nov 16, 2017
1 parent e17d802 commit f7f9910
Showing 3 changed files with 22 additions and 11 deletions.
1 change: 1 addition & 0 deletions mm/page_alloc.c
@@ -1168,6 +1168,7 @@ static void free_one_page(struct zone *zone,
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
                                         unsigned long zone, int nid)
 {
+        mm_zero_struct_page(page);
         set_page_links(page, zone, nid, pfn);
         init_page_count(page);
         page_mapcount_reset(page);
26 changes: 18 additions & 8 deletions mm/sparse-vmemmap.c
@@ -42,7 +42,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
                                 unsigned long align,
                                 unsigned long goal)
 {
-        return memblock_virt_alloc_try_nid(size, align, goal,
+        return memblock_virt_alloc_try_nid_raw(size, align, goal,
                                             BOOTMEM_ALLOC_ACCESSIBLE, node);
 }
 
@@ -55,9 +55,8 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
         if (slab_is_available()) {
                 struct page *page;
 
-                page = alloc_pages_node(node,
-                                        GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
-                                        get_order(size));
+                page = alloc_pages_node(node, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
+                                        get_order(size));
                 if (page)
                         return page_address(page);
                 return NULL;
@@ -180,11 +179,22 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
         return pte;
 }
 
+static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
+{
+        void *p = vmemmap_alloc_block(size, node);
+
+        if (!p)
+                return NULL;
+        memset(p, 0, size);
+
+        return p;
+}
+
 pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
 {
         pmd_t *pmd = pmd_offset(pud, addr);
         if (pmd_none(*pmd)) {
-                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                 if (!p)
                         return NULL;
                 pmd_populate_kernel(&init_mm, pmd, p);
@@ -196,7 +206,7 @@ pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
 {
         pud_t *pud = pud_offset(p4d, addr);
         if (pud_none(*pud)) {
-                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                 if (!p)
                         return NULL;
                 pud_populate(&init_mm, pud, p);
@@ -208,7 +218,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
 {
         p4d_t *p4d = p4d_offset(pgd, addr);
         if (p4d_none(*p4d)) {
-                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                 if (!p)
                         return NULL;
                 p4d_populate(&init_mm, p4d, p);
@@ -220,7 +230,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
 {
         pgd_t *pgd = pgd_offset_k(addr);
         if (pgd_none(*pgd)) {
-                void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                 if (!p)
                         return NULL;
                 pgd_populate(&init_mm, pgd, p);
6 changes: 3 additions & 3 deletions mm/sparse.c
@@ -453,9 +453,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
         }
 
         size = PAGE_ALIGN(size);
-        map = memblock_virt_alloc_try_nid(size * map_count,
-                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
-                                          BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
+        map = memblock_virt_alloc_try_nid_raw(size * map_count,
+                                              PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+                                              BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
         if (map) {
                 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                         if (!present_section_nr(pnum))
