
Commit

hugetlb: fix copy_hugetlb_page_range() to handle migration/hwpoisoned entry

There's a race between fork() and hugepage migration, and as a result we
try to "dereference" a swap entry as a normal pte, causing a kernel panic.
The cause of the problem is that copy_hugetlb_page_range() can't handle
the "swap entry" family (migration entries and hwpoisoned entries), so
let's fix it.

Signed-off-by: Naoya Horiguchi <[email protected]>
Acked-by: Hugh Dickins <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: <[email protected]>	[2.6.37+]
Signed-off-by: Andrew Morton <[email protected]>
Naoya Horiguchi authored and sfrothwell committed Jun 20, 2014
1 parent bac2c3e commit f1c5b04
Showing 1 changed file with 42 additions and 28 deletions.
70 changes: 42 additions & 28 deletions mm/hugetlb.c
@@ -2520,6 +2520,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 	update_mmu_cache(vma, address, ptep);
 }
 
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_migration_entry(swp))
+		return 1;
+	else
+		return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+		return 1;
+	else
+		return 0;
+}
 
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			    struct vm_area_struct *vma)
@@ -2559,10 +2584,25 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
 		src_ptl = huge_pte_lockptr(h, src, src_pte);
 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-		if (!huge_pte_none(huge_ptep_get(src_pte))) {
+		entry = huge_ptep_get(src_pte);
+		if (huge_pte_none(entry)) { /* skip none entry */
+			;
+		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
+				    is_hugetlb_entry_hwpoisoned(entry))) {
+			swp_entry_t swp_entry = pte_to_swp_entry(entry);
+			if (is_write_migration_entry(swp_entry) && cow) {
+				/*
+				 * COW mappings require pages in both
+				 * parent and child to be set to read.
+				 */
+				make_migration_entry_read(&swp_entry);
+				entry = swp_entry_to_pte(swp_entry);
+				set_huge_pte_at(src, addr, src_pte, entry);
+			}
+			set_huge_pte_at(dst, addr, dst_pte, entry);
+		} else {
 			if (cow)
 				huge_ptep_set_wrprotect(src, addr, src_pte);
-			entry = huge_ptep_get(src_pte);
 			ptepage = pte_page(entry);
 			get_page(ptepage);
 			page_dup_rmap(ptepage);
@@ -2578,32 +2618,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	return ret;
 }
 
-static int is_hugetlb_entry_migration(pte_t pte)
-{
-	swp_entry_t swp;
-
-	if (huge_pte_none(pte) || pte_present(pte))
-		return 0;
-	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_migration_entry(swp))
-		return 1;
-	else
-		return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-	swp_entry_t swp;
-
-	if (huge_pte_none(pte) || pte_present(pte))
-		return 0;
-	swp = pte_to_swp_entry(pte);
-	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
-		return 1;
-	else
-		return 0;
-}
-
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page)
