mm: thp: pass correct vm_flags to hugepage_vma_check()

khugepaged_enter_vma_merge() passes a stale vma->vm_flags to
hugepage_vma_check(), while its vm_flags argument already carries the
latest value.  Pass that vm_flags into hugepage_vma_check() instead.
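
To illustrate the ordering that makes vma->vm_flags stale, here is a
minimal, self-contained model (plain userspace C with a stand-in
VM_HUGEPAGE bit and a stripped-down struct vma; it is not kernel code):
the caller computes the new flags in a local variable, runs the check,
and only afterwards writes the flags back to the vma, so a check that
reads vma->vm_flags still sees the old value.

   /* Illustrative model only, not kernel code. */
   #include <stdbool.h>
   #include <stdio.h>

   #define VM_HUGEPAGE (1UL << 0)          /* stand-in for the real flag bit */

   struct vma { unsigned long vm_flags; };

   /* buggy check: reads the not-yet-updated vma->vm_flags */
   static bool check_stale(const struct vma *vma, unsigned long vm_flags)
   {
           return vma->vm_flags & VM_HUGEPAGE;
   }

   /* fixed check: reads the vm_flags argument carrying the latest value */
   static bool check_fixed(const struct vma *vma, unsigned long vm_flags)
   {
           return vm_flags & VM_HUGEPAGE;
   }

   int main(void)
   {
           struct vma vma = { .vm_flags = 0 };

           /* (1) a madvise-style caller computes the new flags locally */
           unsigned long new_flags = vma.vm_flags | VM_HUGEPAGE;

           /* (2) the check runs before the write-back */
           printf("stale check: %d, fixed check: %d\n",
                  check_stale(&vma, new_flags), check_fixed(&vma, new_flags));

           /* (3) only now are the flags written back to the vma */
           vma.vm_flags = new_flags;
           return 0;
   }

Running it prints "stale check: 0, fixed check: 1", which mirrors the
bug: the stale check misses VM_HUGEPAGE even though the caller has
already decided to set it.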

With this bug, madvise(MADV_HUGEPAGE) on files mmap'ed from shmem fails
to put the memory in huge pages.  Here is an example of such a failed
madvise():

   /* mount /dev/shm with huge=advise:
    *     mount -o remount,huge=advise /dev/shm */
   /* create file /dev/shm/huge */
   #define HUGE_FILE "/dev/shm/huge"

   fd = open(HUGE_FILE, O_RDONLY);
   ptr = mmap(NULL, FILE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
   ret = madvise(ptr, FILE_SIZE, MADV_HUGEPAGE);

madvise() returns 0, but the memory region is never placed in huge
pages (check ShmemHugePages in /proc/meminfo).
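
For convenience, here is a complete, compilable version of the fragment
above (a sketch: the FILE_SIZE value, the _DEFAULT_SOURCE define, the
page-touching loop and the error handling are illustrative additions,
not part of the original report).  Run it after the remount step shown
above, with /dev/shm/huge created at least FILE_SIZE bytes large, and
watch ShmemHugePages in /proc/meminfo from another shell.

   #define _DEFAULT_SOURCE         /* for madvise()/MADV_HUGEPAGE in glibc headers */
   #include <fcntl.h>
   #include <stdio.h>
   #include <sys/mman.h>
   #include <unistd.h>

   #define HUGE_FILE "/dev/shm/huge"
   #define FILE_SIZE (4UL << 20)   /* assumed 4 MB, a multiple of the 2 MB PMD size */

   int main(void)
   {
           int fd = open(HUGE_FILE, O_RDONLY);
           if (fd < 0) {
                   perror("open");
                   return 1;
           }

           char *ptr = mmap(NULL, FILE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
           if (ptr == MAP_FAILED) {
                   perror("mmap");
                   return 1;
           }

           if (madvise(ptr, FILE_SIZE, MADV_HUGEPAGE))
                   perror("madvise");      /* returns 0 even when the bug is present */

           /* Fault the pages in; with the bug, ShmemHugePages stays at 0
            * for this file even though madvise() returned 0. */
           for (unsigned long off = 0; off < FILE_SIZE; off += 4096)
                   (void)*(volatile char *)(ptr + off);

           pause();                /* keep the mapping alive while inspecting */
           return 0;
   }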

Link: http://lkml.kernel.org/r/[email protected]
Fixes: 02b75dc ("mm: thp: register mm for khugepaged when merging vma for shmem")
Signed-off-by: Song Liu <[email protected]>
Reviewed-by: Rik van Riel <[email protected]>
Reviewed-by: Yang Shi <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
liu-song-6 authored and torvalds committed Aug 17, 2018
1 parent a718e28 commit 50f8b92
Showing 1 changed file with 8 additions and 7 deletions.

--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -397,10 +397,11 @@ static inline int khugepaged_test_exit(struct mm_struct *mm)
         return atomic_read(&mm->mm_users) == 0;
 }
 
-static bool hugepage_vma_check(struct vm_area_struct *vma)
+static bool hugepage_vma_check(struct vm_area_struct *vma,
+                               unsigned long vm_flags)
 {
-        if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
-            (vma->vm_flags & VM_NOHUGEPAGE) ||
+        if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
+            (vm_flags & VM_NOHUGEPAGE) ||
             test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                 return false;
         if (shmem_file(vma->vm_file)) {
@@ -413,7 +414,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
                 return false;
         if (is_vma_temporary_stack(vma))
                 return false;
-        return !(vma->vm_flags & VM_NO_KHUGEPAGED);
+        return !(vm_flags & VM_NO_KHUGEPAGED);
 }
 
 int __khugepaged_enter(struct mm_struct *mm)
@@ -458,7 +459,7 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
          * khugepaged does not yet work on non-shmem files or special
          * mappings. And file-private shmem THP is not supported.
          */
-        if (!hugepage_vma_check(vma))
+        if (!hugepage_vma_check(vma, vm_flags))
                 return 0;
 
         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
@@ -861,7 +862,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
         hend = vma->vm_end & HPAGE_PMD_MASK;
         if (address < hstart || address + HPAGE_PMD_SIZE > hend)
                 return SCAN_ADDRESS_RANGE;
-        if (!hugepage_vma_check(vma))
+        if (!hugepage_vma_check(vma, vma->vm_flags))
                 return SCAN_VMA_CHECK;
         return 0;
 }
@@ -1695,7 +1696,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
                         progress++;
                         break;
                 }
-                if (!hugepage_vma_check(vma)) {
+                if (!hugepage_vma_check(vma, vma->vm_flags)) {
 skip:
                         progress++;
                         continue;
