
Commit dbc8358

JoonsooKim authored and torvalds committed
mm/nommu: use alloc_pages_exact() rather than its own implementation
do_mmap_private() in nommu.c tries to allocate physically contiguous pages of arbitrary size in some cases, and we now have a good abstraction that does exactly the same thing, alloc_pages_exact(). So, change the code to use it. There is no functional change. This is a preparation step for supporting the page owner feature accurately.

Signed-off-by: Joonsoo Kim <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Michal Nazarewicz <[email protected]>
Cc: Jungsoo Son <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 031bc57 commit dbc8358
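For background, alloc_pages_exact() takes a byte count and a GFP mask, returns the kernel virtual address of a physically contiguous allocation, and internally hands back whatever tail pages the underlying power-of-2 allocation left beyond the requested size; free_pages_exact() is its counterpart. A minimal sketch of that pattern, as an illustration outside this patch (the helper names alloc_exact_buf and free_exact_buf are invented, not part of the commit):

#include <linux/gfp.h>

/*
 * Illustration only: get 'len' bytes of physically contiguous memory
 * without keeping the power-of-2 excess that a bare alloc_pages()
 * would leave behind.
 */
static void *alloc_exact_buf(size_t len)
{
	/*
	 * alloc_pages_exact() rounds len up to whole pages, allocates a
	 * power-of-2 block internally, splits it, and frees the unused
	 * tail pages before returning the kernel virtual address.
	 */
	return alloc_pages_exact(len, GFP_KERNEL);
}

static void free_exact_buf(void *base, size_t len)
{
	/* the same byte length must be passed back when freeing */
	free_pages_exact(base, len);
}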

1 file changed: +11 −22 lines

mm/nommu.c (+11 −22)
@@ -1149,8 +1149,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 			   unsigned long len,
 			   unsigned long capabilities)
 {
-	struct page *pages;
-	unsigned long total, point, n;
+	unsigned long total, point;
 	void *base;
 	int ret, order;
 
@@ -1182,33 +1181,23 @@ static int do_mmap_private(struct vm_area_struct *vma,
 	order = get_order(len);
 	kdebug("alloc order %d for %lx", order, len);
 
-	pages = alloc_pages(GFP_KERNEL, order);
-	if (!pages)
-		goto enomem;
-
 	total = 1 << order;
-	atomic_long_add(total, &mmap_pages_allocated);
-
 	point = len >> PAGE_SHIFT;
 
-	/* we allocated a power-of-2 sized page set, so we may want to trim off
-	 * the excess */
+	/* we don't want to allocate a power-of-2 sized page set */
 	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
-		while (total > point) {
-			order = ilog2(total - point);
-			n = 1 << order;
-			kdebug("shave %lu/%lu @%lu", n, total - point, total);
-			atomic_long_sub(n, &mmap_pages_allocated);
-			total -= n;
-			set_page_refcounted(pages + total);
-			__free_pages(pages + total, order);
-		}
+		total = point;
+		kdebug("try to alloc exact %lu pages", total);
+		base = alloc_pages_exact(len, GFP_KERNEL);
+	} else {
+		base = (void *)__get_free_pages(GFP_KERNEL, order);
 	}
 
-	for (point = 1; point < total; point++)
-		set_page_refcounted(&pages[point]);
+	if (!base)
+		goto enomem;
+
+	atomic_long_add(total, &mmap_pages_allocated);
 
-	base = page_address(pages);
 	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
 	region->vm_start = (unsigned long) base;
 	region->vm_end = region->vm_start + len;