mm/nommu: use alloc_pages_exact() rather than its own implementation
authorJoonsoo Kim <iamjoonsoo.kim@lge.com>
Sat, 13 Dec 2014 00:55:55 +0000 (16:55 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 13 Dec 2014 20:42:48 +0000 (12:42 -0800)
do_mmap_private() in nommu.c tries to allocate physically contiguous pages
of arbitrary size in some cases, and we now have a good abstraction that
does exactly the same thing, alloc_pages_exact().  So, change it to use that.

There is no functional change.  This is a preparation step for accurately
supporting the page owner feature.
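
For reference, a minimal sketch of the allocation pattern do_mmap_private()
switches to (the helper names here are illustrative, not part of the patch):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Allocate a physically contiguous, page-aligned buffer of an
	 * arbitrary byte length.  alloc_pages_exact() rounds len up to
	 * whole pages and frees any excess tail pages itself, so the
	 * caller needs no manual trimming. */
	static void *example_alloc_buf(unsigned long len)
	{
		return alloc_pages_exact(len, GFP_KERNEL);
	}

	static void example_free_buf(void *base, unsigned long len)
	{
		/* Must pass the same length used at allocation time. */
		free_pages_exact(base, len);
	}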

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Jungsoo Son <jungsoo.son@lge.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/nommu.c

index cd519e1cd8a7a2833909871f25f7792f926001ca..b51eadf6d9528fa69ea80ad6d1c994763656e8da 100644
@@ -1149,8 +1149,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
                           unsigned long len,
                           unsigned long capabilities)
 {
-       struct page *pages;
-       unsigned long total, point, n;
+       unsigned long total, point;
        void *base;
        int ret, order;
 
@@ -1182,33 +1181,23 @@ static int do_mmap_private(struct vm_area_struct *vma,
        order = get_order(len);
        kdebug("alloc order %d for %lx", order, len);
 
-       pages = alloc_pages(GFP_KERNEL, order);
-       if (!pages)
-               goto enomem;
-
        total = 1 << order;
-       atomic_long_add(total, &mmap_pages_allocated);
-
        point = len >> PAGE_SHIFT;
 
-       /* we allocated a power-of-2 sized page set, so we may want to trim off
-        * the excess */
+       /* we don't want to allocate a power-of-2 sized page set */
        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
-               while (total > point) {
-                       order = ilog2(total - point);
-                       n = 1 << order;
-                       kdebug("shave %lu/%lu @%lu", n, total - point, total);
-                       atomic_long_sub(n, &mmap_pages_allocated);
-                       total -= n;
-                       set_page_refcounted(pages + total);
-                       __free_pages(pages + total, order);
-               }
+               total = point;
+               kdebug("try to alloc exact %lu pages", total);
+               base = alloc_pages_exact(len, GFP_KERNEL);
+       } else {
+               base = (void *)__get_free_pages(GFP_KERNEL, order);
        }
 
-       for (point = 1; point < total; point++)
-               set_page_refcounted(&pages[point]);
+       if (!base)
+               goto enomem;
+
+       atomic_long_add(total, &mmap_pages_allocated);
 
-       base = page_address(pages);
        region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
        region->vm_start = (unsigned long) base;
        region->vm_end   = region->vm_start + len;
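
For context, alloc_pages_exact() itself still allocates a power-of-2 block
and then trims the tail, much like the loop removed above.  A simplified
sketch of its behavior (paraphrased from mm/page_alloc.c of this era, not
part of this patch):

	void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
	{
		unsigned int order = get_order(size);
		unsigned long addr = __get_free_pages(gfp_mask, order);

		if (addr) {
			unsigned long end  = addr + (PAGE_SIZE << order);
			unsigned long used = addr + PAGE_ALIGN(size);

			/* Turn the high-order page into order-0 pages so
			 * the unused tail can be freed individually. */
			split_page(virt_to_page((void *)addr), order);
			while (used < end) {
				free_page(used);
				used += PAGE_SIZE;
			}
		}
		return (void *)addr;
	}

This is why the caller now accounts only len >> PAGE_SHIFT pages in
mmap_pages_allocated on the trimmed path: the excess pages are never held
by do_mmap_private().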