mm/memory_hotplug.c: cleanup __add_pages()
Author: David Hildenbrand <david@redhat.com>
Tue, 7 Apr 2020 03:06:56 +0000 (20:06 -0700)
Committer: Linus Torvalds <torvalds@linux-foundation.org>
Tue, 7 Apr 2020 17:43:40 +0000 (10:43 -0700)
Let's drop the basically unused section stuff and simplify.  The logic now
matches the logic in __remove_pages().

Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Segher Boessenkool <segher@kernel.crashing.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Link: http://lkml.kernel.org/r/20200228095819.10750-3-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memory_hotplug.c

index df748b7834188dd6e1f6631360553efd93224e2d..04cb87ec39d0f3cfb90fdf8987061a6e4f31c796 100644 (file)
@@ -307,8 +307,9 @@ static int check_hotplug_memory_addressable(unsigned long pfn,
 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
                struct mhp_restrictions *restrictions)
 {
+       const unsigned long end_pfn = pfn + nr_pages;
+       unsigned long cur_nr_pages;
        int err;
-       unsigned long nr, start_sec, end_sec;
        struct vmem_altmap *altmap = restrictions->altmap;
 
        err = check_hotplug_memory_addressable(pfn, nr_pages);
@@ -331,18 +332,13 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
        if (err)
                return err;
 
-       start_sec = pfn_to_section_nr(pfn);
-       end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-       for (nr = start_sec; nr <= end_sec; nr++) {
-               unsigned long pfns;
-
-               pfns = min(nr_pages, PAGES_PER_SECTION
-                               - (pfn & ~PAGE_SECTION_MASK));
-               err = sparse_add_section(nid, pfn, pfns, altmap);
+       for (; pfn < end_pfn; pfn += cur_nr_pages) {
+               /* Select all remaining pages up to the next section boundary */
+               cur_nr_pages = min(end_pfn - pfn,
+                                  SECTION_ALIGN_UP(pfn + 1) - pfn);
+               err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
                if (err)
                        break;
-               pfn += pfns;
-               nr_pages -= pfns;
                cond_resched();
        }
        vmemmap_populate_print_last();