mm/sparsemem: convert kmalloc_section_memmap() to populate_section_memmap()
authorDan Williams <dan.j.williams@intel.com>
Thu, 18 Jul 2019 22:58:11 +0000 (15:58 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 19 Jul 2019 00:08:07 +0000 (17:08 -0700)
Allow sub-section sized ranges to be added to the memmap.

populate_section_memmap() takes an explicit pfn range rather than
assuming a full section, and those parameters are plumbed all the way
through to vmemmap_populate().  There should be no sub-section usage in
current deployments.  New warnings are added to clarify which memmap
allocation paths are sub-section capable.
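For illustration, a minimal before/after sketch of the call-site
conversion, using the names that appear in the mm/sparse.c hunks below
(a sketch, not additional patch content):

	/* before: the helper implied a full section starting at section_nr */
	memmap = kmalloc_section_memmap(section_nr, nid, altmap);

	/* after: the pfn range is explicit, so sub-section callers can follow */
	memmap = populate_section_memmap(start_pfn, PAGES_PER_SECTION, nid, altmap);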

Link: http://lkml.kernel.org/r/156092352058.979959.6551283472062305149.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> [ppc64]
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richardw.yang@linux.intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/mm/init_64.c
include/linux/mm.h
mm/sparse-vmemmap.c
mm/sparse.c

index 5a289a2ab1086d0b95d37a69ae8b32bba5dcf85d..a6b5c653727badfd0823a6035eb5c1f9e0f5eb3c 100644 (file)
@@ -1518,7 +1518,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 {
        int err;
 
-       if (boot_cpu_has(X86_FEATURE_PSE))
+       if (end - start < PAGES_PER_SECTION * sizeof(struct page))
+               err = vmemmap_populate_basepages(start, end, node);
+       else if (boot_cpu_has(X86_FEATURE_PSE))
                err = vmemmap_populate_hugepages(start, end, node, altmap);
        else if (altmap) {
                pr_err_once("%s: no cpu support for altmap allocations\n",
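As a worked check of the new threshold (assuming a common x86_64
configuration: 4K base pages, 128M sections, 64-byte struct page):
PAGES_PER_SECTION * sizeof(struct page) = 32768 * 64 = 2M, i.e. exactly
one PMD-sized huge page of memmap.  A range smaller than that cannot be
backed by a huge mapping, so it is routed to
vmemmap_populate_basepages() instead.
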
index 48ab7b982d828bbb5085b038d09df805ea14bdc9..0334ca97c584d8dc85e2038399bf1dfb2432c557 100644 (file)
@@ -2767,8 +2767,8 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
 #endif
 
 void *sparse_buffer_alloc(unsigned long size);
-struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
-               struct vmem_altmap *altmap);
+struct page * __populate_section_memmap(unsigned long pfn,
+               unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
index 7fec057967966c27fdedf3ccffc082ce12eb7ef7..200aef686722675d73450977c6ce7f2c80210453 100644 (file)
@@ -245,19 +245,26 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
        return 0;
 }
 
-struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
-               struct vmem_altmap *altmap)
+struct page * __meminit __populate_section_memmap(unsigned long pfn,
+               unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
        unsigned long start;
        unsigned long end;
-       struct page *map;
 
-       map = pfn_to_page(pnum * PAGES_PER_SECTION);
-       start = (unsigned long)map;
-       end = (unsigned long)(map + PAGES_PER_SECTION);
+       /*
+        * The minimum granularity of memmap extensions is
+        * PAGES_PER_SUBSECTION as allocations are tracked in the
+        * 'subsection_map' bitmap of the section.
+        */
+       end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
+       pfn &= PAGE_SUBSECTION_MASK;
+       nr_pages = end - pfn;
+
+       start = (unsigned long) pfn_to_page(pfn);
+       end = start + nr_pages * sizeof(struct page);
 
        if (vmemmap_populate(start, end, nid, altmap))
                return NULL;
 
-       return map;
+       return pfn_to_page(pfn);
 }
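The rounding above can be checked in isolation.  A standalone userspace
sketch, assuming PAGES_PER_SUBSECTION is 512 (2M sub-sections with 4K
pages, as on x86_64) and re-deriving the kernel's ALIGN() macro:

	#include <stdio.h>

	#define PAGES_PER_SUBSECTION 512UL
	#define PAGE_SUBSECTION_MASK (~(PAGES_PER_SUBSECTION - 1))
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long pfn = 1000, nr_pages = 100;
		unsigned long end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);

		pfn &= PAGE_SUBSECTION_MASK;
		nr_pages = end - pfn;
		/* prints pfn=512 nr_pages=1024: the 100-page request grew to
		 * cover both 512-page sub-sections it touches */
		printf("pfn=%lu nr_pages=%lu\n", pfn, nr_pages);
		return 0;
	}
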
index 26b48ee1a262574f0800e6752d1644a2e2464efa..6b01022e23a99e5d8ed9925caf9bbf5b46fadb8e 100644 (file)
@@ -439,8 +439,8 @@ static unsigned long __init section_map_size(void)
        return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
 }
 
-struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
-               struct vmem_altmap *altmap)
+struct page __init *__populate_section_memmap(unsigned long pfn,
+               unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
        unsigned long size = section_map_size();
        struct page *map = sparse_buffer_alloc(size);
@@ -521,10 +521,13 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
        }
        sparse_buffer_init(map_count * section_map_size(), nid);
        for_each_present_section_nr(pnum_begin, pnum) {
+               unsigned long pfn = section_nr_to_pfn(pnum);
+
                if (pnum >= pnum_end)
                        break;
 
-               map = sparse_mem_map_populate(pnum, nid, NULL);
+               map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
+                               nid, NULL);
                if (!map) {
                        pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
                               __func__, nid);
@@ -625,17 +628,17 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-               struct vmem_altmap *altmap)
+static struct page *populate_section_memmap(unsigned long pfn,
+               unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
-       /* This will make the necessary allocations eventually. */
-       return sparse_mem_map_populate(pnum, nid, altmap);
+       return __populate_section_memmap(pfn, nr_pages, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap,
+
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
                struct vmem_altmap *altmap)
 {
-       unsigned long start = (unsigned long)memmap;
-       unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
+       unsigned long start = (unsigned long) pfn_to_page(pfn);
+       unsigned long end = start + nr_pages * sizeof(struct page);
 
        vmemmap_free(start, end, altmap);
 }
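
Populate and teardown are now keyed by the same (pfn, nr_pages) pair, so
depopulate_section_memmap() recomputes exactly the memmap range that was
mapped.  A minimal sketch of the pairing, mirroring the
sparse_add_one_section() hunk further down:

	memmap = populate_section_memmap(start_pfn, PAGES_PER_SECTION, nid, altmap);
	if (!memmap)
		return -ENOMEM;
	/* ... on a later failure, undo with the identical pfn range ... */
	depopulate_section_memmap(start_pfn, PAGES_PER_SECTION, altmap);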
@@ -647,7 +650,8 @@ static void free_map_bootmem(struct page *memmap)
        vmemmap_free(start, end, NULL);
 }
 #else
-static struct page *__kmalloc_section_memmap(void)
+struct page *populate_section_memmap(unsigned long pfn,
+               unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
@@ -668,15 +672,11 @@ got_map_ptr:
        return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
                struct vmem_altmap *altmap)
 {
-       return __kmalloc_section_memmap();
-}
+       struct page *memmap = pfn_to_page(pfn);
 
-static void __kfree_section_memmap(struct page *memmap,
-               struct vmem_altmap *altmap)
-{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
@@ -745,12 +745,13 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
        if (ret < 0 && ret != -EEXIST)
                return ret;
        ret = 0;
-       memmap = kmalloc_section_memmap(section_nr, nid, altmap);
+       memmap = populate_section_memmap(start_pfn, PAGES_PER_SECTION, nid,
+                       altmap);
        if (!memmap)
                return -ENOMEM;
        usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
        if (!usage) {
-               __kfree_section_memmap(memmap, altmap);
+               depopulate_section_memmap(start_pfn, PAGES_PER_SECTION, altmap);
                return -ENOMEM;
        }
 
@@ -773,7 +774,7 @@ int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
 out:
        if (ret < 0) {
                kfree(usage);
-               __kfree_section_memmap(memmap, altmap);
+               depopulate_section_memmap(start_pfn, PAGES_PER_SECTION, altmap);
        }
        return ret;
 }
@@ -809,7 +810,8 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 #endif
 
 static void free_section_usage(struct mem_section *ms, struct page *memmap,
-               struct mem_section_usage *usage, struct vmem_altmap *altmap)
+               struct mem_section_usage *usage, unsigned long pfn,
+               unsigned long nr_pages, struct vmem_altmap *altmap)
 {
        if (!usage)
                return;
@@ -820,7 +822,7 @@ static void free_section_usage(struct mem_section *ms, struct page *memmap,
        if (!early_section(ms)) {
                kfree(usage);
                if (memmap)
-                       __kfree_section_memmap(memmap, altmap);
+                       depopulate_section_memmap(pfn, nr_pages, altmap);
                return;
        }
 
@@ -849,6 +851,8 @@ void sparse_remove_one_section(struct mem_section *ms, unsigned long map_offset,
 
        clear_hwpoisoned_pages(memmap + map_offset,
                        PAGES_PER_SECTION - map_offset);
-       free_section_usage(ms, memmap, usage, altmap);
+       free_section_usage(ms, memmap, usage,
+                       section_nr_to_pfn(__section_nr(ms)),
+                       PAGES_PER_SECTION, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */