memblock: replace BOOTMEM_ALLOC_* with MEMBLOCK variants
author Mike Rapoport <rppt@linux.vnet.ibm.com>
Tue, 30 Oct 2018 22:09:44 +0000 (15:09 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 31 Oct 2018 15:54:16 +0000 (08:54 -0700)
Drop BOOTMEM_ALLOC_ACCESSIBLE and BOOTMEM_ALLOC_ANYWHERE in favor of
identical MEMBLOCK definitions.
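
For context, the two sets of constants are defined to the same values, so the
substitution is purely mechanical. Roughly (quoted from memory for
illustration, not copied from the tree):

	/* <linux/bootmem.h>, being retired */
	#define BOOTMEM_ALLOC_ACCESSIBLE	0
	#define BOOTMEM_ALLOC_ANYWHERE		(~(phys_addr_t)0)

	/* <linux/memblock.h>, the replacement */
	#define MEMBLOCK_ALLOC_ACCESSIBLE	0
	#define MEMBLOCK_ALLOC_ANYWHERE		(~(phys_addr_t)0)

The callers below only swap the name passed as the allocation limit (plus a
few added #include <linux/memblock.h> lines); allocation behaviour is
unchanged.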

Link: http://lkml.kernel.org/r/1536927045-23536-29-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Serge Semin <fancer.lancer@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/ia64/mm/discontig.c
arch/powerpc/kernel/setup_64.c
arch/sparc/kernel/smp_64.c
arch/x86/kernel/setup_percpu.c
arch/x86/mm/kasan_init_64.c
mm/hugetlb.c
mm/kasan/kasan_init.c
mm/memblock.c
mm/page_ext.c
mm/sparse-vmemmap.c
mm/sparse.c

arch/ia64/mm/discontig.c
index 918dda9729758f8e1b4d38defb3c6bde07c42fc4..70609f823960ed54c032f3c2a4429d0281acb826 100644
@@ -453,7 +453,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
 
        ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
                                     __pa(MAX_DMA_ADDRESS),
-                                    BOOTMEM_ALLOC_ACCESSIBLE,
+                                    MEMBLOCK_ALLOC_ACCESSIBLE,
                                     bestnode);
 
        return ptr;

arch/powerpc/kernel/setup_64.c
index f90ab3ea9af39a3dc68e071a7af79a4d17dc3a42..9216c3a7fcfcd297bd7f0cf1c7bd13b8e93f5776 100644
@@ -764,7 +764,7 @@ void __init emergency_stack_init(void)
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
        return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
-                                     BOOTMEM_ALLOC_ACCESSIBLE,
+                                     MEMBLOCK_ALLOC_ACCESSIBLE,
                                      early_cpu_to_node(cpu));
 
 }

arch/sparc/kernel/smp_64.c
index a087a6a25f06e1aec1c50991b8f9b31b3c8fe82e..6cc80d0f4b9fb01215fbf010c099b886b83ce1b3 100644
@@ -1595,7 +1595,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
                         cpu, size, __pa(ptr));
        } else {
                ptr = memblock_alloc_try_nid(size, align, goal,
-                                            BOOTMEM_ALLOC_ACCESSIBLE, node);
+                                            MEMBLOCK_ALLOC_ACCESSIBLE, node);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
                         "%016lx\n", cpu, size, node, __pa(ptr));
        }

arch/x86/kernel/setup_percpu.c
index a006f1ba4c39d93005210667b22ea01f0a51164f..483412fb8a24d6b92db64000fc92429b5c4ba576 100644
@@ -114,7 +114,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                         cpu, size, __pa(ptr));
        } else {
                ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
-                                                    BOOTMEM_ALLOC_ACCESSIBLE,
+                                                    MEMBLOCK_ALLOC_ACCESSIBLE,
                                                     node);
 
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",

arch/x86/mm/kasan_init_64.c
index 77b857cb036f01f0dfd1f7f63b6a9a0718d24d09..8f87499124b8011c3446c4070f4f059cec10d2b0 100644
@@ -29,10 +29,10 @@ static __init void *early_alloc(size_t size, int nid, bool panic)
 {
        if (panic)
                return memblock_alloc_try_nid(size, size,
-                       __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+                       __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        else
                return memblock_alloc_try_nid_nopanic(size, size,
-                       __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+                       __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 }
 
 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,

mm/hugetlb.c
index 51e9f17dbd5ceb890e7b0b1029fc6b69a5c2196d..e35d99844612a5abc0fe3d8ab1ae4b973a7b47cb 100644
@@ -16,6 +16,7 @@
 #include <linux/cpuset.h>
 #include <linux/mutex.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/mmdebug.h>
@@ -2102,7 +2103,7 @@ int __alloc_bootmem_huge_page(struct hstate *h)
 
                addr = memblock_alloc_try_nid_raw(
                                huge_page_size(h), huge_page_size(h),
-                               0, BOOTMEM_ALLOC_ACCESSIBLE, node);
+                               0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
                if (addr) {
                        /*
                         * Use the beginning of the huge page to store the

mm/kasan/kasan_init.c
index 24d734bdff6bc812fff3b981dfcbc056abc8ef9e..785a9707786b2efff5ff7b9e36dd4a853a54e27b 100644
@@ -84,7 +84,7 @@ static inline bool kasan_zero_page_entry(pte_t pte)
 static __init void *early_alloc(size_t size, int node)
 {
        return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
-                                       BOOTMEM_ALLOC_ACCESSIBLE, node);
+                                       MEMBLOCK_ALLOC_ACCESSIBLE, node);
 }
 
 static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,

mm/memblock.c
index 3dd9cfef996c4d3d49db5e17fedf761f67dfe242..2ed73245b5da65b0fb35a10d679323f433d6f224 100644
@@ -1342,7 +1342,7 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali
  * hold the requested memory.
  *
  * The allocation is performed from memory region limited by
- * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
+ * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
  *
  * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
  *
@@ -1429,7 +1429,7 @@ done:
  * @min_addr: the lower bound of the memory region from where the allocation
  *       is preferred (phys address)
  * @max_addr: the upper bound of the memory region from where the allocation
- *           is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ *           is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
  *           allocate only from memory limited by memblock.current_limit value
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
@@ -1466,7 +1466,7 @@ void * __init memblock_alloc_try_nid_raw(
  * @min_addr: the lower bound of the memory region from where the allocation
  *       is preferred (phys address)
  * @max_addr: the upper bound of the memory region from where the allocation
- *           is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ *           is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
  *           allocate only from memory limited by memblock.current_limit value
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *
@@ -1501,7 +1501,7 @@ void * __init memblock_alloc_try_nid_nopanic(
  * @min_addr: the lower bound of the memory region from where the allocation
  *       is preferred (phys address)
  * @max_addr: the upper bound of the memory region from where the allocation
- *           is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ *           is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
  *           allocate only from memory limited by memblock.current_limit value
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  *

mm/page_ext.c
index e77c0f031dd0ad487b90f1d7ebf746719d9f0280..5323c2ade68649afd2202d8947ec67a8fa3e1903 100644
@@ -163,7 +163,7 @@ static int __init alloc_node_page_ext(int nid)
 
        base = memblock_alloc_try_nid_nopanic(
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
-                       BOOTMEM_ALLOC_ACCESSIBLE, nid);
+                       MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        if (!base)
                return -ENOMEM;
        NODE_DATA(nid)->node_page_ext = base;

mm/sparse-vmemmap.c
index 91c2c3d25827fe7b05b358553d556790481d506d..7408cabed61aad66eed41b725d4f181eeb55fedf 100644
@@ -21,6 +21,7 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/memremap.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
@@ -43,7 +44,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
                                unsigned long goal)
 {
        return memblock_alloc_try_nid_raw(size, align, goal,
-                                              BOOTMEM_ALLOC_ACCESSIBLE, node);
+                                              MEMBLOCK_ALLOC_ACCESSIBLE, node);
 }
 
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)

mm/sparse.c
index d1296610562b3ab5d39af7c959e80cc919e3acfb..b139fbc61d101f0c39f73bbb875e6a3653651fd0 100644
@@ -6,6 +6,7 @@
 #include <linux/slab.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/compiler.h>
 #include <linux/highmem.h>
 #include <linux/export.h>
@@ -393,7 +394,7 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
 
        map = memblock_alloc_try_nid(size,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
-                                         BOOTMEM_ALLOC_ACCESSIBLE, nid);
+                                         MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        return map;
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -407,7 +408,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
        sparsemap_buf =
                memblock_alloc_try_nid_raw(size, PAGE_SIZE,
                                                __pa(MAX_DMA_ADDRESS),
-                                               BOOTMEM_ALLOC_ACCESSIBLE, nid);
+                                               MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        sparsemap_buf_end = sparsemap_buf + size;
 }