percpu: don't implicitly include slab.h from percpu.h
author     Tejun Heo <tj@kernel.org>
           Wed, 24 Mar 2010 08:06:43 +0000 (17:06 +0900)
committer  Tejun Heo <tj@kernel.org>
           Tue, 30 Mar 2010 13:02:32 +0000 (22:02 +0900)
percpu.h has always included slab.h to get k[mz]alloc/free() for the
UP inline implementation.  Because percpu.h is used by very low level
headers, including module.h and sched.h, a lot of files
unintentionally got slab.h included.
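
For illustration, a file like the hypothetical one below built fine
without ever naming slab.h, purely through this implicit chain (a
minimal sketch; the file and its contents are made up for this note,
not taken from the tree):

	/* example.c - relied on the implicit inclusion chain */
	#include <linux/sched.h>	/* sched.h -> percpu.h -> slab.h */

	static int *alloc_sixteen_ints(void)
	{
		/*
		 * kmalloc() and GFP_KERNEL are visible here only
		 * because slab.h rode in behind sched.h.
		 */
		return kmalloc(16 * sizeof(int), GFP_KERNEL);
	}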

Lee Schermerhorn was trying to make topology.h use percpu.h and got
bitten by this implicit inclusion.  The right thing to do is break
this ultimately unnecessary dependency.  The previous patch added
explicit inclusion of either gfp.h or slab.h to the source files using
them.  This patch updates percpu.h such that slab.h is no longer
included from percpu.h.
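
With the dependency gone, such users must name slab.h (or gfp.h)
themselves; a hypothetical sketch of the kind of one-line fixup the
previous patch applied tree-wide (file name illustrative):

	/* example.c - after the explicit-include fixup */
	#include <linux/sched.h>
	#include <linux/slab.h>	/* now required for kmalloc()/kfree() */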

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
include/linux/percpu.h
mm/Makefile
mm/percpu_up.c [new file with mode: 0644]

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index a93e5bfdccb8e8b825006776f4afb77ebc975e90..c7845130bfdfffcc6c08130f0a199d225715db37 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -2,10 +2,10 @@
 #define __LINUX_PERCPU_H
 
 #include <linux/preempt.h>
-#include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/pfn.h>
+#include <linux/init.h>
 
 #include <asm/percpu.h>
 
@@ -135,9 +135,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #define per_cpu_ptr(ptr, cpu)  SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void __percpu *__pdata);
-extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
@@ -147,27 +144,6 @@ extern void __init setup_per_cpu_areas(void);
 
 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static inline void __percpu *__alloc_percpu(size_t size, size_t align)
-{
-       /*
-        * Can't easily make larger alignment work with kmalloc.  WARN
-        * on it.  Larger alignment should only be used for module
-        * percpu sections on SMP for which this path isn't used.
-        */
-       WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-       return kzalloc(size, GFP_KERNEL);
-}
-
-static inline void free_percpu(void __percpu *p)
-{
-       kfree(p);
-}
-
-static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
-{
-       return __pa(addr);
-}
-
 static inline void __init setup_per_cpu_areas(void) { }
 
 static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -177,6 +153,10 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 
 #endif /* CONFIG_SMP */
 
+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
 #define alloc_percpu(type)     \
        (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
 
diff --git a/mm/Makefile b/mm/Makefile
index 7a68d2ab556093ab3398cbb95d8386c65357e07a..6c2a73a54a43c2bb6b412ebf7af56d622315a329 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -33,7 +33,11 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-obj-$(CONFIG_SMP) += percpu.o
+ifdef CONFIG_SMP
+obj-y += percpu.o
+else
+obj-y += percpu_up.o
+endif
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
diff --git a/mm/percpu_up.c b/mm/percpu_up.c
new file mode 100644
index 0000000..c4351c7
--- /dev/null
+++ b/mm/percpu_up.c
@@ -0,0 +1,30 @@
+/*
+ * mm/percpu_up.c - dummy percpu memory allocator implementation for UP
+ */
+
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+void __percpu *__alloc_percpu(size_t size, size_t align)
+{
+       /*
+        * Can't easily make larger alignment work with kmalloc.  WARN
+        * on it.  Larger alignment should only be used for module
+        * percpu sections on SMP for which this path isn't used.
+        */
+       WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+       return kzalloc(size, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(__alloc_percpu);
+
+void free_percpu(void __percpu *p)
+{
+       kfree(p);
+}
+EXPORT_SYMBOL_GPL(free_percpu);
+
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+       return __pa(addr);
+}
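
With the UP implementations moved out of line, callers keep using the
same alloc_percpu()/free_percpu() API regardless of CONFIG_SMP; a
minimal sketch of a caller (names are illustrative, not part of this
patch):

	#include <linux/percpu.h>

	static int __percpu *counters;

	static int counters_init(void)
	{
		/* On UP this now lands in percpu_up.c's kzalloc() path. */
		counters = alloc_percpu(int);
		return counters ? 0 : -ENOMEM;
	}

	static void counters_exit(void)
	{
		free_percpu(counters);
	}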