[PATCH] Module per-cpu alignment cannot always be met
author: Rusty Russell <rusty@rustcorp.com.au>
Tue, 2 Aug 2005 04:11:47 +0000 (21:11 -0700)
committer: Linus Torvalds <torvalds@g5.osdl.org>
Tue, 2 Aug 2005 04:38:01 +0000 (21:38 -0700)
The module code assumes no one will ever ask for a per-cpu area more than
SMP_CACHE_BYTES aligned.  However, as these cases show, gcc sometimes
asks for 32-byte alignment for the per-cpu section on a module, and if
CONFIG_X86_L1_CACHE_SHIFT is 4, we hit that BUG_ON().  This is obviously an
unusual combination, as there have been few reports, but better to warn
than die.

See:
http://www.ussg.iu.edu/hypermail/linux/kernel/0409.0/0768.html

And more recently:
http://bugs.gentoo.org/show_bug.cgi?id=97006

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
kernel/module.c

index 068e271ab3a538761c9129ec4b5fae0fdbe50910..c32995fbd8fd14b608bb49e5b0f5899e41ffd979 100644 (file)
@@ -250,13 +250,18 @@ static inline unsigned int block_size(int val)
 /* Created by linker magic */
 extern char __per_cpu_start[], __per_cpu_end[];
 
-static void *percpu_modalloc(unsigned long size, unsigned long align)
+static void *percpu_modalloc(unsigned long size, unsigned long align,
+                            const char *name)
 {
        unsigned long extra;
        unsigned int i;
        void *ptr;
 
-       BUG_ON(align > SMP_CACHE_BYTES);
+       if (align > SMP_CACHE_BYTES) {
+               printk(KERN_WARNING "%s: per-cpu alignment %li > %i\n",
+                      name, align, SMP_CACHE_BYTES);
+               align = SMP_CACHE_BYTES;
+       }
 
        ptr = __per_cpu_start;
        for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
@@ -348,7 +353,8 @@ static int percpu_modinit(void)
 }      
 __initcall(percpu_modinit);
 #else /* ... !CONFIG_SMP */
-static inline void *percpu_modalloc(unsigned long size, unsigned long align)
+static inline void *percpu_modalloc(unsigned long size, unsigned long align,
+                                   const char *name)
 {
        return NULL;
 }
@@ -1644,7 +1650,8 @@ static struct module *load_module(void __user *umod,
        if (pcpuindex) {
                /* We have a special allocation for this section. */
                percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
-                                        sechdrs[pcpuindex].sh_addralign);
+                                        sechdrs[pcpuindex].sh_addralign,
+                                        mod->name);
                if (!percpu) {
                        err = -ENOMEM;
                        goto free_mod;