Merge git://git.linux-nfs.org/projects/trondmy/nfs-2.6
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 14 Dec 2009 18:00:24 +0000 (10:00 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 14 Dec 2009 18:00:24 +0000 (10:00 -0800)
* git://git.linux-nfs.org/projects/trondmy/nfs-2.6: (75 commits)
  NFS: Fix nfs_migrate_page()
  rpc: remove unneeded function parameter in gss_add_msg()
  nfs41: Invoke RECLAIM_COMPLETE on all new client ids
  SUNRPC: IS_ERR/PTR_ERR confusion
  NFSv41: Fix a potential state leakage when restarting nfs4_close_prepare
  nfs41: Handle NFSv4.1 session errors in the delegation recall code
  nfs41: Retry delegation return if it failed with session error
  nfs41: Handle session errors during delegation return
  nfs41: Mark stateids in need of reclaim if state manager gets stale clientid
  NFS: Fix up the declaration of nfs4_restart_rpc when NFSv4 not configured
  nfs41: Don't clear DRAINING flag on NFS4ERR_STALE_CLIENTID
  nfs41: nfs41_setup_state_renewal
  NFSv41: More cleanups
  NFSv41: Fix up some bugs in the NFS4CLNT_SESSION_DRAINING code
  NFSv41: Clean up slot table management
  NFSv41: Fix nfs4_proc_create_session
  nfs41: Invoke RECLAIM_COMPLETE
  nfs41: RECLAIM_COMPLETE functionality
  nfs41: RECLAIM_COMPLETE XDR functionality
  Cleanup some NFSv4 XDR decode comments
  ...

80 files changed:
Documentation/spinlocks.txt
arch/ia64/Kconfig
arch/ia64/include/asm/meminit.h
arch/ia64/include/asm/pgtable.h
arch/ia64/include/asm/processor.h
arch/ia64/kernel/acpi.c
arch/ia64/kernel/head.S
arch/ia64/kernel/ia64_ksyms.c
arch/ia64/kernel/mca_asm.S
arch/ia64/kernel/relocate_kernel.S
arch/ia64/kernel/setup.c
arch/ia64/kernel/vmlinux.lds.S
arch/ia64/mm/contig.c
arch/ia64/mm/discontig.c
arch/ia64/mm/init.c
arch/ia64/sn/kernel/sn2/sn2_smp.c
arch/ia64/xen/irq_xen.c
arch/ia64/xen/time.c
arch/m68k/include/asm/pgtable_mm.h
arch/m68k/sun3/mmu_emu.c
arch/mn10300/kernel/kprobes.c
arch/powerpc/include/asm/smp.h
arch/powerpc/kernel/perf_callchain.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/smp.c
arch/powerpc/platforms/cell/interrupt.c
arch/powerpc/platforms/pseries/dtl.c
arch/sparc/kernel/nmi.c
arch/x86/include/asm/percpu.h
arch/x86/kernel/apic/nmi.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu_debug.c
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/ds.c
arch/x86/kvm/svm.c
arch/x86/xen/smp.c
arch/x86/xen/time.c
crypto/cryptd.c
drivers/base/cpu.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/freq_table.c
drivers/crypto/padlock-aes.c
drivers/dma/dmaengine.c
drivers/infiniband/hw/ehca/ehca_irq.c
drivers/lguest/x86/core.c
drivers/net/chelsio/sge.c
drivers/net/loopback.c
drivers/net/veth.c
drivers/oprofile/cpu_buffer.c
drivers/oprofile/cpu_buffer.h
drivers/oprofile/oprofile_stats.c
drivers/s390/net/netiucv.c
fs/ext4/mballoc.c
fs/nfs/iostat.h
fs/xfs/xfs_mount.c
include/asm-generic/percpu.h
include/linux/percpu-defs.h
include/linux/percpu.h
include/linux/vmstat.h
include/net/neighbour.h
include/net/netfilter/nf_conntrack.h
include/net/snmp.h
kernel/lockdep.c
kernel/module.c
kernel/rcutorture.c
kernel/sched.c
kernel/softirq.c
kernel/softlockup.c
kernel/time/timer_stats.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions_graph.c
kernel/trace/trace_hw_branches.c
mm/Makefile
mm/allocpercpu.c [deleted file]
mm/percpu.c
mm/slab.c
mm/vmalloc.c
mm/vmstat.c

diff --git a/Documentation/spinlocks.txt b/Documentation/spinlocks.txt
index 619699dde5938b64c6d63e1059cf0ed702c128af..178c831b907d08c8096b72c307508d0b272fccbe 100644
@@ -1,73 +1,8 @@
-SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
-are hence deprecated.
+Lesson 1: Spin locks
 
-Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
-__SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate for static
-initialization.
-
-Most of the time, you can simply turn:
-
-       static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
-
-into:
-
-       static DEFINE_SPINLOCK(xxx_lock);
-
-Static structure member variables go from:
-
-       struct foo bar {
-               .lock   =       SPIN_LOCK_UNLOCKED;
-       };
-
-to:
-
-       struct foo bar {
-               .lock   =       __SPIN_LOCK_UNLOCKED(bar.lock);
-       };
-
-Declaration of static rw_locks undergo a similar transformation.
-
-Dynamic initialization, when necessary, may be performed as
-demonstrated below.
-
-   spinlock_t xxx_lock;
-   rwlock_t xxx_rw_lock;
-
-   static int __init xxx_init(void)
-   {
-       spin_lock_init(&xxx_lock);
-       rwlock_init(&xxx_rw_lock);
-       ...
-   }
-
-   module_init(xxx_init);
-
-The following discussion is still valid, however, with the dynamic
-initialization of spinlocks or with DEFINE_SPINLOCK, etc., used
-instead of SPIN_LOCK_UNLOCKED.
-
------------------------
-
-On Fri, 2 Jan 1998, Doug Ledford wrote:
-> 
-> I'm working on making the aic7xxx driver more SMP friendly (as well as
-> importing the latest FreeBSD sequencer code to have 7895 support) and wanted
-> to get some info from you.  The goal here is to make the various routines
-> SMP safe as well as UP safe during interrupts and other manipulating
-> routines.  So far, I've added a spin_lock variable to things like my queue
-> structs.  Now, from what I recall, there are some spin lock functions I can
-> use to lock these spin locks from other use as opposed to a (nasty)
-> save_flags(); cli(); stuff; restore_flags(); construct.  Where do I find
-> these routines and go about making use of them?  Do they only lock on a
-> per-processor basis or can they also lock say an interrupt routine from
-> mucking with a queue if the queue routine was manipulating it when the
-> interrupt occurred, or should I still use a cli(); based construct on that
-> one?
-
-See <asm/spinlock.h>. The basic version is:
-
-   spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+The most basic primitive for locking is spinlock.
 
+static DEFINE_SPINLOCK(xxx_lock);
 
        unsigned long flags;
 
@@ -75,13 +10,11 @@ See <asm/spinlock.h>. The basic version is:
        ... critical section here ..
        spin_unlock_irqrestore(&xxx_lock, flags);
 
-and the above is always safe. It will disable interrupts _locally_, but the
+The above is always safe. It will disable interrupts _locally_, but the
 spinlock itself will guarantee the global lock, so it will guarantee that
 there is only one thread-of-control within the region(s) protected by that
-lock. 
-
-Note that it works well even under UP - the above sequence under UP
-essentially is just the same as doing a
+lock. This works well even under UP. The above sequence under UP
+essentially is just the same as doing
 
        unsigned long flags;
 
@@ -91,15 +24,13 @@ essentially is just the same as doing a
 
 so the code does _not_ need to worry about UP vs SMP issues: the spinlocks
 work correctly under both (and spinlocks are actually more efficient on
-architectures that allow doing the "save_flags + cli" in one go because I
-don't export that interface normally).
+architectures that allow doing the "save_flags + cli" in one operation).
+
+   NOTE! Implications of spin_locks for memory are further described in:
 
-NOTE NOTE NOTE! The reason the spinlock is so much faster than a global
-interrupt lock under SMP is exactly because it disables interrupts only on
-the local CPU. The spin-lock is safe only when you _also_ use the lock
-itself to do locking across CPU's, which implies that EVERYTHING that
-touches a shared variable has to agree about the spinlock they want to
-use.
+     Documentation/memory-barriers.txt
+       (5) LOCK operations.
+       (6) UNLOCK operations.
 
 The above is usually pretty simple (you usually need and want only one
 spinlock for most things - using more than one spinlock can make things a
@@ -120,20 +51,24 @@ and another sequence that does
 then they are NOT mutually exclusive, and the critical regions can happen
 at the same time on two different CPU's. That's fine per se, but the
 critical regions had better be critical for different things (ie they
-can't stomp on each other). 
+can't stomp on each other).
 
 The above is a problem mainly if you end up mixing code - for example the
 routines in ll_rw_block() tend to use cli/sti to protect the atomicity of
 their actions, and if a driver uses spinlocks instead then you should
-think about issues like the above..
+think about issues like the above.
 
 This is really the only really hard part about spinlocks: once you start
 using spinlocks they tend to expand to areas you might not have noticed
 before, because you have to make sure the spinlocks correctly protect the
 shared data structures _everywhere_ they are used. The spinlocks are most
-easily added to places that are completely independent of other code (ie
-internal driver data structures that nobody else ever touches, for
-example). 
+easily added to places that are completely independent of other code (for
+example, internal driver data structures that nobody else ever touches).
+
+   NOTE! The spin-lock is safe only when you _also_ use the lock itself
+   to do locking across CPU's, which implies that EVERYTHING that
+   touches a shared variable has to agree about the spinlock they want
+   to use.
 
 ----
 
@@ -141,13 +76,17 @@ Lesson 2: reader-writer spinlocks.
 
 If your data accesses have a very natural pattern where you usually tend
 to mostly read from the shared variables, the reader-writer locks
-(rw_lock) versions of the spinlocks are often nicer. They allow multiple
+(rw_lock) versions of the spinlocks are sometimes useful. They allow multiple
 readers to be in the same critical region at once, but if somebody wants
-to change the variables it has to get an exclusive write lock. The
-routines look the same as above:
+to change the variables it has to get an exclusive write lock.
 
-   rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
+   NOTE! reader-writer locks require more atomic memory operations than
+   simple spinlocks.  Unless the reader critical section is long, you
+   are better off just using spinlocks.
 
+The routines look the same as above:
+
+   rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
 
        unsigned long flags;
 
@@ -159,18 +98,21 @@ routines look the same as above:
        .. read and write exclusive access to the info ...
        write_unlock_irqrestore(&xxx_lock, flags);
 
-The above kind of lock is useful for complex data structures like linked
-lists etc, especially when you know that most of the work is to just
-traverse the list searching for entries without changing the list itself,
-for example. Then you can use the read lock for that kind of list
-traversal, which allows many concurrent readers. Anything that _changes_
-the list will have to get the write lock. 
+The above kind of lock may be useful for complex data structures like
+linked lists, especially searching for entries without changing the list
+itself.  The read lock allows many concurrent readers.  Anything that
+_changes_ the list will have to get the write lock.
+
+   NOTE! RCU is better for list traversal, but requires careful
+   attention to design detail (see Documentation/RCU/listRCU.txt).
 
-Note: you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
+Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
 time need to do any changes (even if you don't do it every time), you have
-to get the write-lock at the very beginning. I could fairly easily add a
-primitive to create a "upgradeable" read-lock, but it hasn't been an issue
-yet. Tell me if you'd want one. 
+to get the write-lock at the very beginning.
+
+   NOTE! We are working hard to remove reader-writer spinlocks in most
+   cases, so please don't add a new one without consensus.  (Instead, see
+   Documentation/RCU/rcu.txt for complete information.)
 
 ----
 
@@ -233,4 +175,46 @@ indeed), while write-locks need to protect themselves against interrupts.
 
                Linus
 
+----
+
+Reference information:
+
+For dynamic initialization, use spin_lock_init() or rwlock_init() as
+appropriate:
+
+   spinlock_t xxx_lock;
+   rwlock_t xxx_rw_lock;
+
+   static int __init xxx_init(void)
+   {
+       spin_lock_init(&xxx_lock);
+       rwlock_init(&xxx_rw_lock);
+       ...
+   }
+
+   module_init(xxx_init);
+
+For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or
+__SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate.
+
+SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED are deprecated.  These interfere
+with lockdep state tracking.
+
+Most of the time, you can simply turn:
+       static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+into:
+       static DEFINE_SPINLOCK(xxx_lock);
+
+Static structure member variables go from:
+
+       struct foo bar {
+               .lock   =       SPIN_LOCK_UNLOCKED;
+       };
+
+to:
 
+       struct foo bar {
+               .lock   =       __SPIN_LOCK_UNLOCKED(bar.lock);
+       };
+
+Declaration of static rw_locks undergo a similar transformation.
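Pulling the new document's fragments together, here is a minimal sketch of the usage it recommends: DEFINE_SPINLOCK()/DEFINE_RWLOCK() for static initialization and the irqsave variants around the critical sections. The xxx_lock and xxx_rw_lock names are the document's own placeholders; the protected data (event_count, shared_config) and the helper functions are hypothetical.

   #include <linux/spinlock.h>

   static DEFINE_SPINLOCK(xxx_lock);            /* lockdep-friendly static init */
   static DEFINE_RWLOCK(xxx_rw_lock);

   static unsigned long event_count;            /* protected by xxx_lock */
   static int shared_config;                    /* protected by xxx_rw_lock */

   static void count_event(void)
   {
           unsigned long flags;

           spin_lock_irqsave(&xxx_lock, flags);        /* IRQs off locally */
           event_count++;                              /* critical section */
           spin_unlock_irqrestore(&xxx_lock, flags);
   }

   static int get_config(void)
   {
           unsigned long flags;
           int val;

           read_lock_irqsave(&xxx_rw_lock, flags);     /* readers may run concurrently */
           val = shared_config;
           read_unlock_irqrestore(&xxx_rw_lock, flags);
           return val;
   }

   static void set_config(int val)
   {
           unsigned long flags;

           write_lock_irqsave(&xxx_rw_lock, flags);    /* exclusive access */
           shared_config = val;
           write_unlock_irqrestore(&xxx_rw_lock, flags);
   }

Per the new NOTE in the document, the rwlock half of this is only worth it when the read side is long or very hot; otherwise a plain spinlock is simpler and cheaper.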
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1ee596cd942ff8c3a3915b2428acacb206c1f6b8..2d7f56a98e0f484ce80d544223b4b9923526cb2a 100644
@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
        bool
        default y
 
-config HAVE_LEGACY_PER_CPU_AREA
-       def_bool y
-
 config HAVE_SETUP_PER_CPU_AREA
        def_bool y
 
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 688a812c017d64c1f5c9763012ecac75d7b00f0e..61c7b1750b169f33ccb544b5cb83c83e1dd432ee 100644
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP     0x40000000 /* Use virtual mem map if hole is > than this */
-  extern unsigned long vmalloc_end;
+  extern unsigned long VMALLOC_END;
   extern struct page *vmem_map;
   extern int find_largest_hole(u64 start, u64 end, void *arg);
   extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 8840a690d1e7aa346b82fc68672186631c2e0828..69bf13857a9fc8ac9ff07e73a24436c17af19adb 100644
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define VMALLOC_START          (RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define VMALLOC_END_INIT      (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END           vmalloc_end
-  extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
 #else
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
 /* SPARSEMEM_VMEMMAP uses half of vmalloc... */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 3eaeedf1aef2cf20d5a8c707d64ddd34746c18b2..7fa90f73f6be37757e3a36ed46c85ac3ede3df77 100644
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
 #endif
 };
 
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 
 /*
  * The "local" data variable.  It refers to the per-CPU data of the currently executing
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
  * Do not use the address of local_cpu_data, since it will be different from
  * cpu_data(smp_processor_id())!
  */
-#define local_cpu_data         (&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu)          (&per_cpu(cpu_info, cpu))
+#define local_cpu_data         (&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu)          (&per_cpu(ia64_cpu_info, cpu))
 
 extern void print_cpu_info (struct cpuinfo_ia64 *);
 
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f3feee71c5110b01f12b5c8b3f4fda..40574ae114018e4db5efce7fc499435c4c863eec 100644
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
                printk(KERN_ERR PREFIX
                       "Error parsing MADT - no LAPIC entries\n");
 
+#ifdef CONFIG_SMP
+       if (available_cpus == 0) {
+               printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+               printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+               smp_boot_data.cpu_phys_id[available_cpus] =
+                   hard_smp_processor_id();
+               available_cpus = 1;     /* We've got at least one of these, no? */
+       }
+       smp_boot_data.cpu_count = available_cpus;
+#endif
+       /* Make boot-up look pretty */
+       printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+              total_cpus);
+
        return 0;
 }
 
-
-
 int __init acpi_boot_init(void)
 {
 
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
        if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
                printk(KERN_ERR PREFIX "Can't find FADT\n");
 
+#ifdef CONFIG_ACPI_NUMA
 #ifdef CONFIG_SMP
-       if (available_cpus == 0) {
-               printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
-               printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-               smp_boot_data.cpu_phys_id[available_cpus] =
-                   hard_smp_processor_id();
-               available_cpus = 1;     /* We've got at least one of these, no? */
-       }
-       smp_boot_data.cpu_count = available_cpus;
-
-       smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
        if (srat_num_cpus == 0) {
                int cpu, i = 1;
                for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
                                node_cpuid[i++].phys_id =
                                    smp_boot_data.cpu_phys_id[cpu];
        }
-# endif
 #endif
-#ifdef CONFIG_ACPI_NUMA
        build_cpu_to_node_map();
 #endif
-       /* Make boot-up look pretty */
-       printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
-              total_cpus);
        return 0;
 }
 
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c44c477465314a4c3ee5daa71b4198..17a9fba38930ab0504aa7b17262765af117c2740 100644
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
  * intermediate precision so that we can produce a full 64-bit result.
  */
 GLOBAL_ENTRY(ia64_native_sched_clock)
-       addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+       addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
        mov.m r9=ar.itc         // fetch cycle-counter                          (35 cyc)
        ;;
        ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 GLOBAL_ENTRY(cycle_to_cputime)
        alloc r16=ar.pfs,1,0,0,0
-       addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+       addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
        ;;
        ldf8 f8=[r8]
        ;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e3006274fb054382505d8f0ba32959c6cb8..461b99902bf6ff2f703029c6fed6463271584048 100644
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn);   /* defined by bootmem.c, but not exported by generic
 #endif
 
 #include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
 #endif
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d41106191fe7b7a42d7cfe2d370a9bf..d5bdf9de36b6362f81a039b4494f0fa657497e87 100644
@@ -59,7 +59,7 @@
 ia64_do_tlb_purge:
 #define O(member)      IA64_CPUINFO_##member##_OFFSET
 
-       GET_THIS_PADDR(r2, cpu_info)    // load phys addr of cpu_info into r2
+       GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
        ;;
        addl r17=O(PTCE_STRIDE),r2
        addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fbe2946831ee4eb2ef15d2f35bf132e..c370e02f0061fbf8414457b16dd342fbd5201872 100644
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
 
        // purge all TC entries
 #define O(member)       IA64_CPUINFO_##member##_OFFSET
-        GET_THIS_PADDR(r2, cpu_info)    // load phys addr of cpu_info into r2
+        GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
         ;;
         addl r17=O(PTCE_STRIDE),r2
         addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801d60fe81ae2d6fc4930bdfdc190c45..a1ea879197770a2347ccff6ac0049146937d1ca9 100644
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
 EXPORT_SYMBOL(__per_cpu_offset);
 #endif
 
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
        early_acpi_boot_init();
 # ifdef CONFIG_ACPI_NUMA
        acpi_numa_init();
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+#  ifdef CONFIG_ACPI_HOTPLUG_CPU
        prefill_possible_map();
-#endif
+#  endif
        per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
                32 : cpus_weight(early_cpu_possible_map)),
                additional_cpus > 0 ? additional_cpus : 0);
 # endif
-#else
-# ifdef CONFIG_SMP
-       smp_build_cpu_map();    /* happens, e.g., with the Ski simulator */
-# endif
 #endif /* CONFIG_APCI_BOOT */
 
+#ifdef CONFIG_SMP
+       smp_build_cpu_map();
+#endif
        find_memory();
 
        /* process SAL system table: */
@@ -855,18 +854,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
        c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
 }
 
-/*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
-       /* start_kernel() requires this... */
-}
-#endif
-
 /*
  * Do the following calculations:
  *
@@ -980,7 +967,7 @@ cpu_init (void)
         * depends on the data returned by identify_cpu().  We break the dependency by
         * accessing cpu_data() through the canonical per-CPU address.
         */
-       cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+       cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
        identify_cpu(cpu_info);
 
 #ifdef CONFIG_MCKINLEY
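The cpu_init() change above keeps the same address arithmetic, only with the renamed variable. A hedged restatement of what that line computes (the explanation is an editorial gloss, not text from this patch):

   /*
    * &__ia64_per_cpu_var(ia64_cpu_info) is the variable's link-time address
    * inside the per-cpu template that starts at __per_cpu_start, so
    *
    *     offset = (char *)&__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start
    *
    * is the variable's fixed offset within every CPU's per-cpu area.  Adding
    * that offset to cpu_data, the base of the area being set up for this CPU,
    * yields this CPU's own struct cpuinfo_ia64, usable before the per-cpu
    * machinery for this CPU is fully wired up.
    */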
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b2c9881cd01c9766b203ac0af7adefc95d..1295ba327f6ff2728f422079aedd6090bc51013b 100644
@@ -166,6 +166,12 @@ SECTIONS
        }
 #endif
 
+#ifdef CONFIG_SMP
+  . = ALIGN(PERCPU_PAGE_SIZE);
+  __cpu0_per_cpu = .;
+  . = . + PERCPU_PAGE_SIZE;    /* cpu0 per-cpu space */
+#endif
+
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
@@ -198,11 +204,6 @@ SECTIONS
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
        {
-#ifdef CONFIG_SMP
-  . = ALIGN(PERCPU_PAGE_SIZE);
-               __cpu0_per_cpu = .;
-  . = . + PERCPU_PAGE_SIZE;    /* cpu0 per-cpu space */
-#endif
                INIT_TASK_DATA(PAGE_SIZE)
                CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
                READ_MOSTLY_DATA(SMP_CACHE_BYTES)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2f724d2bf2995d42251ed223bcca86ce2db21131..54bf540598118339a04919e1f08c505a5ad57cd8 100644
@@ -154,38 +154,99 @@ static void *cpu_data;
 void * __cpuinit
 per_cpu_init (void)
 {
-       int cpu;
-       static int first_time=1;
+       static bool first_time = true;
+       void *cpu0_data = __cpu0_per_cpu;
+       unsigned int cpu;
+
+       if (!first_time)
+               goto skip;
+       first_time = false;
 
        /*
-        * get_free_pages() cannot be used before cpu_init() done.  BSP
-        * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-        * get_zeroed_page().
+        * get_free_pages() cannot be used before cpu_init() done.
+        * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+        * to avoid that AP calls get_zeroed_page().
         */
-       if (first_time) {
-               void *cpu0_data = __cpu0_per_cpu;
+       for_each_possible_cpu(cpu) {
+               void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
 
-               first_time=0;
+               memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+               __per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+               per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
-               __per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
-               per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+               /*
+                * percpu area for cpu0 is moved from the __init area
+                * which is setup by head.S and used till this point.
+                * Update ar.k3.  This move is ensures that percpu
+                * area for cpu0 is on the correct node and its
+                * virtual address isn't insanely far from other
+                * percpu areas which is important for congruent
+                * percpu allocator.
+                */
+               if (cpu == 0)
+                       ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+                                   (unsigned long)__per_cpu_start);
 
-               for (cpu = 1; cpu < NR_CPUS; cpu++) {
-                       memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-                       __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-                       cpu_data += PERCPU_PAGE_SIZE;
-                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-               }
+               cpu_data += PERCPU_PAGE_SIZE;
        }
+skip:
        return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
 
 static inline void
 alloc_per_cpu_data(void)
 {
-       cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+       cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
                                   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+       struct pcpu_alloc_info *ai;
+       struct pcpu_group_info *gi;
+       unsigned int cpu;
+       ssize_t static_size, reserved_size, dyn_size;
+       int rc;
+
+       ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+       if (!ai)
+               panic("failed to allocate pcpu_alloc_info");
+       gi = &ai->groups[0];
+
+       /* units are assigned consecutively to possible cpus */
+       for_each_possible_cpu(cpu)
+               gi->cpu_map[gi->nr_units++] = cpu;
+
+       /* set parameters */
+       static_size = __per_cpu_end - __per_cpu_start;
+       reserved_size = PERCPU_MODULE_RESERVE;
+       dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+       if (dyn_size < 0)
+               panic("percpu area overflow static=%zd reserved=%zd\n",
+                     static_size, reserved_size);
+
+       ai->static_size         = static_size;
+       ai->reserved_size       = reserved_size;
+       ai->dyn_size            = dyn_size;
+       ai->unit_size           = PERCPU_PAGE_SIZE;
+       ai->atom_size           = PAGE_SIZE;
+       ai->alloc_size          = PERCPU_PAGE_SIZE;
+
+       rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+       if (rc)
+               panic("failed to setup percpu area (err=%d)", rc);
+
+       pcpu_free_alloc_info(ai);
+}
 #else
 #define alloc_per_cpu_data() do { } while (0)
 #endif /* CONFIG_SMP */
@@ -270,8 +331,8 @@ paging_init (void)
 
                map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
                        sizeof(struct page));
-               vmalloc_end -= map_size;
-               vmem_map = (struct page *) vmalloc_end;
+               VMALLOC_END -= map_size;
+               vmem_map = (struct page *) VMALLOC_END;
                efi_memmap_walk(create_mem_map_page_table, NULL);
 
                /*
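With the legacy allocator gone, the new setup_per_cpu_areas() above only has to describe the layout that per_cpu_init() already built (one PERCPU_PAGE_SIZE unit per possible CPU) to the dynamic percpu allocator, via pcpu_alloc_alloc_info() and pcpu_setup_first_chunk(). After that, ordinary code obtains dynamic per-cpu memory from the same chunks; a hedged sketch with hypothetical names:

   #include <linux/percpu.h>
   #include <linux/cpumask.h>
   #include <linux/errno.h>

   struct demo_counter {
           unsigned long hits;
   };

   static struct demo_counter *demo_counters;   /* hypothetical dynamic per-cpu data */

   static int demo_counters_init(void)
   {
           int cpu;

           demo_counters = alloc_percpu(struct demo_counter);
           if (!demo_counters)
                   return -ENOMEM;

           for_each_possible_cpu(cpu)
                   per_cpu_ptr(demo_counters, cpu)->hits = 0;   /* each CPU's copy */
           return 0;
   }

   static void demo_counters_exit(void)
   {
           free_percpu(demo_counters);
   }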
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d90087dea812636768a2f223e6a940fdf..19c4b2195dceef1af966bccf388c432050b1f3e3 100644
@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
        int cpu;
 
        for_each_possible_early_cpu(cpu) {
-               if (cpu == 0) {
-                       void *cpu0_data = __cpu0_per_cpu;
-                       __per_cpu_offset[cpu] = (char*)cpu0_data -
-                               __per_cpu_start;
-               } else if (node == node_cpuid[cpu].nid) {
-                       memcpy(__va(cpu_data), __phys_per_cpu_start,
-                              __per_cpu_end - __per_cpu_start);
-                       __per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-                               __per_cpu_start;
-                       cpu_data += PERCPU_PAGE_SIZE;
-               }
+               void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+               if (node != node_cpuid[cpu].nid)
+                       continue;
+
+               memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+               __per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+                       __per_cpu_start;
+
+               /*
+                * percpu area for cpu0 is moved from the __init area
+                * which is setup by head.S and used till this point.
+                * Update ar.k3.  This move is ensures that percpu
+                * area for cpu0 is on the correct node and its
+                * virtual address isn't insanely far from other
+                * percpu areas which is important for congruent
+                * percpu allocator.
+                */
+               if (cpu == 0)
+                       ia64_set_kr(IA64_KR_PER_CPU_DATA,
+                                   (unsigned long)cpu_data -
+                                   (unsigned long)__per_cpu_start);
+
+               cpu_data += PERCPU_PAGE_SIZE;
        }
 #endif
        return cpu_data;
 }
 
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+       struct pcpu_alloc_info *ai;
+       struct pcpu_group_info *uninitialized_var(gi);
+       unsigned int *cpu_map;
+       void *base;
+       unsigned long base_offset;
+       unsigned int cpu;
+       ssize_t static_size, reserved_size, dyn_size;
+       int node, prev_node, unit, nr_units, rc;
+
+       ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+       if (!ai)
+               panic("failed to allocate pcpu_alloc_info");
+       cpu_map = ai->groups[0].cpu_map;
+
+       /* determine base */
+       base = (void *)ULONG_MAX;
+       for_each_possible_cpu(cpu)
+               base = min(base,
+                          (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+       base_offset = (void *)__per_cpu_start - base;
+
+       /* build cpu_map, units are grouped by node */
+       unit = 0;
+       for_each_node(node)
+               for_each_possible_cpu(cpu)
+                       if (node == node_cpuid[cpu].nid)
+                               cpu_map[unit++] = cpu;
+       nr_units = unit;
+
+       /* set basic parameters */
+       static_size = __per_cpu_end - __per_cpu_start;
+       reserved_size = PERCPU_MODULE_RESERVE;
+       dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+       if (dyn_size < 0)
+               panic("percpu area overflow static=%zd reserved=%zd\n",
+                     static_size, reserved_size);
+
+       ai->static_size         = static_size;
+       ai->reserved_size       = reserved_size;
+       ai->dyn_size            = dyn_size;
+       ai->unit_size           = PERCPU_PAGE_SIZE;
+       ai->atom_size           = PAGE_SIZE;
+       ai->alloc_size          = PERCPU_PAGE_SIZE;
+
+       /*
+        * CPUs are put into groups according to node.  Walk cpu_map
+        * and create new groups at node boundaries.
+        */
+       prev_node = -1;
+       ai->nr_groups = 0;
+       for (unit = 0; unit < nr_units; unit++) {
+               cpu = cpu_map[unit];
+               node = node_cpuid[cpu].nid;
+
+               if (node == prev_node) {
+                       gi->nr_units++;
+                       continue;
+               }
+               prev_node = node;
+
+               gi = &ai->groups[ai->nr_groups++];
+               gi->nr_units            = 1;
+               gi->base_offset         = __per_cpu_offset[cpu] + base_offset;
+               gi->cpu_map             = &cpu_map[unit];
+       }
+
+       rc = pcpu_setup_first_chunk(ai, base);
+       if (rc)
+               panic("failed to setup percpu area (err=%d)", rc);
+
+       pcpu_free_alloc_info(ai);
+}
+#endif
+
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void)
        /* Set the node_data pointer for each per-cpu struct */
        for_each_possible_early_cpu(cpu) {
                node = node_cpuid[cpu].nid;
-               per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+               per_cpu(ia64_cpu_info, cpu).node_data =
+                       mem_data[node].node_data;
        }
 #else
        {
@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void)
                cpu = 0;
                node = node_cpuid[cpu].nid;
                cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-                       ((char *)&per_cpu__cpu_info - __per_cpu_start));
+                       ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
                cpu0_cpu_info->node_data = mem_data[node].node_data;
        }
 #endif /* CONFIG_SMP */
@@ -666,9 +765,9 @@ void __init paging_init(void)
        sparse_init();
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-       vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+       VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
                sizeof(struct page));
-       vmem_map = (struct page *) vmalloc_end;
+       vmem_map = (struct page *) VMALLOC_END;
        efi_memmap_walk(create_mem_map_page_table, NULL);
        printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
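The NUMA variant above differs from the contig.c one mainly in how it builds the group array: units are ordered by node in cpu_map, and a new pcpu_group_info starts at every node boundary. A hedged walk-through on a hypothetical two-node, four-CPU box:

   /*
    * Hypothetical layout: CPUs 0-3 possible, node_cpuid[cpu].nid = {0, 0, 1, 1}.
    *
    * The cpu_map loop produces cpu_map[] = { 0, 1, 2, 3 } (node-ordered), and
    * the boundary loop then yields:
    *
    *   ai->nr_groups         = 2
    *   groups[0].cpu_map     = &cpu_map[0], nr_units = 2   (node 0: CPUs 0, 1)
    *   groups[1].cpu_map     = &cpu_map[2], nr_units = 2   (node 1: CPUs 2, 3)
    *   groups[g].base_offset = __per_cpu_offset[first CPU in g] + base_offset
    *
    * with every unit PERCPU_PAGE_SIZE bytes, split into static_size,
    * reserved_size and dyn_size exactly as in the contig.c version.
    */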
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1857766a63c1850d968320559471c22a851b6335..b9609c69343a373f5e742be333bcac7d24cf4381 100644
@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void);
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 1176506b2baeb780e5cad3ba08cb533c54a801d8..e884ba4e031de4d1550cc2811a52a5a2c182db42 100644
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
                seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
                                stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
                                stat->deadlocks,
-                               1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-                               1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-                               1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+                               1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+                               1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+                               1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
                                stat->shub_ptc_flushes_not_my_mm,
                                stat->deadlocks2,
                                stat->shub_ipi_flushes,
-                               1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
+                               1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
        }
        return 0;
 }
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
index f042e192d2feadd4f32c0188154920771d225fae..a3fb7cf9ae1db1664217a7f7bd915b597ebeb483 100644
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector)
 }
 
 
-static DEFINE_PER_CPU(int, timer_irq) = -1;
-static DEFINE_PER_CPU(int, ipi_irq) = -1;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, cmc_irq) = -1;
-static DEFINE_PER_CPU(int, cmcp_irq) = -1;
-static DEFINE_PER_CPU(int, cpep_irq) = -1;
+static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
+static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
 #define NAME_SIZE      15
-static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
 #undef NAME_SIZE
 
 struct saved_irq {
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
        if (xen_slab_ready) {
                switch (vec) {
                case IA64_TIMER_VECTOR:
-                       snprintf(per_cpu(timer_name, cpu),
-                                sizeof(per_cpu(timer_name, cpu)),
+                       snprintf(per_cpu(xen_timer_name, cpu),
+                                sizeof(per_cpu(xen_timer_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
                                action->handler, action->flags,
-                               per_cpu(timer_name, cpu), action->dev_id);
-                       per_cpu(timer_irq, cpu) = irq;
+                               per_cpu(xen_timer_name, cpu), action->dev_id);
+                       per_cpu(xen_timer_irq, cpu) = irq;
                        break;
                case IA64_IPI_RESCHEDULE:
-                       snprintf(per_cpu(resched_name, cpu),
-                                sizeof(per_cpu(resched_name, cpu)),
+                       snprintf(per_cpu(xen_resched_name, cpu),
+                                sizeof(per_cpu(xen_resched_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
                                action->handler, action->flags,
-                               per_cpu(resched_name, cpu), action->dev_id);
-                       per_cpu(resched_irq, cpu) = irq;
+                               per_cpu(xen_resched_name, cpu), action->dev_id);
+                       per_cpu(xen_resched_irq, cpu) = irq;
                        break;
                case IA64_IPI_VECTOR:
-                       snprintf(per_cpu(ipi_name, cpu),
-                                sizeof(per_cpu(ipi_name, cpu)),
+                       snprintf(per_cpu(xen_ipi_name, cpu),
+                                sizeof(per_cpu(xen_ipi_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
                                action->handler, action->flags,
-                               per_cpu(ipi_name, cpu), action->dev_id);
-                       per_cpu(ipi_irq, cpu) = irq;
+                               per_cpu(xen_ipi_name, cpu), action->dev_id);
+                       per_cpu(xen_ipi_irq, cpu) = irq;
                        break;
                case IA64_CMC_VECTOR:
-                       snprintf(per_cpu(cmc_name, cpu),
-                                sizeof(per_cpu(cmc_name, cpu)),
+                       snprintf(per_cpu(xen_cmc_name, cpu),
+                                sizeof(per_cpu(xen_cmc_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
-                                                     action->handler,
-                                                     action->flags,
-                                                     per_cpu(cmc_name, cpu),
-                                                     action->dev_id);
-                       per_cpu(cmc_irq, cpu) = irq;
+                                               action->handler,
+                                               action->flags,
+                                               per_cpu(xen_cmc_name, cpu),
+                                               action->dev_id);
+                       per_cpu(xen_cmc_irq, cpu) = irq;
                        break;
                case IA64_CMCP_VECTOR:
-                       snprintf(per_cpu(cmcp_name, cpu),
-                                sizeof(per_cpu(cmcp_name, cpu)),
+                       snprintf(per_cpu(xen_cmcp_name, cpu),
+                                sizeof(per_cpu(xen_cmcp_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
-                                                    action->handler,
-                                                    action->flags,
-                                                    per_cpu(cmcp_name, cpu),
-                                                    action->dev_id);
-                       per_cpu(cmcp_irq, cpu) = irq;
+                                               action->handler,
+                                               action->flags,
+                                               per_cpu(xen_cmcp_name, cpu),
+                                               action->dev_id);
+                       per_cpu(xen_cmcp_irq, cpu) = irq;
                        break;
                case IA64_CPEP_VECTOR:
-                       snprintf(per_cpu(cpep_name, cpu),
-                                sizeof(per_cpu(cpep_name, cpu)),
+                       snprintf(per_cpu(xen_cpep_name, cpu),
+                                sizeof(per_cpu(xen_cpep_name, cpu)),
                                 "%s%d", action->name, cpu);
                        irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
-                                                    action->handler,
-                                                    action->flags,
-                                                    per_cpu(cpep_name, cpu),
-                                                    action->dev_id);
-                       per_cpu(cpep_irq, cpu) = irq;
+                                               action->handler,
+                                               action->flags,
+                                               per_cpu(xen_cpep_name, cpu),
+                                               action->dev_id);
+                       per_cpu(xen_cpep_irq, cpu) = irq;
                        break;
                case IA64_CPE_VECTOR:
                case IA64_MCA_RENDEZ_VECTOR:
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb,
 
        if (action == CPU_DEAD) {
                /* Unregister evtchn.  */
-               if (per_cpu(cpep_irq, cpu) >= 0) {
-                       unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
-                       per_cpu(cpep_irq, cpu) = -1;
+               if (per_cpu(xen_cpep_irq, cpu) >= 0) {
+                       unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
+                                              NULL);
+                       per_cpu(xen_cpep_irq, cpu) = -1;
                }
-               if (per_cpu(cmcp_irq, cpu) >= 0) {
-                       unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
-                       per_cpu(cmcp_irq, cpu) = -1;
+               if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
+                       unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
+                                              NULL);
+                       per_cpu(xen_cmcp_irq, cpu) = -1;
                }
-               if (per_cpu(cmc_irq, cpu) >= 0) {
-                       unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
-                       per_cpu(cmc_irq, cpu) = -1;
+               if (per_cpu(xen_cmc_irq, cpu) >= 0) {
+                       unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
+                       per_cpu(xen_cmc_irq, cpu) = -1;
                }
-               if (per_cpu(ipi_irq, cpu) >= 0) {
-                       unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
-                       per_cpu(ipi_irq, cpu) = -1;
+               if (per_cpu(xen_ipi_irq, cpu) >= 0) {
+                       unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
+                       per_cpu(xen_ipi_irq, cpu) = -1;
                }
-               if (per_cpu(resched_irq, cpu) >= 0) {
-                       unbind_from_irqhandler(per_cpu(resched_irq, cpu),
-                                               NULL);
-                       per_cpu(resched_irq, cpu) = -1;
+               if (per_cpu(xen_resched_irq, cpu) >= 0) {
+                       unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
+                                              NULL);
+                       per_cpu(xen_resched_irq, cpu) = -1;
                }
-               if (per_cpu(timer_irq, cpu) >= 0) {
-                       unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
-                       per_cpu(timer_irq, cpu) = -1;
+               if (per_cpu(xen_timer_irq, cpu) >= 0) {
+                       unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
+                                              NULL);
+                       per_cpu(xen_timer_irq, cpu) = -1;
                }
        }
        return NOTIFY_OK;
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index dbeadb9c8e20550fd0e4a6ad4d2c84e7420430b9..c1c544513e8d0926a8a4bd879f0056240f067bb0 100644
 
 #include "../kernel/fsyscall_gtod_data.h"
 
-DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-DEFINE_PER_CPU(unsigned long, processed_stolen_time);
-DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
+static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
 
 /* taken from i386/kernel/time-xen.c */
 static void xen_init_missing_ticks_accounting(int cpu)
 {
        struct vcpu_register_runstate_memory_area area;
-       struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+       struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
        int rc;
 
        memset(runstate, 0, sizeof(*runstate));
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu)
                                &area);
        WARN_ON(rc && rc != -ENOSYS);
 
-       per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
-       per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+       per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+       per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
                                            + runstate->time[RUNSTATE_offline];
 }
 
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 
        BUG_ON(preemptible());
 
-       state = &__get_cpu_var(runstate);
+       state = &__get_cpu_var(xen_runstate);
 
        /*
         * The runstate info is always updated by the hypervisor on
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm)
         * This function just checks and reject this effect.
         */
        if (!time_after_eq(runstate.time[RUNSTATE_blocked],
-                          per_cpu(processed_blocked_time, cpu)))
+                          per_cpu(xen_blocked_time, cpu)))
                blocked = 0;
 
        if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
                           runstate.time[RUNSTATE_offline],
-                          per_cpu(processed_stolen_time, cpu)))
+                          per_cpu(xen_stolen_time, cpu)))
                stolen = 0;
 
        if (!time_after(delta_itm + new_itm, ia64_get_itc()))
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm)
                } else {
                        local_cpu_data->itm_next = delta_itm + new_itm;
                }
-               per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
-               per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
+               per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
+               per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
        }
        return delta_itm;
 }
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index fe60e1abaee8900523cb827ea8d9ca1a410dcde8..aca0e28581c7733febbd2029a5c0a687f41b7485 100644
@@ -83,9 +83,9 @@
 #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END KMAP_START
 #else
-extern unsigned long vmalloc_end;
+extern unsigned long m68k_vmalloc_end;
 #define VMALLOC_START 0x0f800000
-#define VMALLOC_END vmalloc_end
+#define VMALLOC_END m68k_vmalloc_end
 #endif /* CONFIG_SUN3 */
 
 /* zero page used for uninitialized stuff */
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 3cd19390aae51fcfa73ebe35b8c8075249cf780e..94f81ecfe3f83f91217dc1d721402d658e204dc5 100644
@@ -45,8 +45,8 @@
 ** Globals
 */
 
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long m68k_vmalloc_end;
+EXPORT_SYMBOL(m68k_vmalloc_end);
 
 unsigned long pmeg_vaddr[PMEGS_NUM];
 unsigned char pmeg_alloc[PMEGS_NUM];
@@ -172,8 +172,8 @@ void mmu_emu_init(unsigned long bootmem_end)
 #endif
                        // the lowest mapping here is the end of our
                        // vmalloc region
-                       if(!vmalloc_end)
-                               vmalloc_end = seg;
+                       if (!m68k_vmalloc_end)
+                               m68k_vmalloc_end = seg;
 
                        // mark the segmap alloc'd, and reserve any
                        // of the first 0xbff pages the hardware is
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index dacafab00eb25c4645ffd787b0fc77730cf25859..67e6389d625a43a7cf62d4d1f5358233b5fef3d7 100644
@@ -31,13 +31,13 @@ const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 #define KPROBE_HIT_ACTIVE      0x00000001
 #define KPROBE_HIT_SS          0x00000002
 
-static struct kprobe *current_kprobe;
-static unsigned long current_kprobe_orig_pc;
-static unsigned long current_kprobe_next_pc;
-static int current_kprobe_ss_flags;
+static struct kprobe *cur_kprobe;
+static unsigned long cur_kprobe_orig_pc;
+static unsigned long cur_kprobe_next_pc;
+static int cur_kprobe_ss_flags;
 static unsigned long kprobe_status;
-static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2];
-static unsigned long current_kprobe_bp_addr;
+static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
+static unsigned long cur_kprobe_bp_addr;
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 
@@ -399,26 +399,25 @@ void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
        unsigned long nextpc;
 
-       current_kprobe_orig_pc = regs->pc;
-       memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
-       regs->pc = (unsigned long) current_kprobe_ss_buf;
+       cur_kprobe_orig_pc = regs->pc;
+       memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
+       regs->pc = (unsigned long) cur_kprobe_ss_buf;
 
-       nextpc = find_nextpc(regs, &current_kprobe_ss_flags);
-       if (current_kprobe_ss_flags & SINGLESTEP_PCREL)
-               current_kprobe_next_pc =
-                       current_kprobe_orig_pc + (nextpc - regs->pc);
+       nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
+       if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
+               cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
        else
-               current_kprobe_next_pc = nextpc;
+               cur_kprobe_next_pc = nextpc;
 
        /* branching instructions need special handling */
-       if (current_kprobe_ss_flags & SINGLESTEP_BRANCH)
+       if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
                nextpc = singlestep_branch_setup(regs);
 
-       current_kprobe_bp_addr = nextpc;
+       cur_kprobe_bp_addr = nextpc;
 
        *(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
-       mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf,
-                                   sizeof(current_kprobe_ss_buf));
+       mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
+                                   sizeof(cur_kprobe_ss_buf));
        mn10300_icache_inv();
 }
 
@@ -440,7 +439,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
                        disarm_kprobe(p, regs);
                        ret = 1;
                } else {
-                       p = current_kprobe;
+                       p = cur_kprobe;
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
@@ -464,7 +463,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
        }
 
        kprobe_status = KPROBE_HIT_ACTIVE;
-       current_kprobe = p;
+       cur_kprobe = p;
        if (p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
                return 1;
@@ -491,8 +490,8 @@ no_kprobe:
 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
        /* we may need to fixup regs/stack after singlestepping a call insn */
-       if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) {
-               regs->pc = current_kprobe_orig_pc;
+       if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
+               regs->pc = cur_kprobe_orig_pc;
                switch (p->ainsn.insn[0]) {
                case 0xcd:      /* CALL (d16,PC) */
                        *(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
@@ -523,8 +522,8 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
                }
        }
 
-       regs->pc = current_kprobe_next_pc;
-       current_kprobe_bp_addr = 0;
+       regs->pc = cur_kprobe_next_pc;
+       cur_kprobe_bp_addr = 0;
 }
 
 static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
@@ -532,10 +531,10 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
        if (!kprobe_running())
                return 0;
 
-       if (current_kprobe->post_handler)
-               current_kprobe->post_handler(current_kprobe, regs, 0);
+       if (cur_kprobe->post_handler)
+               cur_kprobe->post_handler(cur_kprobe, regs, 0);
 
-       resume_execution(current_kprobe, regs);
+       resume_execution(cur_kprobe, regs);
        reset_current_kprobe();
        preempt_enable_no_resched();
        return 1;
@@ -545,12 +544,12 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
 static inline
 int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
-       if (current_kprobe->fault_handler &&
-           current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+       if (cur_kprobe->fault_handler &&
+           cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
                return 1;
 
        if (kprobe_status & KPROBE_HIT_SS) {
-               resume_execution(current_kprobe, regs);
+               resume_execution(cur_kprobe, regs);
                reset_current_kprobe();
                preempt_enable_no_resched();
        }
@@ -567,7 +566,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 
        switch (val) {
        case DIE_BREAKPOINT:
-               if (current_kprobe_bp_addr != args->regs->pc) {
+               if (cur_kprobe_bp_addr != args->regs->pc) {
                        if (kprobe_handler(args->regs))
                                return NOTIFY_STOP;
                } else {
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index d9ea8d39c3428fa88ef2c88fbab5e40d12e83fe5..1d3b270d30835c1f8d94b054b35584ad1d5c9c89 100644
@@ -37,7 +37,7 @@ extern void cpu_die(void);
 extern void smp_send_debugger_break(int cpu);
 extern void smp_message_recv(int);
 
-DECLARE_PER_CPU(unsigned int, pvr);
+DECLARE_PER_CPU(unsigned int, cpu_pvr);
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern void fixup_irqs(cpumask_t map);
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 936f04dbfc6f00bd1803a643753520b34e1ae8b1..a3c11cac3d7154d1381d77a83ec513e5c6601c11 100644
@@ -487,11 +487,11 @@ static void perf_callchain_user_32(struct pt_regs *regs,
  * Since we can't get PMU interrupts inside a PMU interrupt handler,
  * we don't need separate irq and nmi entries here.
  */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
 
 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 {
-       struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+       struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
 
        entry->nr = 0;
 
index 845c72ab7357884c580a1c6f3f3217ed4987ee75..03dd6a248198c3247af741db5f154452b2c84f2c 100644 (file)
@@ -157,7 +157,7 @@ extern u32 cpu_temp_both(unsigned long cpu);
 #endif /* CONFIG_TAU */
 
 #ifdef CONFIG_SMP
-DEFINE_PER_CPU(unsigned int, pvr);
+DEFINE_PER_CPU(unsigned int, cpu_pvr);
 #endif
 
 static int show_cpuinfo(struct seq_file *m, void *v)
@@ -209,7 +209,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        }
 
 #ifdef CONFIG_SMP
-       pvr = per_cpu(pvr, cpu_id);
+       pvr = per_cpu(cpu_pvr, cpu_id);
 #else
        pvr = mfspr(SPRN_PVR);
 #endif
index 97196eefef3edccba66bfd7398b9d75962e1d5c4..a521fb8a40ee2fcb206397cf490aab9550cf9c96 100644 (file)
@@ -235,7 +235,7 @@ struct thread_info *current_set[NR_CPUS];
 
 static void __devinit smp_store_cpu_info(int id)
 {
-       per_cpu(pvr, id) = mfspr(SPRN_PVR);
+       per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
 static void __init smp_create_idle(unsigned int cpu)
index f9dbf76a763f74fea41d2af522a23123f6880250..7267effc8078b53265e43898a7551f6adf2523d8 100644 (file)
@@ -54,7 +54,7 @@ struct iic {
        struct device_node *node;
 };
 
-static DEFINE_PER_CPU(struct iic, iic);
+static DEFINE_PER_CPU(struct iic, cpu_iic);
 #define IIC_NODE_COUNT 2
 static struct irq_host *iic_host;
 
@@ -82,7 +82,7 @@ static void iic_unmask(unsigned int irq)
 
 static void iic_eoi(unsigned int irq)
 {
-       struct iic *iic = &__get_cpu_var(iic);
+       struct iic *iic = &__get_cpu_var(cpu_iic);
        out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
        BUG_ON(iic->eoi_ptr < 0);
 }
@@ -146,7 +146,7 @@ static unsigned int iic_get_irq(void)
        struct iic *iic;
        unsigned int virq;
 
-       iic = &__get_cpu_var(iic);
+       iic = &__get_cpu_var(cpu_iic);
        *(unsigned long *) &pending =
                in_be64((u64 __iomem *) &iic->regs->pending_destr);
        if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -161,12 +161,12 @@ static unsigned int iic_get_irq(void)
 
 void iic_setup_cpu(void)
 {
-       out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
+       out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
 }
 
 u8 iic_get_target_id(int cpu)
 {
-       return per_cpu(iic, cpu).target_id;
+       return per_cpu(cpu_iic, cpu).target_id;
 }
 
 EXPORT_SYMBOL_GPL(iic_get_target_id);
@@ -181,7 +181,7 @@ static inline int iic_ipi_to_irq(int ipi)
 
 void iic_cause_IPI(int cpu, int mesg)
 {
-       out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
+       out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4);
 }
 
 struct irq_host *iic_get_irq_host(int node)
@@ -348,7 +348,7 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
        /* XXX FIXME: should locate the linux CPU number from the HW cpu
         * number properly. We are lucky for now
         */
-       struct iic *iic = &per_cpu(iic, hw_cpu);
+       struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
 
        iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
        BUG_ON(iic->regs == NULL);
index 937a544a236d5e1a58be64b821d23bb20e965c63..c5f3116b6ca524e8b08aff7193ca796ccd089e00 100644 (file)
@@ -54,7 +54,7 @@ struct dtl {
        int                     buf_entries;
        u64                     last_idx;
 };
-static DEFINE_PER_CPU(struct dtl, dtl);
+static DEFINE_PER_CPU(struct dtl, cpu_dtl);
 
 /*
  * Dispatch trace log event mask:
@@ -261,7 +261,7 @@ static int dtl_init(void)
 
        /* set up the per-cpu log structures */
        for_each_possible_cpu(i) {
-               struct dtl *dtl = &per_cpu(dtl, i);
+               struct dtl *dtl = &per_cpu(cpu_dtl, i);
                dtl->cpu = i;
 
                rc = dtl_setup_file(dtl);
index b129611590a449f61de21fdcaa803eefbf7eb6fe..f30f4a1ead23d373afb51161f3303dfeae9fbe17 100644 (file)
@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(short, wd_enabled);
 static int endflag __initdata;
 
 static DEFINE_PER_CPU(unsigned int, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
@@ -112,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
                touched = 1;
        }
        if (!touched && __get_cpu_var(last_irq_sum) == sum) {
-               local_inc(&__get_cpu_var(alert_counter));
-               if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
+               __this_cpu_inc(per_cpu_var(alert_counter));
+               if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
                        die_nmi("BUG: NMI Watchdog detected LOCKUP",
                                regs, panic_on_timeout);
        } else {
                __get_cpu_var(last_irq_sum) = sum;
-               local_set(&__get_cpu_var(alert_counter), 0);
+               __this_cpu_write(per_cpu_var(alert_counter), 0);
        }
        if (__get_cpu_var(wd_enabled)) {
                write_pic(picl_value(nmi_hz));
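The alert_counter change is a different pattern: the counter is only ever touched from the owning CPU's NMI path, so the local_t atomic is unnecessary. It becomes a plain long updated through the new __this_cpu_*() operations, which assume the caller already cannot migrate; per_cpu_var() supplies the per_cpu__ prefix that DEFINE_PER_CPU still attaches at this point in the series. A hypothetical counter using the same pattern (names and threshold invented):

/* Sketch only; not code from this merge. */
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

static DEFINE_PER_CPU(long, demo_stuck_ticks);

/* Called from a per-cpu timer/NMI path, i.e. with migration impossible. */
static void demo_check(bool made_progress)
{
        if (made_progress) {
                __this_cpu_write(per_cpu_var(demo_stuck_ticks), 0);
                return;
        }

        __this_cpu_inc(per_cpu_var(demo_stuck_ticks));
        if (__this_cpu_read(per_cpu_var(demo_stuck_ticks)) == 30)
                printk(KERN_WARNING "cpu%d appears stuck\n",
                       smp_processor_id());
}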
index b65a36defeb737434f8823c21c625f14db8e6347..0c44196b78ac82ec706220182acfc485b6d8fb87 100644 (file)
@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void);
 
 #define percpu_to_op(op, var, val)                     \
 do {                                                   \
-       typedef typeof(var) T__;                        \
+       typedef typeof(var) pto_T__;                    \
        if (0) {                                        \
-               T__ tmp__;                              \
-               tmp__ = (val);                          \
+               pto_T__ pto_tmp__;                      \
+               pto_tmp__ = (val);                      \
        }                                               \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
-                   : "qi" ((T__)(val)));               \
+                   : "qi" ((pto_T__)(val)));           \
                break;                                  \
        case 2:                                         \
                asm(op "w %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
-                   : "ri" ((T__)(val)));               \
+                   : "ri" ((pto_T__)(val)));           \
                break;                                  \
        case 4:                                         \
                asm(op "l %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
-                   : "ri" ((T__)(val)));               \
+                   : "ri" ((pto_T__)(val)));           \
                break;                                  \
        case 8:                                         \
                asm(op "q %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
-                   : "re" ((T__)(val)));               \
+                   : "re" ((pto_T__)(val)));           \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
@@ -106,31 +106,31 @@ do {                                                      \
 
 #define percpu_from_op(op, var, constraint)            \
 ({                                                     \
-       typeof(var) ret__;                              \
+       typeof(var) pfo_ret__;                          \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b "__percpu_arg(1)",%0"         \
-                   : "=q" (ret__)                      \
+                   : "=q" (pfo_ret__)                  \
                    : constraint);                      \
                break;                                  \
        case 2:                                         \
                asm(op "w "__percpu_arg(1)",%0"         \
-                   : "=r" (ret__)                      \
+                   : "=r" (pfo_ret__)                  \
                    : constraint);                      \
                break;                                  \
        case 4:                                         \
                asm(op "l "__percpu_arg(1)",%0"         \
-                   : "=r" (ret__)                      \
+                   : "=r" (pfo_ret__)                  \
                    : constraint);                      \
                break;                                  \
        case 8:                                         \
                asm(op "q "__percpu_arg(1)",%0"         \
-                   : "=r" (ret__)                      \
+                   : "=r" (pfo_ret__)                  \
                    : constraint);                      \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
-       ret__;                                          \
+       pfo_ret__;                                      \
 })
 
 /*
@@ -153,6 +153,84 @@ do {                                                       \
 #define percpu_or(var, val)    percpu_to_op("or", per_cpu__##var, val)
 #define percpu_xor(var, val)   percpu_to_op("xor", per_cpu__##var, val)
 
+#define __this_cpu_read_1(pcp)         percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_2(pcp)         percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_4(pcp)         percpu_from_op("mov", (pcp), "m"(pcp))
+
+#define __this_cpu_write_1(pcp, val)   percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_2(pcp, val)   percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_4(pcp, val)   percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_1(pcp, val)     percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_2(pcp, val)     percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_4(pcp, val)     percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_1(pcp, val)     percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_2(pcp, val)     percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_4(pcp, val)     percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_1(pcp, val)      percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_2(pcp, val)      percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_4(pcp, val)      percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_1(pcp, val)     percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_2(pcp, val)     percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_4(pcp, val)     percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_1(pcp)           percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_2(pcp)           percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_4(pcp)           percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_1(pcp, val)     percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_2(pcp, val)     percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_4(pcp, val)     percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_1(pcp, val)       percpu_to_op("add", (pcp), val)
+#define this_cpu_add_2(pcp, val)       percpu_to_op("add", (pcp), val)
+#define this_cpu_add_4(pcp, val)       percpu_to_op("add", (pcp), val)
+#define this_cpu_and_1(pcp, val)       percpu_to_op("and", (pcp), val)
+#define this_cpu_and_2(pcp, val)       percpu_to_op("and", (pcp), val)
+#define this_cpu_and_4(pcp, val)       percpu_to_op("and", (pcp), val)
+#define this_cpu_or_1(pcp, val)                percpu_to_op("or", (pcp), val)
+#define this_cpu_or_2(pcp, val)                percpu_to_op("or", (pcp), val)
+#define this_cpu_or_4(pcp, val)                percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_1(pcp, val)       percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_2(pcp, val)       percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_4(pcp, val)       percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_1(pcp, val)    percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_2(pcp, val)    percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_4(pcp, val)    percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_1(pcp, val)    percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_2(pcp, val)    percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_4(pcp, val)    percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_1(pcp, val)     percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_2(pcp, val)     percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_4(pcp, val)     percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_1(pcp, val)    percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_2(pcp, val)    percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_4(pcp, val)    percpu_to_op("xor", (pcp), val)
+
+/*
+ * Per cpu atomic 64 bit operations are only available under 64 bit.
+ * 32 bit must fall back to generic operations.
+ */
+#ifdef CONFIG_X86_64
+#define __this_cpu_read_8(pcp)         percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_write_8(pcp, val)   percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_8(pcp, val)     percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_8(pcp, val)     percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_8(pcp, val)      percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_8(pcp, val)     percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_8(pcp)           percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_8(pcp, val)     percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_8(pcp, val)       percpu_to_op("add", (pcp), val)
+#define this_cpu_and_8(pcp, val)       percpu_to_op("and", (pcp), val)
+#define this_cpu_or_8(pcp, val)                percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_8(pcp, val)       percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_8(pcp, val)    percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_8(pcp, val)    percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_8(pcp, val)     percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_8(pcp, val)    percpu_to_op("xor", (pcp), val)
+
+#endif
+
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define x86_test_and_clear_bit_percpu(bit, var)                                \
 ({                                                                     \
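The block of _1/_2/_4/_8 macros added above is the x86 backing for the generic this_cpu_*(), __this_cpu_*() and irqsafe_cpu_*() interface: the generic header selects the right width with a sizeof() switch, and each variant is a single instruction addressed through the per-cpu segment register, so even the irq-safe forms need no locking or flag saving. The pto_/pfo_ prefixes on the macro temporaries are the same namespace hygiene, keeping them from colliding with identifiers in the caller's expression. Usage, with an invented counter:

/* Sketch only; not code from this merge. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_events);

static void demo_event(void)
{
        /*
         * Resolves to this_cpu_add_8 above (or _4 on 32-bit), i.e. a
         * single "add" with a %gs/%fs-relative operand: safe against
         * preemption without an explicit preempt_disable().
         */
        this_cpu_inc(per_cpu_var(demo_events));
}

static unsigned long demo_events_on(int cpu)
{
        return per_cpu(demo_events, cpu);       /* remote reads stay per_cpu() */
}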
index 6389432a9dbf7f07a0dd08b4e67857c6ec899d6d..0159a69396cba449a424190459a02d83a3f417d8 100644 (file)
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
  */
 
 static DEFINE_PER_CPU(unsigned, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 * Ayiee, looks like this CPU is stuck ...
                 * wait a few IRQs (5 seconds) before doing the oops ...
                 */
-               local_inc(&__get_cpu_var(alert_counter));
-               if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+               __this_cpu_inc(per_cpu_var(alert_counter));
+               if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
                        /*
                         * die_nmi will return ONLY if NOTIFY_STOP happens..
                         */
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                                regs, panic_on_timeout);
        } else {
                __get_cpu_var(last_irq_sum) = sum;
-               local_set(&__get_cpu_var(alert_counter), 0);
+               __this_cpu_write(per_cpu_var(alert_counter), 0);
        }
 
        /* see if the nmi watchdog went off */
index c1afa990a6c84bf49229a9c50052b063b9731236..20399b7b0c3f1a4e3a3c9d5e66a92d952d93f028 100644 (file)
@@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void)
 
 void __cpuinit cpu_init(void)
 {
-       struct orig_ist *orig_ist;
+       struct orig_ist *oist;
        struct task_struct *me;
        struct tss_struct *t;
        unsigned long v;
@@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void)
 
        cpu = stack_smp_processor_id();
        t = &per_cpu(init_tss, cpu);
-       orig_ist = &per_cpu(orig_ist, cpu);
+       oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
        if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1143,12 +1143,12 @@ void __cpuinit cpu_init(void)
        /*
         * set up and load the per-CPU TSS
         */
-       if (!orig_ist->ist[0]) {
+       if (!oist->ist[0]) {
                char *estacks = per_cpu(exception_stacks, cpu);
 
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += exception_stack_sizes[v];
-                       orig_ist->ist[v] = t->x86_tss.ist[v] =
+                       oist->ist[v] = t->x86_tss.ist[v] =
                                        (unsigned long)estacks;
                }
        }
index dca325c0399960c4bdacc7659d3f04dbfbbdd541..b368cd862997d7cbcfb0b425fb9f1d45e136e9d0 100644 (file)
@@ -30,9 +30,9 @@
 #include <asm/apic.h>
 #include <asm/desc.h>
 
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
 
 static DEFINE_MUTEX(cpu_debug_lock);
 
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
 
        /* Already initialized */
        if (file == CPU_INDEX_BIT)
-               if (per_cpu(cpu_arr[type].init, cpu))
+               if (per_cpu(cpud_arr[type].init, cpu))
                        return 0;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
        priv->reg = reg;
        priv->file = file;
        mutex_lock(&cpu_debug_lock);
-       per_cpu(priv_arr[type], cpu) = priv;
-       per_cpu(cpu_priv_count, cpu)++;
+       per_cpu(cpud_priv_arr[type], cpu) = priv;
+       per_cpu(cpud_priv_count, cpu)++;
        mutex_unlock(&cpu_debug_lock);
 
        if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
                                    dentry, (void *)priv, &cpu_fops);
        else {
                debugfs_create_file(cpu_base[type].name, S_IRUGO,
-                                   per_cpu(cpu_arr[type].dentry, cpu),
+                                   per_cpu(cpud_arr[type].dentry, cpu),
                                    (void *)priv, &cpu_fops);
                mutex_lock(&cpu_debug_lock);
-               per_cpu(cpu_arr[type].init, cpu) = 1;
+               per_cpu(cpud_arr[type].init, cpu) = 1;
                mutex_unlock(&cpu_debug_lock);
        }
 
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
                if (!is_typeflag_valid(cpu, cpu_base[type].flag))
                        continue;
                cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
-               per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+               per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
 
                if (type < CPU_TSS_BIT)
                        err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
                err = cpu_init_allreg(cpu, cpu_dentry);
 
                pr_info("cpu%d(%d) debug files %d\n",
-                       cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
-               if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+                       cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+               if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
                        pr_err("Register files count %d exceeds limit %d\n",
-                               per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
-                       per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+                               per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+                       per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
                        err = -ENFILE;
                }
                if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
                debugfs_remove_recursive(cpu_debugfs_dir);
 
        for (cpu = 0; cpu <  nr_cpu_ids; cpu++)
-               for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
-                       kfree(per_cpu(priv_arr[i], cpu));
+               for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+                       kfree(per_cpu(cpud_priv_arr[i], cpu));
 }
 
 module_init(cpu_debug_init);
index d2e7c77c1ea4901a8e3e6b2a70c99b5c991132f3..f28decf8dde3990626f493e5d962bfea9d48e59f 100644 (file)
@@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
        unsigned int cpu_feature;
 };
 
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
 
-static DEFINE_PER_CPU(struct aperfmperf, old_perf);
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
 
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance *acpi_perf_data;
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
        if (unlikely(cpumask_empty(mask)))
                return 0;
 
-       switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
+       switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
-               perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
+               perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
                cmd.addr.io.port = perf->control_register.address;
                cmd.addr.io.bit_width = perf->control_register.bit_width;
                break;
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
        if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
                return 0;
 
-       ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
-       per_cpu(old_perf, cpu) = perf;
+       ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+       per_cpu(acfreq_old_perf, cpu) = perf;
 
        retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
 
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-       struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
+       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
        unsigned int freq;
        unsigned int cached_freq;
 
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                               unsigned int target_freq, unsigned int relation)
 {
-       struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
        struct acpi_processor_performance *perf;
        struct cpufreq_freqs freqs;
        struct drv_cmd cmd;
@@ -416,7 +416,7 @@ out:
 
 static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
 {
-       struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
        dprintk("acpi_cpufreq_verify\n");
 
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                return -ENOMEM;
 
        data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-       per_cpu(drv_data, cpu) = data;
+       per_cpu(acfreq_data, cpu) = data;
 
        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
                acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -725,20 +725,20 @@ err_unreg:
        acpi_processor_unregister_performance(perf, cpu);
 err_free:
        kfree(data);
-       per_cpu(drv_data, cpu) = NULL;
+       per_cpu(acfreq_data, cpu) = NULL;
 
        return result;
 }
 
 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-       struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
        dprintk("acpi_cpufreq_cpu_exit\n");
 
        if (data) {
                cpufreq_frequency_table_put_attr(policy->cpu);
-               per_cpu(drv_data, policy->cpu) = NULL;
+               per_cpu(acfreq_data, policy->cpu) = NULL;
                acpi_processor_unregister_performance(data->acpi_data,
                                                      policy->cpu);
                kfree(data);
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-       struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
 
        dprintk("acpi_cpufreq_resume\n");
 
index 6c40f6b5b340b031192232bdc946d62c7076d0cc..0c06bca2a1dcc1dc68003aa0bdde3935b254967d 100644 (file)
@@ -499,8 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SYSFS
 
 /* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
-#define CPUID4_INFO_IDX(x, y)  (&((per_cpu(cpuid4_info, x))[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+#define CPUID4_INFO_IDX(x, y)  (&((per_cpu(ici_cpuid4_info, x))[y]))
 
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
@@ -513,7 +513,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
        if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
                struct cpuinfo_x86 *d;
                for_each_online_cpu(i) {
-                       if (!per_cpu(cpuid4_info, i))
+                       if (!per_cpu(ici_cpuid4_info, i))
                                continue;
                        d = &cpu_data(i);
                        this_leaf = CPUID4_INFO_IDX(i, index);
@@ -535,7 +535,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
                            c->apicid >> index_msb) {
                                cpumask_set_cpu(i,
                                        to_cpumask(this_leaf->shared_cpu_map));
-                               if (i != cpu && per_cpu(cpuid4_info, i))  {
+                               if (i != cpu && per_cpu(ici_cpuid4_info, i))  {
                                        sibling_leaf =
                                                CPUID4_INFO_IDX(i, index);
                                        cpumask_set_cpu(cpu, to_cpumask(
@@ -574,8 +574,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);
 
-       kfree(per_cpu(cpuid4_info, cpu));
-       per_cpu(cpuid4_info, cpu) = NULL;
+       kfree(per_cpu(ici_cpuid4_info, cpu));
+       per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
 static int
@@ -614,15 +614,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
        if (num_cache_leaves == 0)
                return -ENOENT;
 
-       per_cpu(cpuid4_info, cpu) = kzalloc(
+       per_cpu(ici_cpuid4_info, cpu) = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-       if (per_cpu(cpuid4_info, cpu) == NULL)
+       if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                return -ENOMEM;
 
        smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
        if (retval) {
-               kfree(per_cpu(cpuid4_info, cpu));
-               per_cpu(cpuid4_info, cpu) = NULL;
+               kfree(per_cpu(ici_cpuid4_info, cpu));
+               per_cpu(ici_cpuid4_info, cpu) = NULL;
        }
 
        return retval;
@@ -634,7 +634,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
 
 /* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, cache_kobject);
+static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
 
 struct _index_kobject {
        struct kobject kobj;
@@ -643,8 +643,8 @@ struct _index_kobject {
 };
 
 /* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
-#define INDEX_KOBJECT_PTR(x, y)                (&((per_cpu(index_kobject, x))[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
+#define INDEX_KOBJECT_PTR(x, y)                (&((per_cpu(ici_index_kobject, x))[y]))
 
 #define show_one_plus(file_name, object, val)                          \
 static ssize_t show_##file_name                                                \
@@ -863,10 +863,10 @@ static struct kobj_type ktype_percpu_entry = {
 
 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
-       kfree(per_cpu(cache_kobject, cpu));
-       kfree(per_cpu(index_kobject, cpu));
-       per_cpu(cache_kobject, cpu) = NULL;
-       per_cpu(index_kobject, cpu) = NULL;
+       kfree(per_cpu(ici_cache_kobject, cpu));
+       kfree(per_cpu(ici_index_kobject, cpu));
+       per_cpu(ici_cache_kobject, cpu) = NULL;
+       per_cpu(ici_index_kobject, cpu) = NULL;
        free_cache_attributes(cpu);
 }
 
@@ -882,14 +882,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
                return err;
 
        /* Allocate all required memory */
-       per_cpu(cache_kobject, cpu) =
+       per_cpu(ici_cache_kobject, cpu) =
                kzalloc(sizeof(struct kobject), GFP_KERNEL);
-       if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
+       if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
                goto err_out;
 
-       per_cpu(index_kobject, cpu) = kzalloc(
+       per_cpu(ici_index_kobject, cpu) = kzalloc(
            sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
-       if (unlikely(per_cpu(index_kobject, cpu) == NULL))
+       if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
                goto err_out;
 
        return 0;
@@ -913,7 +913,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
        if (unlikely(retval < 0))
                return retval;
 
-       retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+       retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
                                      &ktype_percpu_entry,
                                      &sys_dev->kobj, "%s", "cache");
        if (retval < 0) {
@@ -927,12 +927,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
                this_object->index = i;
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &ktype_cache,
-                                             per_cpu(cache_kobject, cpu),
+                                             per_cpu(ici_cache_kobject, cpu),
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++)
                                kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
-                       kobject_put(per_cpu(cache_kobject, cpu));
+                       kobject_put(per_cpu(ici_cache_kobject, cpu));
                        cpuid4_cache_sysfs_exit(cpu);
                        return retval;
                }
@@ -940,7 +940,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
        }
        cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
-       kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
+       kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
        return 0;
 }
 
@@ -949,7 +949,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
        unsigned int cpu = sys_dev->id;
        unsigned long i;
 
-       if (per_cpu(cpuid4_info, cpu) == NULL)
+       if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                return;
        if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                return;
@@ -957,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
-       kobject_put(per_cpu(cache_kobject, cpu));
+       kobject_put(per_cpu(ici_cache_kobject, cpu));
        cpuid4_cache_sysfs_exit(cpu);
 }
 
index ef42a038f1a640ed43231e9a3bad1c6e741eff29..1c47390dd0e51ef7ccbd4698302a2b19bcf7ee20 100644 (file)
@@ -265,13 +265,13 @@ struct ds_context {
        int                     cpu;
 };
 
-static DEFINE_PER_CPU(struct ds_context *, cpu_context);
+static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
 
 
 static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
 {
        struct ds_context **p_context =
-               (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
+               (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
        struct ds_context *context = NULL;
        struct ds_context *new_context = NULL;
 
index 3de0b37ec038673c3a70b4f14be7dcd5656dfcfe..1d9b33843c80ef521dc059697285cfed06cfd7d7 100644 (file)
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
 static int svm_hardware_enable(void *garbage)
 {
 
-       struct svm_cpu_data *svm_data;
+       struct svm_cpu_data *sd;
        uint64_t efer;
        struct descriptor_table gdt_descr;
        struct desc_struct *gdt;
@@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage)
                       me);
                return -EINVAL;
        }
-       svm_data = per_cpu(svm_data, me);
+       sd = per_cpu(svm_data, me);
 
-       if (!svm_data) {
+       if (!sd) {
                printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
                       me);
                return -EINVAL;
        }
 
-       svm_data->asid_generation = 1;
-       svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
-       svm_data->next_asid = svm_data->max_asid + 1;
+       sd->asid_generation = 1;
+       sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+       sd->next_asid = sd->max_asid + 1;
 
        kvm_get_gdt(&gdt_descr);
        gdt = (struct desc_struct *)gdt_descr.base;
-       svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+       sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
        wrmsrl(MSR_EFER, efer | EFER_SVME);
 
-       wrmsrl(MSR_VM_HSAVE_PA,
-              page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+       wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
 
        return 0;
 }
 
 static void svm_cpu_uninit(int cpu)
 {
-       struct svm_cpu_data *svm_data
-               = per_cpu(svm_data, raw_smp_processor_id());
+       struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
 
-       if (!svm_data)
+       if (!sd)
                return;
 
        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
-       __free_page(svm_data->save_area);
-       kfree(svm_data);
+       __free_page(sd->save_area);
+       kfree(sd);
 }
 
 static int svm_cpu_init(int cpu)
 {
-       struct svm_cpu_data *svm_data;
+       struct svm_cpu_data *sd;
        int r;
 
-       svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
-       if (!svm_data)
+       sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
+       if (!sd)
                return -ENOMEM;
-       svm_data->cpu = cpu;
-       svm_data->save_area = alloc_page(GFP_KERNEL);
+       sd->cpu = cpu;
+       sd->save_area = alloc_page(GFP_KERNEL);
        r = -ENOMEM;
-       if (!svm_data->save_area)
+       if (!sd->save_area)
                goto err_1;
 
-       per_cpu(svm_data, cpu) = svm_data;
+       per_cpu(svm_data, cpu) = sd;
 
        return 0;
 
 err_1:
-       kfree(svm_data);
+       kfree(sd);
        return r;
 
 }
@@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
 #endif
 }
 
-static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 {
-       if (svm_data->next_asid > svm_data->max_asid) {
-               ++svm_data->asid_generation;
-               svm_data->next_asid = 1;
+       if (sd->next_asid > sd->max_asid) {
+               ++sd->asid_generation;
+               sd->next_asid = 1;
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
        }
 
-       svm->asid_generation = svm_data->asid_generation;
-       svm->vmcb->control.asid = svm_data->next_asid++;
+       svm->asid_generation = sd->asid_generation;
+       svm->vmcb->control.asid = sd->next_asid++;
 }
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
@@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu)
 {
        int cpu = raw_smp_processor_id();
 
-       struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
-       svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
+       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+       sd->tss_desc->type = 9; /* available 32/64-bit TSS */
        load_TR_desc();
 }
 
@@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
 {
        int cpu = raw_smp_processor_id();
 
-       struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
+       struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
        svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
        /* FIXME: handle wraparound of asid_generation */
-       if (svm->asid_generation != svm_data->asid_generation)
-               new_asid(svm, svm_data);
+       if (svm->asid_generation != sd->asid_generation)
+               new_asid(svm, sd);
 }
 
 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
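The svm.c hunks are the mirror image of the earlier renames: the per-cpu symbol keeps its name (svm_data, a per-cpu struct svm_cpu_data pointer judging by the accesses above) and it is the local variable that moves out of the way, becoming sd, since after the prefix removal a local named svm_data would capture the per_cpu() argument. Reduced to its essentials, with the declaration assumed from context:

/* Sketch; struct svm_cpu_data and the per-cpu declaration live in svm.c. */
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static struct svm_cpu_data *demo_this_svm_data(void)
{
        /* The local must not be called "svm_data" once the prefix goes away. */
        struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

        return sd;
}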
index 64757c0ba5fc2255a4d9188b3a68636832a73a1f..563d20504988ef7671cf600cdc2762f00418a4c8 100644 (file)
 
 cpumask_var_t xen_cpu_initialized_map;
 
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, callfuncsingle_irq);
-static DEFINE_PER_CPU(int, debug_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq);
+static DEFINE_PER_CPU(int, xen_callfunc_irq);
+static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
 
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu)
                                    NULL);
        if (rc < 0)
                goto fail;
-       per_cpu(resched_irq, cpu) = rc;
+       per_cpu(xen_resched_irq, cpu) = rc;
 
        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu)
                                    NULL);
        if (rc < 0)
                goto fail;
-       per_cpu(callfunc_irq, cpu) = rc;
+       per_cpu(xen_callfunc_irq, cpu) = rc;
 
        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu)
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
-       per_cpu(debug_irq, cpu) = rc;
+       per_cpu(xen_debug_irq, cpu) = rc;
 
        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu)
                                    NULL);
        if (rc < 0)
                goto fail;
-       per_cpu(callfuncsingle_irq, cpu) = rc;
+       per_cpu(xen_callfuncsingle_irq, cpu) = rc;
 
        return 0;
 
  fail:
-       if (per_cpu(resched_irq, cpu) >= 0)
-               unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-       if (per_cpu(callfunc_irq, cpu) >= 0)
-               unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-       if (per_cpu(debug_irq, cpu) >= 0)
-               unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
-       if (per_cpu(callfuncsingle_irq, cpu) >= 0)
-               unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+       if (per_cpu(xen_resched_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+       if (per_cpu(xen_callfunc_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+       if (per_cpu(xen_debug_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+       if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
+                                      NULL);
 
        return rc;
 }
@@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu)
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
-       unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-       unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-       unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
-       unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+       unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);
 
index 9d1f853120d859cfc814e0f2eaa2e01b3f1575e8..0d3f07cd1b5fe9aee977674b11b0beb311e25eb0 100644 (file)
 #define NS_PER_TICK    (1000000000LL / HZ)
 
 /* runstate info updated by Xen */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
 
 /* snapshots of runstate info */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
 
 /* unused ns of stolen and blocked time */
-static DEFINE_PER_CPU(u64, residual_stolen);
-static DEFINE_PER_CPU(u64, residual_blocked);
+static DEFINE_PER_CPU(u64, xen_residual_stolen);
+static DEFINE_PER_CPU(u64, xen_residual_blocked);
 
 /* return a consistent snapshot of 64-bit time/counter value */
 static u64 get64(const u64 *p)
@@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 
        BUG_ON(preemptible());
 
-       state = &__get_cpu_var(runstate);
+       state = &__get_cpu_var(xen_runstate);
 
        /*
         * The runstate info is always updated by the hypervisor on
@@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 /* return true when a vcpu could run but has no real cpu to run on */
 bool xen_vcpu_stolen(int vcpu)
 {
-       return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
+       return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
 }
 
 void xen_setup_runstate_info(int cpu)
 {
        struct vcpu_register_runstate_memory_area area;
 
-       area.addr.v = &per_cpu(runstate, cpu);
+       area.addr.v = &per_cpu(xen_runstate, cpu);
 
        if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
                               cpu, &area))
@@ -122,7 +122,7 @@ static void do_stolen_accounting(void)
 
        WARN_ON(state.state != RUNSTATE_running);
 
-       snap = &__get_cpu_var(runstate_snapshot);
+       snap = &__get_cpu_var(xen_runstate_snapshot);
 
        /* work out how much time the VCPU has not been runn*ing*  */
        blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
@@ -133,24 +133,24 @@ static void do_stolen_accounting(void)
 
        /* Add the appropriate number of ticks of stolen time,
           including any left-overs from last time. */
-       stolen = runnable + offline + __get_cpu_var(residual_stolen);
+       stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
 
        if (stolen < 0)
                stolen = 0;
 
        ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
-       __get_cpu_var(residual_stolen) = stolen;
+       __get_cpu_var(xen_residual_stolen) = stolen;
        account_steal_ticks(ticks);
 
        /* Add the appropriate number of ticks of blocked time,
           including any left-overs from last time. */
-       blocked += __get_cpu_var(residual_blocked);
+       blocked += __get_cpu_var(xen_residual_blocked);
 
        if (blocked < 0)
                blocked = 0;
 
        ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-       __get_cpu_var(residual_blocked) = blocked;
+       __get_cpu_var(xen_residual_blocked) = blocked;
        account_idle_ticks(ticks);
 }
 
index f8ae0d94a6471e0703bfc24900f88dfeb1fc8be7..704c141153236917288ee7a09c12ce8b037754d8 100644 (file)
@@ -99,7 +99,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
        struct cryptd_cpu_queue *cpu_queue;
 
        cpu = get_cpu();
-       cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+       cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
        put_cpu();
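this_cpu_ptr(ptr) is the new shorthand for per_cpu_ptr(ptr, smp_processor_id()) on dynamically allocated per-cpu memory, and on x86 it derives the address from the per-cpu segment base rather than looking up the cpu number. get_cpu()/put_cpu() stay in place here because the cpu number is still needed for queue_work_on() and because something must keep the task from migrating; the later ehca, chelsio, loopback and veth hunks use the same helper under their own BH or lock protection. A sketch of the pattern with an invented per-cpu queue:

/* Sketch; "demo_queues" stands in for memory from alloc_percpu(). */
#include <linux/list.h>
#include <linux/percpu.h>

struct demo_queue {
        struct list_head        items;
};

static struct demo_queue *demo_queues;  /* alloc_percpu(struct demo_queue) */

static void demo_enqueue(struct list_head *item)
{
        struct demo_queue *q;
        int cpu;

        cpu = get_cpu();                /* pin: this_cpu_ptr() needs a stable cpu */
        q = this_cpu_ptr(demo_queues);  /* same as per_cpu_ptr(demo_queues, cpu) */
        list_add_tail(item, &q->items);
        put_cpu();
}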
index 27fd775375b04965b834d7817db43efa059a2c97..958bd1540c303d92f84cbcea016c7da4c0ce925c 100644 (file)
@@ -131,7 +131,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
         * boot up and this data does not change there after. Hence this
         * operation should be safe. No locking required.
         */
-       addr = __pa(per_cpu_ptr(crash_notes, cpunum));
+       addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
 }
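The crash-notes change is about correctness rather than naming: per-cpu memory handed out by the new allocator can sit in the vmalloc area, where __pa() silently returns nonsense, so the physical address has to come from the percpu allocator itself via per_cpu_ptr_to_phys(). A sketch of the same translation on an invented per-cpu buffer:

/* Sketch; "demo_notes" stands in for an alloc_percpu() buffer whose
 * physical address is exported to firmware or a dump tool. */
#include <linux/percpu.h>
#include <linux/types.h>

static u32 *demo_notes;                 /* alloc_percpu(u32) at init time */

static phys_addr_t demo_note_phys(int cpu)
{
        /* __pa() is only valid for the linear mapping; this also works
         * for vmalloc-backed per-cpu chunks. */
        return per_cpu_ptr_to_phys(per_cpu_ptr(demo_notes, cpu));
}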
index f20668c09ce0611d3da1fa8582514ccfec949347..67bc2ece7b4b508da7855937a01eb388cd33ab5b 100644 (file)
@@ -64,14 +64,14 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
  * - Lock should not be held across
  *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
  */
-static DEFINE_PER_CPU(int, policy_cpu);
+static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu)                                   \
 int lock_policy_rwsem_##mode                                           \
 (int cpu)                                                              \
 {                                                                      \
-       int policy_cpu = per_cpu(policy_cpu, cpu);                      \
+       int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
        if (unlikely(!cpu_online(cpu))) {                               \
@@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
 
 void unlock_policy_rwsem_read(int cpu)
 {
-       int policy_cpu = per_cpu(policy_cpu, cpu);
+       int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
        BUG_ON(policy_cpu == -1);
        up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
 
 void unlock_policy_rwsem_write(int cpu)
 {
-       int policy_cpu = per_cpu(policy_cpu, cpu);
+       int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
        BUG_ON(policy_cpu == -1);
        up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
@@ -818,7 +818,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
 
                        /* Set proper policy_cpu */
                        unlock_policy_rwsem_write(cpu);
-                       per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+                       per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
 
                        if (lock_policy_rwsem_write(cpu) < 0) {
                                /* Should not go through policy unlock path */
@@ -932,7 +932,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
        if (!cpu_online(j))
                continue;
                per_cpu(cpufreq_cpu_data, j) = policy;
-               per_cpu(policy_cpu, j) = policy->cpu;
+               per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
        }
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1020,7 +1020,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        cpumask_copy(policy->cpus, cpumask_of(cpu));
 
        /* Initially set CPU itself as the policy_cpu */
-       per_cpu(policy_cpu, cpu) = cpu;
+       per_cpu(cpufreq_policy_cpu, cpu) = cpu;
        ret = (lock_policy_rwsem_write(cpu) < 0);
        WARN_ON(ret);
 
@@ -2002,7 +2002,7 @@ static int __init cpufreq_core_init(void)
        int cpu;
 
        for_each_possible_cpu(cpu) {
-               per_cpu(policy_cpu, cpu) = -1;
+               per_cpu(cpufreq_policy_cpu, cpu) = -1;
                init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
        }
 
index a9bd3a05a684548cc7f36aa34fe8a1057d2bfbb3..05432216e2246ac4d4c78a665e3e63ec67370015 100644 (file)
@@ -174,7 +174,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
 
-static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table);
+static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
 /**
  * show_available_freqs - show available frequencies for the specified CPU
  */
@@ -185,10 +185,10 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
        ssize_t count = 0;
        struct cpufreq_frequency_table *table;
 
-       if (!per_cpu(show_table, cpu))
+       if (!per_cpu(cpufreq_show_table, cpu))
                return -ENODEV;
 
-       table = per_cpu(show_table, cpu);
+       table = per_cpu(cpufreq_show_table, cpu);
 
        for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
                if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
@@ -217,20 +217,20 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
                                      unsigned int cpu)
 {
        dprintk("setting show_table for cpu %u to %p\n", cpu, table);
-       per_cpu(show_table, cpu) = table;
+       per_cpu(cpufreq_show_table, cpu) = table;
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
 
 void cpufreq_frequency_table_put_attr(unsigned int cpu)
 {
        dprintk("clearing show_table for cpu %u\n", cpu);
-       per_cpu(show_table, cpu) = NULL;
+       per_cpu(cpufreq_show_table, cpu) = NULL;
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
 
 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
 {
-       return per_cpu(show_table, cpu);
+       return per_cpu(cpufreq_show_table, cpu);
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
 
index 84c51e17726966c196ac53ce05671e972f65b98a..8c2f3703ec855f27a6863cb964c5d58a9d01dfa2 100644 (file)
@@ -64,7 +64,7 @@ struct aes_ctx {
        u32 *D;
 };
 
-static DEFINE_PER_CPU(struct cword *, last_cword);
+static DEFINE_PER_CPU(struct cword *, paes_last_cword);
 
 /* Tells whether the ACE is capable to generate
    the extended key for a given key_len. */
@@ -152,9 +152,9 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
 ok:
        for_each_online_cpu(cpu)
-               if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
-                   &ctx->cword.decrypt == per_cpu(last_cword, cpu))
-                       per_cpu(last_cword, cpu) = NULL;
+               if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
+                   &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
+                       per_cpu(paes_last_cword, cpu) = NULL;
 
        return 0;
 }
@@ -166,7 +166,7 @@ static inline void padlock_reset_key(struct cword *cword)
 {
        int cpu = raw_smp_processor_id();
 
-       if (cword != per_cpu(last_cword, cpu))
+       if (cword != per_cpu(paes_last_cword, cpu))
 #ifndef CONFIG_X86_64
                asm volatile ("pushfl; popfl");
 #else
@@ -176,7 +176,7 @@ static inline void padlock_reset_key(struct cword *cword)
 
 static inline void padlock_store_cword(struct cword *cword)
 {
-       per_cpu(last_cword, raw_smp_processor_id()) = cword;
+       per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
 }
 
 /*
index 8f99354082ceaa169f7ac081594bc83b0c003478..6f51a0a7a8bbdbca798f53293516e178ead5f4d2 100644 (file)
@@ -326,14 +326,7 @@ arch_initcall(dma_channel_table_init);
  */
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
-       struct dma_chan *chan;
-       int cpu;
-
-       cpu = get_cpu();
-       chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
-       put_cpu();
-
-       return chan;
+       return this_cpu_read(channel_table[tx_type]->chan);
 }
 EXPORT_SYMBOL(dma_find_channel);
 
@@ -857,7 +850,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
-       int cpu;
        unsigned long flags;
 
        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
@@ -876,10 +868,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
        tx->callback = NULL;
        cookie = tx->tx_submit(tx);
 
-       cpu = get_cpu();
-       per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-       per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-       put_cpu();
+       preempt_disable();
+       __this_cpu_add(chan->local->bytes_transferred, len);
+       __this_cpu_inc(chan->local->memcpy_count);
+       preempt_enable();
 
        return cookie;
 }
@@ -906,7 +898,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
-       int cpu;
        unsigned long flags;
 
        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
@@ -923,10 +914,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
        tx->callback = NULL;
        cookie = tx->tx_submit(tx);
 
-       cpu = get_cpu();
-       per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-       per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-       put_cpu();
+       preempt_disable();
+       __this_cpu_add(chan->local->bytes_transferred, len);
+       __this_cpu_inc(chan->local->memcpy_count);
+       preempt_enable();
 
        return cookie;
 }
@@ -955,7 +946,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
-       int cpu;
        unsigned long flags;
 
        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
@@ -973,10 +963,10 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        tx->callback = NULL;
        cookie = tx->tx_submit(tx);
 
-       cpu = get_cpu();
-       per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-       per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-       put_cpu();
+       preempt_disable();
+       __this_cpu_add(chan->local->bytes_transferred, len);
+       __this_cpu_inc(chan->local->memcpy_count);
+       preempt_enable();
 
        return cookie;
 }
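The three dmaengine hunks above all apply the same conversion: a get_cpu()/per_cpu_ptr()/put_cpu() sequence around a statistics update becomes __this_cpu_add()/__this_cpu_inc() under preempt_disable(). A minimal sketch of the pattern, with hypothetical names (my_stats, account_bytes) that are not part of this series:

	/* stats points at memory obtained from alloc_percpu(struct my_stats) */
	struct my_stats { unsigned long nbytes; unsigned long nops; };

	/* old style: pin the CPU, index its copy, unpin */
	static void account_bytes_old(struct my_stats *stats, size_t len)
	{
		int cpu = get_cpu();
		per_cpu_ptr(stats, cpu)->nbytes += len;
		per_cpu_ptr(stats, cpu)->nops++;
		put_cpu();
	}

	/* new style: same effect, but the arch may turn each update into a
	 * single per-cpu-relative instruction */
	static void account_bytes_new(struct my_stats *stats, size_t len)
	{
		preempt_disable();
		__this_cpu_add(stats->nbytes, len);
		__this_cpu_inc(stats->nops);
		preempt_enable();
	}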
index 4b89b791be6a3cf5fdb7947638778ddd4451f8ab..42be0b15084bb2445e6024eea21033d765bf7438 100644 (file)
@@ -826,8 +826,7 @@ static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
                cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 
                list_del(&cq->entry);
-               __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
-                                                 smp_processor_id()));
+               __queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks));
        }
 
        spin_unlock_irqrestore(&cct->task_lock, flags_cct);
index 6ae388849a3b1c60c7b7263041c6f46552c1a7fc..fb2b7ef7868ef6e0126c932049122f729fab7cb1 100644 (file)
@@ -69,7 +69,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
                  (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
 }
 
-static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
+static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
 
 /*S:010
  * We approach the Switcher.
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
         * meanwhile).  If that's not the case, we pretend everything in the
         * Guest has changed.
         */
-       if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
-               __get_cpu_var(last_cpu) = cpu;
+       if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+               __get_cpu_var(lg_last_cpu) = cpu;
                cpu->last_pages = pages;
                cpu->changed = CHANGED_ALL;
        }
index 8c658cf6f62f3066d84c3b7e45ce79d6d0979dd4..109d2783e4d88fc6823b57d36dd8138a30e5d68b 100644 (file)
@@ -1378,7 +1378,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
        }
        __skb_pull(skb, sizeof(*p));
 
-       st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+       st = this_cpu_ptr(sge->port_stats[p->iff]);
 
        skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
        if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
@@ -1780,8 +1780,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct adapter *adapter = dev->ml_priv;
        struct sge *sge = adapter->sge;
-       struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
-                                               smp_processor_id());
+       struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
        struct cpl_tx_pkt *cpl;
        struct sk_buff *orig_skb = skb;
        int ret;
index eae4ad749e9d87956facbfadd53036d5d882ce03..b9fcc9819837e7f79530ad11782272ea0f81a344 100644 (file)
@@ -81,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 
        /* it's OK to use per_cpu_ptr() because BHs are off */
        pcpu_lstats = dev->ml_priv;
-       lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
+       lb_stats = this_cpu_ptr(pcpu_lstats);
 
        len = skb->len;
        if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
index 63099c58a6ddd7a92f616ca196141becbd7ad463..3a15de56df9caa31b818139dbcd75e2dc99e5cc8 100644 (file)
@@ -153,15 +153,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
        struct net_device *rcv = NULL;
        struct veth_priv *priv, *rcv_priv;
        struct veth_net_stats *stats, *rcv_stats;
-       int length, cpu;
+       int length;
 
        priv = netdev_priv(dev);
        rcv = priv->peer;
        rcv_priv = netdev_priv(rcv);
 
-       cpu = smp_processor_id();
-       stats = per_cpu_ptr(priv->stats, cpu);
-       rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu);
+       stats = this_cpu_ptr(priv->stats);
+       rcv_stats = this_cpu_ptr(rcv_priv->stats);
 
        if (!(rcv->flags & IFF_UP))
                goto tx_drop;
index a7aae24f2889a4578ebd90dad52ae72aab916116..166b67ea622f11563a33c539b78c5dafc7c2503f 100644 (file)
@@ -47,7 +47,7 @@
  */
 static struct ring_buffer *op_ring_buffer_read;
 static struct ring_buffer *op_ring_buffer_write;
-DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
 
@@ -61,8 +61,7 @@ unsigned long oprofile_get_cpu_buffer_size(void)
 
 void oprofile_cpu_buffer_inc_smpl_lost(void)
 {
-       struct oprofile_cpu_buffer *cpu_buf
-               = &__get_cpu_var(cpu_buffer);
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
 
        cpu_buf->sample_lost_overflow++;
 }
@@ -95,7 +94,7 @@ int alloc_cpu_buffers(void)
                goto fail;
 
        for_each_possible_cpu(i) {
-               struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+               struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
 
                b->last_task = NULL;
                b->last_is_kernel = -1;
@@ -122,7 +121,7 @@ void start_cpu_work(void)
        work_enabled = 1;
 
        for_each_online_cpu(i) {
-               struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+               struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
 
                /*
                 * Spread the work by 1 jiffy per cpu so they dont all
@@ -139,7 +138,7 @@ void end_cpu_work(void)
        work_enabled = 0;
 
        for_each_online_cpu(i) {
-               struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+               struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
 
                cancel_delayed_work(&b->work);
        }
@@ -330,7 +329,7 @@ static inline void
 __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                          unsigned long event, int is_kernel)
 {
-       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
        unsigned long backtrace = oprofile_backtrace_depth;
 
        /*
@@ -375,7 +374,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
 {
        struct op_sample *sample;
        int is_kernel = !user_mode(regs);
-       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
 
        cpu_buf->sample_received++;
 
@@ -430,13 +429,13 @@ int oprofile_write_commit(struct op_entry *entry)
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
-       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
        log_sample(cpu_buf, pc, 0, is_kernel, event);
 }
 
 void oprofile_add_trace(unsigned long pc)
 {
-       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
 
        if (!cpu_buf->tracing)
                return;
index 272995d20293ab10aa4d77fb44d7f4bd79d76afb..68ea16ab645f3e3e134b56db9269df2044234b92 100644 (file)
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
        struct delayed_work work;
 };
 
-DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 
 /*
  * Resets the cpu buffer to a sane state.
@@ -60,7 +60,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
  */
 static inline void op_cpu_buffer_reset(int cpu)
 {
-       struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
+       struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
 
        cpu_buf->last_is_kernel = -1;
        cpu_buf->last_task = NULL;
index 61689e814d465fc90bb0652d0c5cec9380e185f5..917d28ebeacd24ab2a6f0f3772be1846b4011bb5 100644 (file)
@@ -23,7 +23,7 @@ void oprofile_reset_stats(void)
        int i;
 
        for_each_possible_cpu(i) {
-               cpu_buf = &per_cpu(cpu_buffer, i);
+               cpu_buf = &per_cpu(op_cpu_buffer, i);
                cpu_buf->sample_received = 0;
                cpu_buf->sample_lost_overflow = 0;
                cpu_buf->backtrace_aborted = 0;
@@ -51,7 +51,7 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
                return;
 
        for_each_possible_cpu(i) {
-               cpu_buf = &per_cpu(cpu_buffer, i);
+               cpu_buf = &per_cpu(op_cpu_buffer, i);
                snprintf(buf, 10, "cpu%d", i);
                cpudir = oprofilefs_mkdir(sb, dir, buf);
 
index 395c04c2b00fcd5cca85a696bccddf6f540b7064..98c04cac43c1d87467ee60f43807c0dbc654409b 100644 (file)
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
 #define IUCV_DBF_TEXT_(name, level, text...) \
        do { \
                if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
-                       char* iucv_dbf_txt_buf = \
-                                       get_cpu_var(iucv_dbf_txt_buf); \
-                       sprintf(iucv_dbf_txt_buf, text); \
-                       debug_text_event(iucv_dbf_##name, level, \
-                                               iucv_dbf_txt_buf); \
+                       char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
+                       sprintf(__buf, text); \
+                       debug_text_event(iucv_dbf_##name, level, __buf); \
                        put_cpu_var(iucv_dbf_txt_buf); \
                } \
        } while (0)
index c1e19d5b5985f2a36d39801e697b9b45f7283f50..b1fd3daadc9c50522894fd00ec62bb8e600f9fd0 100644 (file)
@@ -3955,7 +3955,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
         * per cpu locality group is to reduce the contention between block
         * request from multiple CPUs.
         */
-       ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
+       ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
 
        /* we're going to use group allocation */
        ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
index ceda50aad73cc5a3bc93baeaaa7771ebd7a0f00f..46d779abafd356e3344ef79c151466dfc863fed5 100644 (file)
@@ -25,13 +25,7 @@ struct nfs_iostats {
 static inline void nfs_inc_server_stats(const struct nfs_server *server,
                                        enum nfs_stat_eventcounters stat)
 {
-       struct nfs_iostats *iostats;
-       int cpu;
-
-       cpu = get_cpu();
-       iostats = per_cpu_ptr(server->io_stats, cpu);
-       iostats->events[stat]++;
-       put_cpu();
+       this_cpu_inc(server->io_stats->events[stat]);
 }
 
 static inline void nfs_inc_stats(const struct inode *inode,
@@ -44,13 +38,7 @@ static inline void nfs_add_server_stats(const struct nfs_server *server,
                                        enum nfs_stat_bytecounters stat,
                                        unsigned long addend)
 {
-       struct nfs_iostats *iostats;
-       int cpu;
-
-       cpu = get_cpu();
-       iostats = per_cpu_ptr(server->io_stats, cpu);
-       iostats->bytes[stat] += addend;
-       put_cpu();
+       this_cpu_add(server->io_stats->bytes[stat], addend);
 }
 
 static inline void nfs_add_stats(const struct inode *inode,
@@ -65,13 +53,7 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
                                         enum nfs_stat_fscachecounters stat,
                                         unsigned long addend)
 {
-       struct nfs_iostats *iostats;
-       int cpu;
-
-       cpu = get_cpu();
-       iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu);
-       iostats->fscache[stat] += addend;
-       put_cpu();
+       this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
 }
 #endif
 
index 66a888a9ad6f87a0f913bc6b1508d0f8897927fb..bfffd6334abbfae37d804174241bc1a1c17760d3 100644 (file)
@@ -2389,12 +2389,12 @@ xfs_icsb_modify_counters(
 {
        xfs_icsb_cnts_t *icsbp;
        long long       lcounter;       /* long counter for 64 bit fields */
-       int             cpu, ret = 0;
+       int             ret = 0;
 
        might_sleep();
 again:
-       cpu = get_cpu();
-       icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
+       preempt_disable();
+       icsbp = this_cpu_ptr(mp->m_sb_cnts);
 
        /*
         * if the counter is disabled, go to slow path
@@ -2438,11 +2438,11 @@ again:
                break;
        }
        xfs_icsb_unlock_cntr(icsbp);
-       put_cpu();
+       preempt_enable();
        return 0;
 
 slow_path:
-       put_cpu();
+       preempt_enable();
 
        /*
         * serialise with a mutex so we don't burn lots of cpu on
@@ -2490,7 +2490,7 @@ slow_path:
 
 balance_counter:
        xfs_icsb_unlock_cntr(icsbp);
-       put_cpu();
+       preempt_enable();
 
        /*
         * We may have multiple threads here if multiple per-cpu
index 90079c373f1c462d2957b98b64f5f15b8ea7ac19..8087b90d4673d52cdd2f2d8ccf888009b7d0e115 100644 (file)
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define __raw_get_cpu_var(var) \
        (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
 
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void);
 #define per_cpu(var, cpu)                      (*((void)(cpu), &per_cpu_var(var)))
 #define __get_cpu_var(var)                     per_cpu_var(var)
 #define __raw_get_cpu_var(var)                 per_cpu_var(var)
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
 
 #endif /* SMP */
 
index 9bd03193ecd48c5e2c1283309a2c8ac2914cab37..5a5d6ce4bd55a156c979c89072b0c830c93fc5d2 100644 (file)
@@ -60,6 +60,7 @@
 
 #define DEFINE_PER_CPU_SECTION(type, name, sec)                                \
        __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;                    \
+       extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;            \
        __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;                   \
        __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak                 \
        __typeof__(type) per_cpu__##name
index 878836ca999c0f3e2b65836cbe8bd7816d44ffaa..cf5efbcf716c8cecf74d4d315e2619f6fdcfa1f4 100644 (file)
@@ -34,8 +34,6 @@
 
 #ifdef CONFIG_SMP
 
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE             PFN_ALIGN(64 << 10)
 
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #define per_cpu_ptr(ptr, cpu)  SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
 extern void *__alloc_reserved_percpu(size_t size, size_t align);
-
-#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-struct percpu_data {
-       void *ptrs[1];
-};
-
-/* pointer disguising messes up the kmemleak objects tracking */
-#ifndef CONFIG_DEBUG_KMEMLEAK
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-#else
-#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
-#endif
-
-#define per_cpu_ptr(ptr, cpu)                                          \
-({                                                                     \
-        struct percpu_data *__p = __percpu_disguise(ptr);              \
-        (__typeof__(ptr))__p->ptrs[(cpu)];                             \
-})
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
 extern void *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
        kfree(p);
 }
 
+static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+       return __pa(addr);
+}
+
 static inline void __init setup_per_cpu_areas(void) { }
 
 static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 
 #endif /* CONFIG_SMP */
 
-#define alloc_percpu(type)     (type *)__alloc_percpu(sizeof(type), \
-                                                      __alignof__(type))
+#define alloc_percpu(type)     \
+       (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
 
 /*
  * Optional methods for optimized non-lvalue per-cpu variable access.
@@ -243,4 +225,404 @@ do {                                                                      \
 # define percpu_xor(var, val)          __percpu_generic_to_op(var, (val), ^=)
 #endif
 
+/*
+ * Branching macros that split an operation into a set of functions
+ * called for the different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#define __pcpu_size_call_return(stem, variable)                                \
+({     typeof(variable) pscr_ret__;                                    \
+       switch(sizeof(variable)) {                                      \
+       case 1: pscr_ret__ = stem##1(variable);break;                   \
+       case 2: pscr_ret__ = stem##2(variable);break;                   \
+       case 4: pscr_ret__ = stem##4(variable);break;                   \
+       case 8: pscr_ret__ = stem##8(variable);break;                   \
+       default:                                                        \
+               __bad_size_call_parameter();break;                      \
+       }                                                               \
+       pscr_ret__;                                                     \
+})
+
+#define __pcpu_size_call(stem, variable, ...)                          \
+do {                                                                   \
+       switch(sizeof(variable)) {                                      \
+               case 1: stem##1(variable, __VA_ARGS__);break;           \
+               case 2: stem##2(variable, __VA_ARGS__);break;           \
+               case 4: stem##4(variable, __VA_ARGS__);break;           \
+               case 8: stem##8(variable, __VA_ARGS__);break;           \
+               default:                                                \
+                       __bad_size_call_parameter();break;              \
+       }                                                               \
+} while (0)
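+
+/*
+ * For example, for a hypothetical per cpu int counter the size call
+ * macros above dispatch to the 4-byte variants:
+ *
+ *	DEFINE_PER_CPU(int, nr_foo);
+ *
+ *	this_cpu_inc(per_cpu_var(nr_foo));	 expands via this_cpu_add_4()
+ *	x = this_cpu_read(per_cpu_var(nr_foo));	 expands via this_cpu_read_4()
+ */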
+
+/*
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables (which can be
+ * obtained using per_cpu_var(xx)).
+ *
+ * These operations guarantee exclusivity of access for other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The first group is used for accesses that must be done in a
+ * preemption safe way because the caller cannot guarantee that the
+ * context is preempt safe. Interrupts may still occur; if an interrupt
+ * also modifies the variable then RMW actions will not be reliable.
+ *
+ * The arch code can provide optimized functions in two ways:
+ *
+ * 1. Override the operation completely, e.g. define this_cpu_add().
+ *    The arch must then ensure that the various scalar formats passed
+ *    are handled correctly.
+ *
+ * 2. Provide functions for certain scalar sizes, e.g. provide
+ *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
+ *    sized RMW actions. If arch code does not provide operations for
+ *    a scalar size then the fallback in the generic code will be
+ *    used.
+ */
+
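+/*
+ * For example (with illustrative names), a statistics field in memory
+ * obtained from alloc_percpu() can be updated without an explicit
+ * get_cpu()/put_cpu() pair:
+ *
+ *	struct my_iostats { unsigned long events[8]; } *io_stats;
+ *
+ *	this_cpu_inc(io_stats->events[i]);
+ *
+ * The generic fallbacks below bracket the access with preempt_disable()/
+ * preempt_enable(); an arch override can instead emit a single
+ * per-cpu-relative instruction that needs neither.
+ */
+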
+#define _this_cpu_generic_read(pcp)                                    \
+({     typeof(pcp) ret__;                                              \
+       preempt_disable();                                              \
+       ret__ = *this_cpu_ptr(&(pcp));                                  \
+       preempt_enable();                                               \
+       ret__;                                                          \
+})
+
+#ifndef this_cpu_read
+# ifndef this_cpu_read_1
+#  define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_2
+#  define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_4
+#  define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_8
+#  define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
+# endif
+# define this_cpu_read(pcp)    __pcpu_size_call_return(this_cpu_read_, (pcp))
+#endif
+
+#define _this_cpu_generic_to_op(pcp, val, op)                          \
+do {                                                                   \
+       preempt_disable();                                              \
+       *__this_cpu_ptr(&pcp) op val;                                   \
+       preempt_enable();                                               \
+} while (0)
+
+#ifndef this_cpu_write
+# ifndef this_cpu_write_1
+#  define this_cpu_write_1(pcp, val)   _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_2
+#  define this_cpu_write_2(pcp, val)   _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_4
+#  define this_cpu_write_4(pcp, val)   _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_8
+#  define this_cpu_write_8(pcp, val)   _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define this_cpu_write(pcp, val)      __pcpu_size_call(this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_add
+# ifndef this_cpu_add_1
+#  define this_cpu_add_1(pcp, val)     _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_2
+#  define this_cpu_add_2(pcp, val)     _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_4
+#  define this_cpu_add_4(pcp, val)     _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_8
+#  define this_cpu_add_8(pcp, val)     _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define this_cpu_add(pcp, val)                __pcpu_size_call(this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_sub
+# define this_cpu_sub(pcp, val)                this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef this_cpu_inc
+# define this_cpu_inc(pcp)             this_cpu_add((pcp), 1)
+#endif
+
+#ifndef this_cpu_dec
+# define this_cpu_dec(pcp)             this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef this_cpu_and
+# ifndef this_cpu_and_1
+#  define this_cpu_and_1(pcp, val)     _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_2
+#  define this_cpu_and_2(pcp, val)     _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_4
+#  define this_cpu_and_4(pcp, val)     _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_8
+#  define this_cpu_and_8(pcp, val)     _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define this_cpu_and(pcp, val)                __pcpu_size_call(this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_or
+# ifndef this_cpu_or_1
+#  define this_cpu_or_1(pcp, val)      _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_2
+#  define this_cpu_or_2(pcp, val)      _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_4
+#  define this_cpu_or_4(pcp, val)      _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_8
+#  define this_cpu_or_8(pcp, val)      _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define this_cpu_or(pcp, val)         __pcpu_size_call(this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_xor
+# ifndef this_cpu_xor_1
+#  define this_cpu_xor_1(pcp, val)     _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_2
+#  define this_cpu_xor_2(pcp, val)     _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_4
+#  define this_cpu_xor_4(pcp, val)     _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_8
+#  define this_cpu_xor_8(pcp, val)     _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define this_cpu_xor(pcp, val)                __pcpu_size_call(this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * Generic percpu operations that do not require preemption handling.
+ * Either we do not care about races or the caller has the
+ * responsibility of handling preemption issues. Arch code can still
+ * override these instructions since the arch per cpu code may be more
+ * efficient and may actually get race freeness for free (that is the
+ * case for x86 for example).
+ *
+ * If there is no other protection through preempt disable and/or
+ * disabling interrupts then one of these RMW operations can show unexpected
+ * behavior because the execution thread was rescheduled on another processor
+ * or an interrupt occurred and the same percpu variable was modified from
+ * the interrupt context.
+ */
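+/*
+ * For example (with illustrative names), code that already runs with
+ * preemption off - a softirq handler, or a section under
+ * local_bh_disable() - can use the cheaper forms directly:
+ *
+ *	local_bh_disable();
+ *	__this_cpu_inc(stats->drops);
+ *	local_bh_enable();
+ */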
+#ifndef __this_cpu_read
+# ifndef __this_cpu_read_1
+#  define __this_cpu_read_1(pcp)       (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_2
+#  define __this_cpu_read_2(pcp)       (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_4
+#  define __this_cpu_read_4(pcp)       (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_8
+#  define __this_cpu_read_8(pcp)       (*__this_cpu_ptr(&(pcp)))
+# endif
+# define __this_cpu_read(pcp)  __pcpu_size_call_return(__this_cpu_read_, (pcp))
+#endif
+
+#define __this_cpu_generic_to_op(pcp, val, op)                         \
+do {                                                                   \
+       *__this_cpu_ptr(&(pcp)) op val;                                 \
+} while (0)
+
+#ifndef __this_cpu_write
+# ifndef __this_cpu_write_1
+#  define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_2
+#  define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_4
+#  define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_8
+#  define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define __this_cpu_write(pcp, val)    __pcpu_size_call(__this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_add
+# ifndef __this_cpu_add_1
+#  define __this_cpu_add_1(pcp, val)   __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_2
+#  define __this_cpu_add_2(pcp, val)   __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_4
+#  define __this_cpu_add_4(pcp, val)   __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_8
+#  define __this_cpu_add_8(pcp, val)   __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define __this_cpu_add(pcp, val)      __pcpu_size_call(__this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val)      __this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp)           __this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp)           __this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# ifndef __this_cpu_and_1
+#  define __this_cpu_and_1(pcp, val)   __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_2
+#  define __this_cpu_and_2(pcp, val)   __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_4
+#  define __this_cpu_and_4(pcp, val)   __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_8
+#  define __this_cpu_and_8(pcp, val)   __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define __this_cpu_and(pcp, val)      __pcpu_size_call(__this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_or
+# ifndef __this_cpu_or_1
+#  define __this_cpu_or_1(pcp, val)    __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_2
+#  define __this_cpu_or_2(pcp, val)    __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_4
+#  define __this_cpu_or_4(pcp, val)    __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_8
+#  define __this_cpu_or_8(pcp, val)    __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define __this_cpu_or(pcp, val)       __pcpu_size_call(__this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_xor
+# ifndef __this_cpu_xor_1
+#  define __this_cpu_xor_1(pcp, val)   __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_2
+#  define __this_cpu_xor_2(pcp, val)   __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_4
+#  define __this_cpu_xor_4(pcp, val)   __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_8
+#  define __this_cpu_xor_8(pcp, val)   __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define __this_cpu_xor(pcp, val)      __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * IRQ safe versions of the per cpu RMW operations. Note that these operations
+ * are *not* safe against modification of the same variable from other
+ * processors (which is what one gets when using regular atomic operations).
+ * They are guaranteed to be atomic vs. local interrupts and
+ * preemption only.
+ */
+#define irqsafe_cpu_generic_to_op(pcp, val, op)                                \
+do {                                                                   \
+       unsigned long flags;                                            \
+       local_irq_save(flags);                                          \
+       *__this_cpu_ptr(&(pcp)) op val;                                 \
+       local_irq_restore(flags);                                       \
+} while (0)
+
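+/*
+ * For example (with an illustrative per cpu variable), a counter that is
+ * also updated from an interrupt handler on the same CPU can use the
+ * irqsafe form instead of an open-coded local_irq_save()/restore() pair:
+ *
+ *	irqsafe_cpu_inc(per_cpu_var(rx_dropped));
+ *
+ * This is atomic with respect to local interrupts and preemption, but,
+ * like the other operations in this file, not with respect to other CPUs.
+ */
+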
+#ifndef irqsafe_cpu_add
+# ifndef irqsafe_cpu_add_1
+#  define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_2
+#  define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_4
+#  define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_8
+#  define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_sub
+# define irqsafe_cpu_sub(pcp, val)     irqsafe_cpu_add((pcp), -(val))
+#endif
+
+#ifndef irqsafe_cpu_inc
+# define irqsafe_cpu_inc(pcp)  irqsafe_cpu_add((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_dec
+# define irqsafe_cpu_dec(pcp)  irqsafe_cpu_sub((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_and
+# ifndef irqsafe_cpu_and_1
+#  define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_2
+#  define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_4
+#  define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_8
+#  define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_or
+# ifndef irqsafe_cpu_or_1
+#  define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_2
+#  define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_4
+#  define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_8
+#  define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_xor
+# ifndef irqsafe_cpu_xor_1
+#  define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_2
+#  define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_4
+#  define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_8
+#  define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
+#endif
+
 #endif /* __LINUX_PERCPU_H */
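Taken together, the allocator side and the new accessors compose as in this sketch (hypothetical structure and variable names; len stands for the byte count being accounted; error handling omitted):

	struct link_stats {
		unsigned long packets;
		unsigned long bytes;
	};

	struct link_stats *stats = alloc_percpu(struct link_stats);
	struct link_stats *s;
	unsigned long total = 0;
	int cpu;

	/* fast path: update this CPU's copy (caller has BHs or preemption off) */
	s = this_cpu_ptr(stats);
	s->packets++;
	s->bytes += len;

	/* slow path: fold all copies, e.g. when reporting statistics */
	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(stats, cpu)->packets;

	free_percpu(stats);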
index 2d0f222388a8e9f2dfb3f8bec716eff2999d78f2..d85889710f9b6a4fbf5040fd941c1cfca7847428 100644 (file)
@@ -76,24 +76,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
 static inline void __count_vm_event(enum vm_event_item item)
 {
-       __get_cpu_var(vm_event_states).event[item]++;
+       __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
 }
 
 static inline void count_vm_event(enum vm_event_item item)
 {
-       get_cpu_var(vm_event_states).event[item]++;
-       put_cpu();
+       this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
 }
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
-       __get_cpu_var(vm_event_states).event[item] += delta;
+       __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
-       get_cpu_var(vm_event_states).event[item] += delta;
-       put_cpu();
+       this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
 }
 
 extern void all_vm_events(unsigned long *);
index 0302f31a2fb7e53956f089efac74848fbd15466d..b0173202cad96f3a8aa55df85d16b2d11a62b608 100644 (file)
@@ -88,12 +88,7 @@ struct neigh_statistics {
        unsigned long unres_discards;   /* number of unresolved drops */
 };
 
-#define NEIGH_CACHE_STAT_INC(tbl, field)                               \
-       do {                                                            \
-               preempt_disable();                                      \
-               (per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
-               preempt_enable();                                       \
-       } while (0)
+#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
 
 struct neighbour {
        struct neighbour        *next;
index 5cf7270e3ffc3e94c6d80bd79168056e9c1e539f..a0904adfb8f7aba343152a5af47aa7e818c3b39d 100644 (file)
@@ -293,11 +293,11 @@ extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
 
 #define NF_CT_STAT_INC(net, count)     \
-       (per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++)
+       __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count)              \
 do {                                                   \
        local_bh_disable();                             \
-       per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++;   \
+       __this_cpu_inc((net)->ct.stat->count);          \
        local_bh_enable();                              \
 } while (0)
 
index 8c842e06bec86ad77cbe2f612d1a29d09bb3b421..f0d756f2ac99773f573613d145752b2984dddd13 100644 (file)
@@ -136,45 +136,31 @@ struct linux_xfrm_mib {
 #define SNMP_STAT_BHPTR(name)  (name[0])
 #define SNMP_STAT_USRPTR(name) (name[1])
 
-#define SNMP_INC_STATS_BH(mib, field)  \
-       (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
-#define SNMP_INC_STATS_USER(mib, field) \
-       do { \
-               per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \
-               put_cpu(); \
-       } while (0)
-#define SNMP_INC_STATS(mib, field)     \
-       do { \
-               per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \
-               put_cpu(); \
-       } while (0)
-#define SNMP_DEC_STATS(mib, field)     \
-       do { \
-               per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \
-               put_cpu(); \
-       } while (0)
-#define SNMP_ADD_STATS(mib, field, addend)     \
-       do { \
-               per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field] += addend; \
-               put_cpu(); \
-       } while (0)
-#define SNMP_ADD_STATS_BH(mib, field, addend)  \
-       (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
-#define SNMP_ADD_STATS_USER(mib, field, addend)        \
-       do { \
-               per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \
-               put_cpu(); \
-       } while (0)
+#define SNMP_INC_STATS_BH(mib, field)  \
+                       __this_cpu_inc(mib[0]->mibs[field])
+#define SNMP_INC_STATS_USER(mib, field)        \
+                       this_cpu_inc(mib[1]->mibs[field])
+#define SNMP_INC_STATS(mib, field)     \
+                       this_cpu_inc(mib[!in_softirq()]->mibs[field])
+#define SNMP_DEC_STATS(mib, field)     \
+                       this_cpu_dec(mib[!in_softirq()]->mibs[field])
+#define SNMP_ADD_STATS_BH(mib, field, addend)  \
+                       __this_cpu_add(mib[0]->mibs[field], addend)
+#define SNMP_ADD_STATS_USER(mib, field, addend)        \
+                       this_cpu_add(mib[1]->mibs[field], addend)
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)      \
        do { \
-               __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], get_cpu());\
+               __typeof__(mib[0]) ptr; \
+               preempt_disable(); \
+               ptr = this_cpu_ptr((mib)[!in_softirq()]); \
                ptr->mibs[basefield##PKTS]++; \
                ptr->mibs[basefield##OCTETS] += addend;\
-               put_cpu(); \
+               preempt_enable(); \
        } while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)   \
        do { \
-               __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id());\
+               __typeof__(mib[0]) ptr = \
+                       __this_cpu_ptr((mib)[!in_softirq()]); \
                ptr->mibs[basefield##PKTS]++; \
                ptr->mibs[basefield##OCTETS] += addend;\
        } while (0)
index 4f8df01dbe51ad05e1957fa7e1eecd20af7e0765..429540c70d3f497ae7fc58998b0f248817a2cbf9 100644 (file)
@@ -140,7 +140,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 }
 
 #ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+                     cpu_lock_stats);
 
 static inline u64 lockstat_clock(void)
 {
@@ -198,7 +199,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
        memset(&stats, 0, sizeof(struct lock_class_stats));
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *pcs =
-                       &per_cpu(lock_stats, cpu)[class - lock_classes];
+                       &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];
@@ -225,7 +226,7 @@ void clear_lock_stats(struct lock_class *class)
 
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *cpu_stats =
-                       &per_cpu(lock_stats, cpu)[class - lock_classes];
+                       &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
        }
@@ -235,12 +236,12 @@ void clear_lock_stats(struct lock_class *class)
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 {
-       return &get_cpu_var(lock_stats)[class - lock_classes];
+       return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
 }
 
 static void put_lock_stats(struct lock_class_stats *stats)
 {
-       put_cpu_var(lock_stats);
+       put_cpu_var(cpu_lock_stats);
 }
 
 static void lock_release_holdtime(struct held_lock *hlock)
index 5842a71cf0527163c960cc8c41d549a5033e3443..12afc5a3ddd3a73972604050695ff4ab4ef708f6 100644 (file)
@@ -370,8 +370,6 @@ EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
 
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
 static void *percpu_modalloc(unsigned long size, unsigned long align,
                             const char *name)
 {
@@ -395,154 +393,6 @@ static void percpu_modfree(void *freeme)
        free_percpu(freeme);
 }
 
-#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-/* Number of blocks used and allocated. */
-static unsigned int pcpu_num_used, pcpu_num_allocated;
-/* Size of each block.  -ve means used. */
-static int *pcpu_size;
-
-static int split_block(unsigned int i, unsigned short size)
-{
-       /* Reallocation required? */
-       if (pcpu_num_used + 1 > pcpu_num_allocated) {
-               int *new;
-
-               new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2,
-                              GFP_KERNEL);
-               if (!new)
-                       return 0;
-
-               pcpu_num_allocated *= 2;
-               pcpu_size = new;
-       }
-
-       /* Insert a new subblock */
-       memmove(&pcpu_size[i+1], &pcpu_size[i],
-               sizeof(pcpu_size[0]) * (pcpu_num_used - i));
-       pcpu_num_used++;
-
-       pcpu_size[i+1] -= size;
-       pcpu_size[i] = size;
-       return 1;
-}
-
-static inline unsigned int block_size(int val)
-{
-       if (val < 0)
-               return -val;
-       return val;
-}
-
-static void *percpu_modalloc(unsigned long size, unsigned long align,
-                            const char *name)
-{
-       unsigned long extra;
-       unsigned int i;
-       void *ptr;
-       int cpu;
-
-       if (align > PAGE_SIZE) {
-               printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
-                      name, align, PAGE_SIZE);
-               align = PAGE_SIZE;
-       }
-
-       ptr = __per_cpu_start;
-       for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-               /* Extra for alignment requirement. */
-               extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
-               BUG_ON(i == 0 && extra != 0);
-
-               if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
-                       continue;
-
-               /* Transfer extra to previous block. */
-               if (pcpu_size[i-1] < 0)
-                       pcpu_size[i-1] -= extra;
-               else
-                       pcpu_size[i-1] += extra;
-               pcpu_size[i] -= extra;
-               ptr += extra;
-
-               /* Split block if warranted */
-               if (pcpu_size[i] - size > sizeof(unsigned long))
-                       if (!split_block(i, size))
-                               return NULL;
-
-               /* add the per-cpu scanning areas */
-               for_each_possible_cpu(cpu)
-                       kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
-                                      GFP_KERNEL);
-
-               /* Mark allocated */
-               pcpu_size[i] = -pcpu_size[i];
-               return ptr;
-       }
-
-       printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
-              size);
-       return NULL;
-}
-
-static void percpu_modfree(void *freeme)
-{
-       unsigned int i;
-       void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
-       int cpu;
-
-       /* First entry is core kernel percpu data. */
-       for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-               if (ptr == freeme) {
-                       pcpu_size[i] = -pcpu_size[i];
-                       goto free;
-               }
-       }
-       BUG();
-
- free:
-       /* remove the per-cpu scanning areas */
-       for_each_possible_cpu(cpu)
-               kmemleak_free(freeme + per_cpu_offset(cpu));
-
-       /* Merge with previous? */
-       if (pcpu_size[i-1] >= 0) {
-               pcpu_size[i-1] += pcpu_size[i];
-               pcpu_num_used--;
-               memmove(&pcpu_size[i], &pcpu_size[i+1],
-                       (pcpu_num_used - i) * sizeof(pcpu_size[0]));
-               i--;
-       }
-       /* Merge with next? */
-       if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
-               pcpu_size[i] += pcpu_size[i+1];
-               pcpu_num_used--;
-               memmove(&pcpu_size[i+1], &pcpu_size[i+2],
-                       (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
-       }
-}
-
-static int percpu_modinit(void)
-{
-       pcpu_num_used = 2;
-       pcpu_num_allocated = 2;
-       pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
-                           GFP_KERNEL);
-       /* Static in-kernel percpu data (used). */
-       pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
-       /* Free room. */
-       pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
-       if (pcpu_size[1] < 0) {
-               printk(KERN_ERR "No per-cpu room for modules.\n");
-               pcpu_num_used = 1;
-       }
-
-       return 0;
-}
-__initcall(percpu_modinit);
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
 static unsigned int find_pcpusec(Elf_Ehdr *hdr,
                                 Elf_Shdr *sechdrs,
                                 const char *secstrings)
index a621a67ef4e3bd5e4aa97522cdff8b443db15e32..9bb52177af02a3e20aa347e3b65c0a236caa1922 100644 (file)
@@ -763,13 +763,13 @@ static void rcu_torture_timer(unsigned long unused)
                /* Should not happen, but... */
                pipe_count = RCU_TORTURE_PIPE_LEN;
        }
-       ++__get_cpu_var(rcu_torture_count)[pipe_count];
+       __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
        completed = cur_ops->completed() - completed;
        if (completed > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                completed = RCU_TORTURE_PIPE_LEN;
        }
-       ++__get_cpu_var(rcu_torture_batch)[completed];
+       __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
        preempt_enable();
        cur_ops->readunlock(idx);
 }
@@ -818,13 +818,13 @@ rcu_torture_reader(void *arg)
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
-               ++__get_cpu_var(rcu_torture_count)[pipe_count];
+               __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
                completed = cur_ops->completed() - completed;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
                }
-               ++__get_cpu_var(rcu_torture_batch)[completed];
+               __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
                preempt_enable();
                cur_ops->readunlock(idx);
                schedule();
index ff39cadf621e91834dcdc9116d123f8912eab225..fd05861b2111005a5a88b386d7d77ee036f5e160 100644 (file)
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
@@ -8286,14 +8286,14 @@ enum s_alloc {
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
 
 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
                 struct sched_group **sg, struct cpumask *unused)
 {
        if (sg)
-               *sg = &per_cpu(sched_group_cpus, cpu).sg;
+               *sg = &per_cpu(sched_groups, cpu).sg;
        return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */
@@ -9583,7 +9583,7 @@ void __init sched_init(void)
 #elif defined CONFIG_USER_SCHED
                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
                init_tg_rt_entry(&init_task_group,
-                               &per_cpu(init_rt_rq, i),
+                               &per_cpu(init_rt_rq_var, i),
                                &per_cpu(init_sched_rt_entity, i), i, 1,
                                root_task_group.rt_se[i]);
 #endif
index 21939d9e830e22b040f15e5562c97b0a8b8a9e47..a09502e2ef758721917ab537e345da0036197628 100644 (file)
@@ -697,7 +697,7 @@ void __init softirq_init(void)
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
-static int ksoftirqd(void * __bind_cpu)
+static int run_ksoftirqd(void * __bind_cpu)
 {
        set_current_state(TASK_INTERRUPTIBLE);
 
@@ -810,7 +810,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
+               p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
index 81324d12eb35a5db7fae8a0ae3d18c76ca38ce67..d22579087e27a5bdb04e3962b117432c29497b33 100644 (file)
@@ -22,9 +22,9 @@
 
 static DEFINE_SPINLOCK(print_lock);
 
-static DEFINE_PER_CPU(unsigned long, touch_timestamp);
-static DEFINE_PER_CPU(unsigned long, print_timestamp);
-static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
+static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
+static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
+static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
@@ -70,12 +70,12 @@ static void __touch_softlockup_watchdog(void)
 {
        int this_cpu = raw_smp_processor_id();
 
-       __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
+       __raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu);
 }
 
 void touch_softlockup_watchdog(void)
 {
-       __raw_get_cpu_var(touch_timestamp) = 0;
+       __raw_get_cpu_var(softlockup_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -85,7 +85,7 @@ void touch_all_softlockup_watchdogs(void)
 
        /* Cause each CPU to re-update its timestamp rather than complain */
        for_each_online_cpu(cpu)
-               per_cpu(touch_timestamp, cpu) = 0;
+               per_cpu(softlockup_touch_ts, cpu) = 0;
 }
 EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
 
@@ -104,28 +104,28 @@ int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 void softlockup_tick(void)
 {
        int this_cpu = smp_processor_id();
-       unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
-       unsigned long print_timestamp;
+       unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu);
+       unsigned long print_ts;
        struct pt_regs *regs = get_irq_regs();
        unsigned long now;
 
        /* Is detection switched off? */
-       if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
+       if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) {
                /* Be sure we don't false trigger if switched back on */
-               if (touch_timestamp)
-                       per_cpu(touch_timestamp, this_cpu) = 0;
+               if (touch_ts)
+                       per_cpu(softlockup_touch_ts, this_cpu) = 0;
                return;
        }
 
-       if (touch_timestamp == 0) {
+       if (touch_ts == 0) {
                __touch_softlockup_watchdog();
                return;
        }
 
-       print_timestamp = per_cpu(print_timestamp, this_cpu);
+       print_ts = per_cpu(softlockup_print_ts, this_cpu);
 
        /* report at most once a second */
-       if (print_timestamp == touch_timestamp || did_panic)
+       if (print_ts == touch_ts || did_panic)
                return;
 
        /* do not print during early bootup: */
@@ -140,18 +140,18 @@ void softlockup_tick(void)
         * Wake up the high-prio watchdog task twice per
         * threshold timespan.
         */
-       if (now > touch_timestamp + softlockup_thresh/2)
-               wake_up_process(per_cpu(watchdog_task, this_cpu));
+       if (now > touch_ts + softlockup_thresh/2)
+               wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
 
        /* Warn about unreasonable delays: */
-       if (now <= (touch_timestamp + softlockup_thresh))
+       if (now <= (touch_ts + softlockup_thresh))
                return;
 
-       per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+       per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
 
        spin_lock(&print_lock);
        printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
-                       this_cpu, now - touch_timestamp,
+                       this_cpu, now - touch_ts,
                        current->comm, task_pid_nr(current));
        print_modules();
        print_irqtrace_events(current);
@@ -209,32 +209,32 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               BUG_ON(per_cpu(watchdog_task, hotcpu));
+               BUG_ON(per_cpu(softlockup_watchdog, hotcpu));
                p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
-               per_cpu(touch_timestamp, hotcpu) = 0;
-               per_cpu(watchdog_task, hotcpu) = p;
+               per_cpu(softlockup_touch_ts, hotcpu) = 0;
+               per_cpu(softlockup_watchdog, hotcpu) = p;
                kthread_bind(p, hotcpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-               wake_up_process(per_cpu(watchdog_task, hotcpu));
+               wake_up_process(per_cpu(softlockup_watchdog, hotcpu));
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
-               if (!per_cpu(watchdog_task, hotcpu))
+               if (!per_cpu(softlockup_watchdog, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
-               kthread_bind(per_cpu(watchdog_task, hotcpu),
+               kthread_bind(per_cpu(softlockup_watchdog, hotcpu),
                             cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
-               p = per_cpu(watchdog_task, hotcpu);
-               per_cpu(watchdog_task, hotcpu) = NULL;
+               p = per_cpu(softlockup_watchdog, hotcpu);
+               per_cpu(softlockup_watchdog, hotcpu) = NULL;
                kthread_stop(p);
                break;
 #endif /* CONFIG_HOTPLUG_CPU */
index ee5681f8d7ecd2f1429d5455ea2839f12921b42f..63b117e9eba13fb430372601bdc36ff7d3ef42a2 100644 (file)
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
 /*
  * Per-CPU lookup locks for fast hash lookup:
  */
-static DEFINE_PER_CPU(spinlock_t, lookup_lock);
+static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock);
 
 /*
  * Mutex to serialize state changes with show-stats activities:
@@ -245,7 +245,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
        if (likely(!timer_stats_active))
                return;
 
-       lock = &per_cpu(lookup_lock, raw_smp_processor_id());
+       lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());
 
        input.timer = timer;
        input.start_func = startf;
@@ -348,9 +348,10 @@ static void sync_access(void)
        int cpu;
 
        for_each_online_cpu(cpu) {
-               spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
+               spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
+               spin_lock_irqsave(lock, flags);
                /* nothing */
-               spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
+               spin_unlock_irqrestore(lock, flags);
        }
 }
 
@@ -408,7 +409,7 @@ void __init init_timer_stats(void)
        int cpu;
 
        for_each_possible_cpu(cpu)
-               spin_lock_init(&per_cpu(lookup_lock, cpu));
+               spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
 }
 
 static int __init init_tstats_procfs(void)
index 88bd9ae2a9ed9b71b881c665f7cdb4eb722cf309..c82dfd92fdfd8d663b6146ea6a584d42ea715508 100644 (file)
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
        preempt_disable();
-       local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+       __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-       local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+       __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
        preempt_enable();
 }
 
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array      max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int                     tracer_enabled = 1;
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
        struct ftrace_entry *entry;
 
        /* If we are reading the ring buffer, don't trace */
-       if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+       if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
                return;
 
        event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -4454,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
        /* Allocate the first page for all buffers */
        for_each_tracing_cpu(i) {
                global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-               max_tr.data[i] = &per_cpu(max_data, i);
+               max_tr.data[i] = &per_cpu(max_tr_data, i);
        }
 
        trace_init_cmdlines();
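
The ftrace_cpu_disabled conversion above is typical of the local_t removal in this series: once preemption is disabled around the update, a plain per-CPU int manipulated with the __this_cpu_*() accessors is enough, and the atomic local_t operations can go. A rough sketch of that pattern under the same assumptions (the counter name is made up; per_cpu_var() is the existing prefix helper used in the hunks above):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, example_disabled);	/* hypothetical counter */

	static inline void example_disable(void)
	{
		preempt_disable();	/* stay on this CPU while touching the counter */
		__this_cpu_inc(per_cpu_var(example_disabled));
	}

	static inline void example_enable(void)
	{
		__this_cpu_dec(per_cpu_var(example_disabled));
		preempt_enable();
	}

	static inline int example_is_disabled(void)
	{
		/* Callers are expected to run with preemption disabled,
		 * just as the tracer's fast path does. */
		return __this_cpu_read(per_cpu_var(example_disabled)) != 0;
	}
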
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 7fa33cab69629299a9d4470fbf13db81d20e3c94..a52bed2eedd848ec5e7c249d479baa8d14a98ad6 100644 (file)
@@ -443,7 +443,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
 
 extern int ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+DECLARE_PER_CPU(int, ftrace_cpu_disabled);
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a43d009c561a28bcbf1078626e086c63bf475d5e..b1342c5d37cfb821cfb96fcfd610cb95cdfb1082 100644 (file)
@@ -187,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr,
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ent_entry *entry;
 
-       if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+       if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
                return 0;
 
        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -251,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr,
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ret_entry *entry;
 
-       if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+       if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
                return;
 
        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 69543a905cd5f1c92086cb47576a21028db11a37..7b97000745f5cc38601aff4101b58d9d8bb99b6c 100644 (file)
 
 #define BTS_BUFFER_SIZE (1 << 13)
 
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
 
-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
 
 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
 
 static void bts_trace_init_cpu(int cpu)
 {
-       per_cpu(tracer, cpu) =
-               ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
-                                  NULL, (size_t)-1, BTS_KERNEL);
+       per_cpu(hwb_tracer, cpu) =
+               ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+                                  BTS_BUFFER_SIZE, NULL, (size_t)-1,
+                                  BTS_KERNEL);
 
-       if (IS_ERR(per_cpu(tracer, cpu)))
-               per_cpu(tracer, cpu) = NULL;
+       if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+               per_cpu(hwb_tracer, cpu) = NULL;
 }
 
 static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
        for_each_online_cpu(cpu) {
                bts_trace_init_cpu(cpu);
 
-               if (likely(per_cpu(tracer, cpu)))
+               if (likely(per_cpu(hwb_tracer, cpu)))
                        trace_hw_branches_enabled = 1;
        }
        trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
 
        get_online_cpus();
        for_each_online_cpu(cpu) {
-               if (likely(per_cpu(tracer, cpu))) {
-                       ds_release_bts(per_cpu(tracer, cpu));
-                       per_cpu(tracer, cpu) = NULL;
+               if (likely(per_cpu(hwb_tracer, cpu))) {
+                       ds_release_bts(per_cpu(hwb_tracer, cpu));
+                       per_cpu(hwb_tracer, cpu) = NULL;
                }
        }
        trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
 
        get_online_cpus();
        for_each_online_cpu(cpu)
-               if (likely(per_cpu(tracer, cpu)))
-                       ds_resume_bts(per_cpu(tracer, cpu));
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_resume_bts(per_cpu(hwb_tracer, cpu));
        trace_hw_branches_suspended = 0;
        put_online_cpus();
 }
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
 
        get_online_cpus();
        for_each_online_cpu(cpu)
-               if (likely(per_cpu(tracer, cpu)))
-                       ds_suspend_bts(per_cpu(tracer, cpu));
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_suspend_bts(per_cpu(hwb_tracer, cpu));
        trace_hw_branches_suspended = 1;
        put_online_cpus();
 }
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
                        bts_trace_init_cpu(cpu);
 
                        if (trace_hw_branches_suspended &&
-                           likely(per_cpu(tracer, cpu)))
-                               ds_suspend_bts(per_cpu(tracer, cpu));
+                           likely(per_cpu(hwb_tracer, cpu)))
+                               ds_suspend_bts(per_cpu(hwb_tracer, cpu));
                }
                break;
 
        case CPU_DOWN_PREPARE:
                /* The notification is sent with interrupts enabled. */
-               if (likely(per_cpu(tracer, cpu))) {
-                       ds_release_bts(per_cpu(tracer, cpu));
-                       per_cpu(tracer, cpu) = NULL;
+               if (likely(per_cpu(hwb_tracer, cpu))) {
+                       ds_release_bts(per_cpu(hwb_tracer, cpu));
+                       per_cpu(hwb_tracer, cpu) = NULL;
                }
        }
 
@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 
        get_online_cpus();
        for_each_online_cpu(cpu)
-               if (likely(per_cpu(tracer, cpu)))
-                       ds_suspend_bts(per_cpu(tracer, cpu));
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_suspend_bts(per_cpu(hwb_tracer, cpu));
        /*
         * We need to collect the trace on the respective cpu since ftrace
         * implicitly adds the record for the current cpu.
@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
        on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
        for_each_online_cpu(cpu)
-               if (likely(per_cpu(tracer, cpu)))
-                       ds_resume_bts(per_cpu(tracer, cpu));
+               if (likely(per_cpu(hwb_tracer, cpu)))
+                       ds_resume_bts(per_cpu(hwb_tracer, cpu));
        put_online_cpus();
 }
 
diff --git a/mm/Makefile b/mm/Makefile
index ebf849042ed3d6dd6dc7543d47d35585077b8a1b..82131d0f8d85fbfb007e084e882333771dd47c10 100644 (file)
@@ -34,11 +34,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 obj-$(CONFIG_SMP) += percpu.o
-else
-obj-$(CONFIG_SMP) += allocpercpu.o
-endif
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
deleted file mode 100644 (file)
index df34cea..0000000
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * linux/mm/allocpercpu.c
- *
- * Separated from slab.c August 11, 2006 Christoph Lameter
- */
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/bootmem.h>
-#include <asm/sections.h>
-
-#ifndef cache_line_size
-#define cache_line_size()      L1_CACHE_BYTES
-#endif
-
-/**
- * percpu_depopulate - depopulate per-cpu data for given cpu
- * @__pdata: per-cpu data to depopulate
- * @cpu: depopulate per-cpu data for this cpu
- *
- * Depopulating per-cpu data for a cpu going offline would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- */
-static void percpu_depopulate(void *__pdata, int cpu)
-{
-       struct percpu_data *pdata = __percpu_disguise(__pdata);
-
-       kfree(pdata->ptrs[cpu]);
-       pdata->ptrs[cpu] = NULL;
-}
-
-/**
- * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
- * @__pdata: per-cpu data to depopulate
- * @mask: depopulate per-cpu data for cpu's selected through mask bits
- */
-static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
-{
-       int cpu;
-       for_each_cpu_mask_nr(cpu, *mask)
-               percpu_depopulate(__pdata, cpu);
-}
-
-#define percpu_depopulate_mask(__pdata, mask) \
-       __percpu_depopulate_mask((__pdata), &(mask))
-
-/**
- * percpu_populate - populate per-cpu data for given cpu
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @cpu: populate per-data for this cpu
- *
- * Populating per-cpu data for a cpu coming online would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- * Per-cpu object is populated with zeroed buffer.
- */
-static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
-{
-       struct percpu_data *pdata = __percpu_disguise(__pdata);
-       int node = cpu_to_node(cpu);
-
-       /*
-        * We should make sure each CPU gets private memory.
-        */
-       size = roundup(size, cache_line_size());
-
-       BUG_ON(pdata->ptrs[cpu]);
-       if (node_online(node))
-               pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
-       else
-               pdata->ptrs[cpu] = kzalloc(size, gfp);
-       return pdata->ptrs[cpu];
-}
-
-/**
- * percpu_populate_mask - populate per-cpu data for more cpu's
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-cpu data for cpu's selected through mask bits
- *
- * Per-cpu objects are populated with zeroed buffers.
- */
-static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
-                                 cpumask_t *mask)
-{
-       cpumask_t populated;
-       int cpu;
-
-       cpus_clear(populated);
-       for_each_cpu_mask_nr(cpu, *mask)
-               if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
-                       __percpu_depopulate_mask(__pdata, &populated);
-                       return -ENOMEM;
-               } else
-                       cpu_set(cpu, populated);
-       return 0;
-}
-
-#define percpu_populate_mask(__pdata, size, gfp, mask) \
-       __percpu_populate_mask((__pdata), (size), (gfp), &(mask))
-
-/**
- * alloc_percpu - initial setup of per-cpu data
- * @size: size of per-cpu object
- * @align: alignment
- *
- * Allocate dynamic percpu area.  Percpu objects are populated with
- * zeroed buffers.
- */
-void *__alloc_percpu(size_t size, size_t align)
-{
-       /*
-        * We allocate whole cache lines to avoid false sharing
-        */
-       size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
-       void *pdata = kzalloc(sz, GFP_KERNEL);
-       void *__pdata = __percpu_disguise(pdata);
-
-       /*
-        * Can't easily make larger alignment work with kmalloc.  WARN
-        * on it.  Larger alignment should only be used for module
-        * percpu sections on SMP for which this path isn't used.
-        */
-       WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-
-       if (unlikely(!pdata))
-               return NULL;
-       if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
-                                          &cpu_possible_map)))
-               return __pdata;
-       kfree(pdata);
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * free_percpu - final cleanup of per-cpu data
- * @__pdata: object to clean up
- *
- * We simply clean up any per-cpu object left. No need for the client to
- * track and specify through a bis mask which per-cpu objects are to free.
- */
-void free_percpu(void *__pdata)
-{
-       if (unlikely(!__pdata))
-               return;
-       __percpu_depopulate_mask(__pdata, cpu_possible_mask);
-       kfree(__percpu_disguise(__pdata));
-}
-EXPORT_SYMBOL_GPL(free_percpu);
-
-/*
- * Generic percpu area setup.
- */
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-void __init setup_per_cpu_areas(void)
-{
-       unsigned long size, i;
-       char *ptr;
-       unsigned long nr_possible_cpus = num_possible_cpus();
-
-       /* Copy section for each CPU (we discard the original) */
-       size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-       ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
-       for_each_possible_cpu(i) {
-               __per_cpu_offset[i] = ptr - __per_cpu_start;
-               memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-               ptr += size;
-       }
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
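
With the legacy allocator deleted, every SMP configuration now uses the unified allocator in mm/percpu.c; the caller-visible interface is unchanged. For reference, the usual shape of that interface (the structure and function names below are illustrative only, not taken from this patch):

	#include <linux/percpu.h>

	struct example_stats {			/* hypothetical per-CPU object */
		unsigned long hits;
		unsigned long misses;
	};

	static struct example_stats *example_stats;	/* dynamic per-CPU allocation */

	static int example_init(void)
	{
		example_stats = alloc_percpu(struct example_stats);	/* zeroed on all CPUs */
		if (!example_stats)
			return -ENOMEM;
		return 0;
	}

	static void example_account(int cpu, int hit)
	{
		struct example_stats *s = per_cpu_ptr(example_stats, cpu);

		if (hit)
			s->hits++;
		else
			s->misses++;
	}

	static void example_exit(void)
	{
		free_percpu(example_stats);
	}
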
diff --git a/mm/percpu.c b/mm/percpu.c
index 5adfc268b408936a3d18ae9014ce70e49590738d..442010cc91c6c82eb8489e64d21500baa52b5911 100644 (file)
@@ -46,8 +46,6 @@
  *
  * To use this allocator, arch code should do the followings.
  *
- * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
- *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  *   regular address to percpu pointer and back if they need to be
  *   different from the default
@@ -74,6 +72,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>
 
 #define PCPU_SLOT_BASE_SHIFT           5       /* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC             16      /* start a map with 16 ents */
@@ -1302,6 +1301,27 @@ void free_percpu(void *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
+/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr which is dereferenceable address obtained via one of
+ * percpu access macros, this function translates it into its physical
+ * address.  The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+       if ((unsigned long)addr < VMALLOC_START ||
+                       (unsigned long)addr >= VMALLOC_END)
+               return __pa(addr);
+       else
+               return page_to_phys(vmalloc_to_page(addr));
+}
+
 static inline size_t pcpu_calc_fc_sizes(size_t static_size,
                                        size_t reserved_size,
                                        ssize_t *dyn_sizep)
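
per_cpu_ptr_to_phys() is new infrastructure: given an address obtained through per_cpu_ptr() or another percpu access macro, it returns the backing physical address, whether the chunk sits in the linear mapping or in a vmalloc-backed area. A hedged usage sketch (the buffer is hypothetical, e.g. a per-CPU area whose physical address must be registered with firmware or a hypervisor):

	#include <linux/percpu.h>

	static u64 *example_buf;	/* assumed to come from alloc_percpu(u64) */

	static phys_addr_t example_buf_phys(int cpu)
	{
		/* Translate @cpu's instance of the buffer to a physical address. */
		return per_cpu_ptr_to_phys(per_cpu_ptr(example_buf, cpu));
	}
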
index a6c9166996a9f389e72f3c71a2df7bacc937fe6f..29b09599af7cb4510c37e02d873eab4e2cd52c79 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -697,7 +697,7 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -838,7 +838,7 @@ __setup("noaliencache", noaliencache_setup);
  * objects freed on different nodes from which they were allocated) and the
  * flushing of remote pcps by calling drain_node_pages.
  */
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
@@ -848,17 +848,17 @@ static void init_reap_node(int cpu)
        if (node == MAX_NUMNODES)
                node = first_node(node_online_map);
 
-       per_cpu(reap_node, cpu) = node;
+       per_cpu(slab_reap_node, cpu) = node;
 }
 
 static void next_reap_node(void)
 {
-       int node = __get_cpu_var(reap_node);
+       int node = __get_cpu_var(slab_reap_node);
 
        node = next_node(node, node_online_map);
        if (unlikely(node >= MAX_NUMNODES))
                node = first_node(node_online_map);
-       __get_cpu_var(reap_node) = node;
+       __get_cpu_var(slab_reap_node) = node;
 }
 
 #else
@@ -875,7 +875,7 @@ static void next_reap_node(void)
  */
 static void __cpuinit start_cpu_timer(int cpu)
 {
-       struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+       struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
        /*
         * When this gets called from do_initcalls via cpucache_init(),
@@ -1039,7 +1039,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-       int node = __get_cpu_var(reap_node);
+       int node = __get_cpu_var(slab_reap_node);
 
        if (l3->alien) {
                struct array_cache *ac = l3->alien[node];
@@ -1300,9 +1300,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                 * anything expensive but will only modify reap_work
                 * and reschedule the timer.
                */
-               cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+               cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
                /* Now the cache_reaper is guaranteed to be not running. */
-               per_cpu(reap_work, cpu).work.func = NULL;
+               per_cpu(slab_reap_work, cpu).work.func = NULL;
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f551a4a44cddc7a042d47bbcd85c7126569ee69..9b08d790df6fe441a2b8c2002ed9f0e1a4677641 100644 (file)
@@ -761,7 +761,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
        spin_lock(&vbq->lock);
        list_add(&vb->free_list, &vbq->free);
        spin_unlock(&vbq->lock);
-       put_cpu_var(vmap_cpu_blocks);
+       put_cpu_var(vmap_block_queue);
 
        return vb;
 }
@@ -826,7 +826,7 @@ again:
                }
                spin_unlock(&vb->lock);
        }
-       put_cpu_var(vmap_cpu_blocks);
+       put_cpu_var(vmap_block_queue);
        rcu_read_unlock();
 
        if (!addr) {
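The vmalloc change makes put_cpu_var() name the same per-CPU variable that get_cpu_var() acquired (vmap_block_queue); the old, mismatched vmap_cpu_blocks only compiled because put_cpu_var() discards its argument. A minimal sketch of the intended pairing, with a hypothetical per-CPU structure:

	#include <linux/percpu.h>

	struct example_queue {			/* hypothetical per-CPU structure */
		unsigned long pending;
	};

	static DEFINE_PER_CPU(struct example_queue, example_queue);

	static void example_use(void)
	{
		struct example_queue *q;

		q = &get_cpu_var(example_queue);	/* disables preemption */
		q->pending++;				/* safe: we own this CPU's copy */
		put_cpu_var(example_queue);		/* same symbol, re-enables preemption */
	}
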
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c81321f9feec1cb28eee1b7e07237017336b89d0..dad2327e45804e16a8ff07564a80eb0cac79f482 100644 (file)
@@ -883,11 +883,10 @@ static void vmstat_update(struct work_struct *w)
 
 static void __cpuinit start_cpu_timer(int cpu)
 {
-       struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+       struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
-       INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
-       schedule_delayed_work_on(cpu, vmstat_work,
-                                __round_jiffies_relative(HZ, cpu));
+       INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+       schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
 /*