Merge branch 'this_cpu_ops' into for-2.6.38
author Tejun Heo <tj@kernel.org>
Fri, 17 Dec 2010 14:16:46 +0000 (15:16 +0100)
committer Tejun Heo <tj@kernel.org>
Fri, 17 Dec 2010 14:16:46 +0000 (15:16 +0100)
30 files changed:
MAINTAINERS
arch/x86/kernel/kprobes.c
arch/x86/xen/enlighten.c
arch/x86/xen/multicalls.h
arch/x86/xen/spinlock.c
arch/x86/xen/time.c
drivers/acpi/processor_idle.c
drivers/cpuidle/cpuidle.c
drivers/s390/cio/cio.c
drivers/staging/speakup/fakekey.c
drivers/xen/events.c
fs/buffer.c
include/asm-generic/irq_regs.h
include/linux/elevator.h
include/linux/kernel_stat.h
include/linux/kprobes.h
kernel/exit.c
kernel/fork.c
kernel/hrtimer.c
kernel/kprobes.c
kernel/printk.c
kernel/rcutree.c
kernel/softirq.c
kernel/time/tick-common.c
kernel/time/tick-oneshot.c
kernel/watchdog.c
lib/percpu_counter.c
mm/percpu.c
mm/slab.c
mm/vmstat.c

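Every hunk in this merge applies the same conversion: lvalue-style __get_cpu_var() accesses become this_cpu operations, which fold the per-cpu address calculation into the access itself (on x86, a single segment-prefixed instruction). A minimal sketch of the before/after pattern, using an illustrative variable (demo_count) that appears nowhere in this series:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, demo_count);	/* stand-in, illustration only */

static void demo(void)
{
	int v;

	/* Old style: form a per-cpu address, then dereference it;
	 * the caller must hold off preemption across both steps. */
	preempt_disable();
	v = __get_cpu_var(demo_count);
	__get_cpu_var(demo_count) = v + 1;
	preempt_enable();

	/* New style: one operation per access.  The __-prefixed forms
	 * still assume preemption is already held off by the caller... */
	preempt_disable();
	v = __this_cpu_read(demo_count);
	__this_cpu_write(demo_count, v + 1);
	preempt_enable();

	/* ...while the unprefixed forms are safe on their own. */
	this_cpu_inc(demo_count);
}
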
index 6a588873cf8d2da8b00f803a1dcd8d96841292bd..cc27793e87563429947119a905bf8e9f7a71335f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4602,6 +4602,16 @@ S:       Maintained
 F:     crypto/pcrypt.c
 F:     include/crypto/pcrypt.h
 
+PER-CPU MEMORY ALLOCATOR
+M:     Tejun Heo <tj@kernel.org>
+M:     Christoph Lameter <cl@linux-foundation.org>
+L:     linux-kernel@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
+S:     Maintained
+F:     include/linux/percpu*.h
+F:     mm/percpu*.c
+F:     arch/*/include/asm/percpu.h
+
 PER-TASK DELAY ACCOUNTING
 M:     Balbir Singh <balbir@linux.vnet.ibm.com>
 S:     Maintained
index 1cbd54c0df99189548a3a03f40fbb75a1703475a..572ecc88ca40784f143a2eca02645e23e0cbb0b1 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -403,7 +403,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
        kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
@@ -412,7 +412,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
 {
-       __get_cpu_var(current_kprobe) = p;
+       __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_flags = kcb->kprobe_old_flags
                = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
        if (is_IF_modifier(p->ainsn.insn))
@@ -586,7 +586,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                preempt_enable_no_resched();
                return 1;
        } else if (kprobe_running()) {
-               p = __get_cpu_var(current_kprobe);
+               p = __this_cpu_read(current_kprobe);
                if (p->break_handler && p->break_handler(p, regs)) {
                        setup_singlestep(p, regs, kcb, 0);
                        return 1;
@@ -759,11 +759,11 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 
                orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
-                       __get_cpu_var(current_kprobe) = &ri->rp->kp;
+                       __this_cpu_write(current_kprobe, &ri->rp->kp);
                        get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
-                       __get_cpu_var(current_kprobe) = NULL;
+                       __this_cpu_write(current_kprobe, NULL);
                }
 
                recycle_rp_inst(ri, &empty_rp);
@@ -1198,10 +1198,10 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
                regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
                regs->orig_ax = ~0UL;
 
-               __get_cpu_var(current_kprobe) = &op->kp;
+               __this_cpu_write(current_kprobe, &op->kp);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
-               __get_cpu_var(current_kprobe) = NULL;
+               __this_cpu_write(current_kprobe, NULL);
        }
        preempt_enable_no_resched();
 }
index 44dcad43989dc983af51863fac28677e2cfdaaee..aa8c89ae54cfaf08318b3d9718187dbaa4c41c24 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -574,8 +574,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
 
        preempt_disable();
 
-       start = __get_cpu_var(idt_desc).address;
-       end = start + __get_cpu_var(idt_desc).size + 1;
+       start = __this_cpu_read(idt_desc.address);
+       end = start + __this_cpu_read(idt_desc.size) + 1;
 
        xen_mc_flush();
 
index 9e565da5d1f730f50d41fca98f4fbb86c69b5f29..4ec8035e3216208a94c960624befa606aed79884 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -22,7 +22,7 @@ static inline void xen_mc_batch(void)
        unsigned long flags;
        /* need to disable interrupts until this entry is complete */
        local_irq_save(flags);
-       __get_cpu_var(xen_mc_irq_flags) = flags;
+       __this_cpu_write(xen_mc_irq_flags, flags);
 }
 
 static inline struct multicall_space xen_mc_entry(size_t args)
index 23e061b9327bc45b9ba64024559c87202f7602b0..cc9b1e182fcfad86bc67b56a8e172fb73d9e9ecf 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -159,8 +159,8 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 {
        struct xen_spinlock *prev;
 
-       prev = __get_cpu_var(lock_spinners);
-       __get_cpu_var(lock_spinners) = xl;
+       prev = __this_cpu_read(lock_spinners);
+       __this_cpu_write(lock_spinners, xl);
 
        wmb();                  /* set lock of interest before count */
 
@@ -179,14 +179,14 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
        asm(LOCK_PREFIX " decw %0"
            : "+m" (xl->spinners) : : "memory");
        wmb();                  /* decrement count before restoring lock */
-       __get_cpu_var(lock_spinners) = prev;
+       __this_cpu_write(lock_spinners, prev);
 }
 
 static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        struct xen_spinlock *prev;
-       int irq = __get_cpu_var(lock_kicker_irq);
+       int irq = __this_cpu_read(lock_kicker_irq);
        int ret;
        u64 start;
 
index 5da5e53fb94c20bf6c244dce734e515c92d10b34..067759e3d6a525b53198673d6029ec4d6328cae7 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -135,24 +135,24 @@ static void do_stolen_accounting(void)
 
        /* Add the appropriate number of ticks of stolen time,
           including any left-overs from last time. */
-       stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
+       stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
 
        if (stolen < 0)
                stolen = 0;
 
        ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
-       __get_cpu_var(xen_residual_stolen) = stolen;
+       __this_cpu_write(xen_residual_stolen, stolen);
        account_steal_ticks(ticks);
 
        /* Add the appropriate number of ticks of blocked time,
           including any left-overs from last time. */
-       blocked += __get_cpu_var(xen_residual_blocked);
+       blocked += __this_cpu_read(xen_residual_blocked);
 
        if (blocked < 0)
                blocked = 0;
 
        ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-       __get_cpu_var(xen_residual_blocked) = blocked;
+       __this_cpu_write(xen_residual_blocked, blocked);
        account_idle_ticks(ticks);
 }
 
index dcb38f8ddfda09142f5962cfcfe38b9aa9fc5570..a765b823aa9e91066c417dacca634abe9ae9c6e5 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -746,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 
-       pr = __get_cpu_var(processors);
+       pr = __this_cpu_read(processors);
 
        if (unlikely(!pr))
                return 0;
@@ -787,7 +787,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
        s64 idle_time_ns;
        s64 idle_time;
 
-       pr = __get_cpu_var(processors);
+       pr = __this_cpu_read(processors);
 
        if (unlikely(!pr))
                return 0;
@@ -864,7 +864,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        s64 idle_time;
 
 
-       pr = __get_cpu_var(processors);
+       pr = __this_cpu_read(processors);
 
        if (unlikely(!pr))
                return 0;
index a507108433785f8432b3d15504c3a036b461c1a0..978ff292a3fac8ebd5bef9bda8abc4c0e5b19394 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -49,7 +49,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
  */
 static void cpuidle_idle_call(void)
 {
-       struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
+       struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_state *target_state;
        int next_state;
 
index f4e6cf3aceb86ac86233cd6cd00485312a945824..430f875006f22b8535d6c179268655e053528962 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -619,7 +619,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
        s390_idle_check(regs, S390_lowcore.int_clock,
                        S390_lowcore.async_enter_timer);
        irq_enter();
-       __get_cpu_var(s390_idle).nohz_delay = 1;
+       __this_cpu_write(s390_idle.nohz_delay, 1);
        if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
                /* Serve timer interrupts first. */
                clock_comparator_work();
index 65b231178f0580d30a4233e17bf31eb73c7df6f7..1b34a87716418b8af6248d0540d9958f2b0cbd2e 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -78,10 +78,10 @@ void speakup_fake_down_arrow(void)
        /* don't change CPU */
        preempt_disable();
 
-       __get_cpu_var(reporting_keystroke) = true;
+       __this_cpu_write(reporting_keystroke, true);
        input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
        input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
-       __get_cpu_var(reporting_keystroke) = false;
+       __this_cpu_write(reporting_keystroke, false);
 
        /* reenable preemption */
        preempt_enable();
@@ -95,10 +95,5 @@ void speakup_fake_down_arrow(void)
         */
 bool speakup_fake_key_pressed(void)
 {
-       bool is_pressed;
-
-       is_pressed = get_cpu_var(reporting_keystroke);
-       put_cpu_var(reporting_keystroke);
-
-       return is_pressed;
+       return this_cpu_read(reporting_keystroke);
 }
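
speakup_fake_key_pressed() shrinks from five lines to one: the get_cpu_var()/put_cpu_var() pair existed only to pin the task to one CPU across the copy, and this_cpu_read() is a single preemption-safe access. A stand-in sketch of the equivalence (demo_flag takes the place of reporting_keystroke):

static DEFINE_PER_CPU(bool, demo_flag);	/* stand-in for reporting_keystroke */

/* Old read side: pin the CPU around the access. */
static bool demo_read_old(void)
{
	bool v = get_cpu_var(demo_flag);	/* preempt_disable() + access */
	put_cpu_var(demo_flag);			/* preempt_enable() */
	return v;
}

/* New read side: one preemption-safe access; any CPU's answer is fine. */
static bool demo_read_new(void)
{
	return this_cpu_read(demo_flag);
}
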
index 31af0ac31a98bffc310cfb79bcaa2d7e03393e81..a10c66dc9dda294003ed2ed36735a7b18d12dddf 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -355,7 +355,7 @@ static void unmask_evtchn(int port)
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
-               struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+               struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
                sync_clear_bit(port, &s->evtchn_mask[0]);
 
@@ -1101,7 +1101,7 @@ static void __xen_evtchn_do_upcall(void)
 {
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
-       struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+       struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
        unsigned count;
 
        do {
@@ -1141,8 +1141,8 @@ static void __xen_evtchn_do_upcall(void)
 
                BUG_ON(!irqs_disabled());
 
-               count = __get_cpu_var(xed_nesting_count);
-               __get_cpu_var(xed_nesting_count) = 0;
+               count = __this_cpu_read(xed_nesting_count);
+               __this_cpu_write(xed_nesting_count, 0);
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);
 
 out:
index 5930e382959bc504c58bbb428588a372742d4aa4..137d9de00e24e9d8f88bd82841de16e58eca76d0 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1270,12 +1270,10 @@ static inline void check_irqs_on(void)
 static void bh_lru_install(struct buffer_head *bh)
 {
        struct buffer_head *evictee = NULL;
-       struct bh_lru *lru;
 
        check_irqs_on();
        bh_lru_lock();
-       lru = &__get_cpu_var(bh_lrus);
-       if (lru->bhs[0] != bh) {
+       if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;
@@ -1283,7 +1281,8 @@ static void bh_lru_install(struct buffer_head *bh)
                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
-                       struct buffer_head *bh2 = lru->bhs[in];
+                       struct buffer_head *bh2 =
+                               __this_cpu_read(bh_lrus.bhs[in]);
 
                        if (bh2 == bh) {
                                __brelse(bh2);
@@ -1298,7 +1297,7 @@ static void bh_lru_install(struct buffer_head *bh)
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
-               memcpy(lru->bhs, bhs, sizeof(bhs));
+               memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
        }
        bh_lru_unlock();
 
@@ -1313,23 +1312,22 @@ static struct buffer_head *
 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *ret = NULL;
-       struct bh_lru *lru;
        unsigned int i;
 
        check_irqs_on();
        bh_lru_lock();
-       lru = &__get_cpu_var(bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
-               struct buffer_head *bh = lru->bhs[i];
+               struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
 
                if (bh && bh->b_bdev == bdev &&
                                bh->b_blocknr == block && bh->b_size == size) {
                        if (i) {
                                while (i) {
-                                       lru->bhs[i] = lru->bhs[i - 1];
+                                       __this_cpu_write(bh_lrus.bhs[i],
+                                               __this_cpu_read(bh_lrus.bhs[i - 1]));
                                        i--;
                                }
-                               lru->bhs[0] = bh;
+                               __this_cpu_write(bh_lrus.bhs[0], bh);
                        }
                        get_bh(bh);
                        ret = bh;
@@ -3205,20 +3203,21 @@ static void recalc_bh_state(void)
 
        if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
                return;
-       __get_cpu_var(bh_accounting).ratelimit = 0;
+       __this_cpu_write(bh_accounting.ratelimit, 0);
        for_each_online_cpu(i)
                tot += per_cpu(bh_accounting, i).nr;
        buffer_heads_over_limit = (tot > max_buffer_heads);
 }
-       
+
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
-               get_cpu_var(bh_accounting).nr++;
+               preempt_disable();
+               __this_cpu_inc(bh_accounting.nr);
                recalc_bh_state();
-               put_cpu_var(bh_accounting);
+               preempt_enable();
        }
        return ret;
 }
@@ -3228,9 +3227,10 @@ void free_buffer_head(struct buffer_head *bh)
 {
        BUG_ON(!list_empty(&bh->b_assoc_buffers));
        kmem_cache_free(bh_cachep, bh);
-       get_cpu_var(bh_accounting).nr--;
+       preempt_disable();
+       __this_cpu_dec(bh_accounting.nr);
        recalc_bh_state();
-       put_cpu_var(bh_accounting);
+       preempt_enable();
 }
 EXPORT_SYMBOL(free_buffer_head);
 
@@ -3243,9 +3243,8 @@ static void buffer_exit_cpu(int cpu)
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
-       get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+       this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
        per_cpu(bh_accounting, cpu).nr = 0;
-       put_cpu_var(bh_accounting);
 }
 
 static int buffer_cpu_notify(struct notifier_block *self,
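
Note why buffer.c keeps an explicit preempt_disable()/preempt_enable() pair even though this_cpu_inc() alone would be preempt-safe: recalc_bh_state() must observe the same CPU's counters as the increment. A stand-in sketch of that shape (names here are illustrative, not from fs/buffer.c):

struct demo_acct {			/* stand-in for struct bh_accounting */
	int nr;
	int ratelimit;
};
static DEFINE_PER_CPU(struct demo_acct, demo_accounting);

static void demo_recalc(void)		/* stand-in for recalc_bh_state() */
{
	if (__this_cpu_read(demo_accounting.ratelimit) < 4096) {
		__this_cpu_inc(demo_accounting.ratelimit);
		return;
	}
	__this_cpu_write(demo_accounting.ratelimit, 0);
	/* ... sum the per-cpu counts across all CPUs here ... */
}

static void demo_account_alloc(void)
{
	/* Preemption is held off across both calls so the follow-up
	 * runs against the CPU that took the increment. */
	preempt_disable();
	__this_cpu_inc(demo_accounting.nr);
	demo_recalc();
	preempt_enable();
}
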
index 5ae1d07d4a1275a8ac91996b23753270e5033cb3..6bf9355fa7eb5097c59454ac8d98e46e09c1f621 100644
--- a/include/asm-generic/irq_regs.h
+++ b/include/asm-generic/irq_regs.h
@@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-       return __get_cpu_var(__irq_regs);
+       return __this_cpu_read(__irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 {
-       struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+       struct pt_regs *old_regs;
 
-       old_regs = *pp_regs;
-       *pp_regs = new_regs;
+       old_regs = __this_cpu_read(__irq_regs);
+       __this_cpu_write(__irq_regs, new_regs);
        return old_regs;
 }
 
index 4fd978e7eb83ef8d689d0d313b5441b0631ea275..4d857973d2c94317cf11041a4a7070794fc13a99 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -195,15 +195,9 @@ enum {
 /*
  * io context count accounting
  */
-#define elv_ioc_count_mod(name, __val)                         \
-       do {                                                    \
-               preempt_disable();                              \
-               __get_cpu_var(name) += (__val);                 \
-               preempt_enable();                               \
-       } while (0)
-
-#define elv_ioc_count_inc(name)        elv_ioc_count_mod(name, 1)
-#define elv_ioc_count_dec(name)        elv_ioc_count_mod(name, -1)
+#define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val)
+#define elv_ioc_count_inc(name)        this_cpu_inc(name)
+#define elv_ioc_count_dec(name)        this_cpu_dec(name)
 
 #define elv_ioc_count_read(name)                               \
 ({                                                             \
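
The old elv_ioc_count_mod() needed its own preempt_disable()/preempt_enable() because __get_cpu_var() is only meaningful on a fixed CPU; this_cpu_add() carries that guarantee internally (one instruction on x86, a preemption-protected add elsewhere). A sketch with a stand-in counter:

static DEFINE_PER_CPU(unsigned long, demo_ioc_count);	/* stand-in counter */

static void demo_ioc_track(void)
{
	/* Old macro body: three statements, two of them preemption hooks. */
	preempt_disable();
	__get_cpu_var(demo_ioc_count) += 1;
	preempt_enable();

	/* New macro bodies: one statement each, same guarantee. */
	this_cpu_add(demo_ioc_count, 1);
	this_cpu_inc(demo_ioc_count);
	this_cpu_dec(demo_ioc_count);
}
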
index ad54c846911b91a169b903f7b1f6fee24d4320a2..44e83ba12b5b1076e4a1af7624420a9c2762ed2f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -47,7 +47,7 @@ extern unsigned long long nr_context_switches(void);
 
 #ifndef CONFIG_GENERIC_HARDIRQS
 #define kstat_irqs_this_cpu(irq) \
-       (kstat_this_cpu.irqs[irq])
+       (this_cpu_read(kstat.irqs[irq]))
 
 struct irq_desc;
 
index e7d1b2e0070d3570b7022877a79fa2f0ed081507..0c251e9f0507372cc245817064e9c32ace2fad03 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -303,12 +303,12 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
 /* kprobe_running() will just return the current_kprobe on this CPU */
 static inline struct kprobe *kprobe_running(void)
 {
-       return (__get_cpu_var(current_kprobe));
+       return (__this_cpu_read(current_kprobe));
 }
 
 static inline void reset_current_kprobe(void)
 {
-       __get_cpu_var(current_kprobe) = NULL;
+       __this_cpu_write(current_kprobe, NULL);
 }
 
 static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
index 676149a4ac5ff497367a484e2b66c01e915ccefd..89c74861a3da94ea6b21720b40f59ce69a42b918 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -69,7 +69,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
 
                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
-               __get_cpu_var(process_counts)--;
+               __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
 }
index 3b159c5991b7561bdba253eeb479f91622a35fb9..e05e27de67df20749248bd869396b1298fed40cc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1282,7 +1282,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                        attach_pid(p, PIDTYPE_SID, task_session(current));
                        list_add_tail(&p->sibling, &p->real_parent->children);
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
-                       __get_cpu_var(process_counts)++;
+                       __this_cpu_inc(process_counts);
                }
                attach_pid(p, PIDTYPE_PID, pid);
                nr_threads++;
index 72206cf5c6cf854898d889a6a645e44febdd526f..29de5ae4ca956b7f06c0ca56c968493fb22fa316 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -497,7 +497,7 @@ static inline int hrtimer_is_hres_enabled(void)
  */
 static inline int hrtimer_hres_active(void)
 {
-       return __get_cpu_var(hrtimer_bases).hres_active;
+       return __this_cpu_read(hrtimer_bases.hres_active);
 }
 
 /*
index 9737a76e106ff1554ecc2174f0e49a92b5badf45..732f1e9b65ee67df64ed3f90be6470840739457b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -317,12 +317,12 @@ void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
 {
-       __get_cpu_var(kprobe_instance) = kp;
+       __this_cpu_write(kprobe_instance, kp);
 }
 
 static inline void reset_kprobe_instance(void)
 {
-       __get_cpu_var(kprobe_instance) = NULL;
+       __this_cpu_write(kprobe_instance, NULL);
 }
 
 /*
@@ -775,7 +775,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
 {
-       struct kprobe *cur = __get_cpu_var(kprobe_instance);
+       struct kprobe *cur = __this_cpu_read(kprobe_instance);
 
        /*
         * if we faulted "during" the execution of a user specified
@@ -790,7 +790,7 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-       struct kprobe *cur = __get_cpu_var(kprobe_instance);
+       struct kprobe *cur = __this_cpu_read(kprobe_instance);
        int ret = 0;
 
        if (cur && cur->break_handler) {
index a23315dc4498844c113cecc9792eabd063e1d87b..6c39fff387024e17824042965569e9282205d8fb 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1074,8 +1074,8 @@ static DEFINE_PER_CPU(int, printk_pending);
 
 void printk_tick(void)
 {
-       if (__get_cpu_var(printk_pending)) {
-               __get_cpu_var(printk_pending) = 0;
+       if (__this_cpu_read(printk_pending)) {
+               __this_cpu_write(printk_pending, 0);
                wake_up_interruptible(&log_wait);
        }
 }
index ccdc04c479815addc8dbacea69643174a4636670..aeebf772d6a2567dbf70a20eae34eeedde006f6b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -367,8 +367,8 @@ void rcu_irq_exit(void)
        WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
        /* If the interrupt queued a callback, get out of dyntick mode. */
-       if (__get_cpu_var(rcu_sched_data).nxtlist ||
-           __get_cpu_var(rcu_bh_data).nxtlist)
+       if (__this_cpu_read(rcu_sched_data.nxtlist) ||
+           __this_cpu_read(rcu_bh_data.nxtlist))
                set_need_resched();
 }
 
index 18f4be0d5fe0bbf853935972d9b441e95bc61c5a..d0a0dda52c1aa574db9bc742b4574a9960aabe8a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -70,7 +70,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
 static void wakeup_softirqd(void)
 {
        /* Interrupts are disabled: no need to stop preemption */
-       struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+       struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
@@ -388,8 +388,8 @@ void __tasklet_schedule(struct tasklet_struct *t)
 
        local_irq_save(flags);
        t->next = NULL;
-       *__get_cpu_var(tasklet_vec).tail = t;
-       __get_cpu_var(tasklet_vec).tail = &(t->next);
+       *__this_cpu_read(tasklet_vec.tail) = t;
+       __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
 }
@@ -402,8 +402,8 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 
        local_irq_save(flags);
        t->next = NULL;
-       *__get_cpu_var(tasklet_hi_vec).tail = t;
-       __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+       *__this_cpu_read(tasklet_hi_vec.tail) = t;
+               __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
 }
@@ -414,8 +414,8 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
        BUG_ON(!irqs_disabled());
 
-       t->next = __get_cpu_var(tasklet_hi_vec).head;
-       __get_cpu_var(tasklet_hi_vec).head = t;
+       t->next = __this_cpu_read(tasklet_hi_vec.head);
+       __this_cpu_write(tasklet_hi_vec.head, t);
        __raise_softirq_irqoff(HI_SOFTIRQ);
 }
 
@@ -426,9 +426,9 @@ static void tasklet_action(struct softirq_action *a)
        struct tasklet_struct *list;
 
        local_irq_disable();
-       list = __get_cpu_var(tasklet_vec).head;
-       __get_cpu_var(tasklet_vec).head = NULL;
-       __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+       list = __this_cpu_read(tasklet_vec.head);
+       __this_cpu_write(tasklet_vec.head, NULL);
+       __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
        local_irq_enable();
 
        while (list) {
@@ -449,8 +449,8 @@ static void tasklet_action(struct softirq_action *a)
 
                local_irq_disable();
                t->next = NULL;
-               *__get_cpu_var(tasklet_vec).tail = t;
-               __get_cpu_var(tasklet_vec).tail = &(t->next);
+               *__this_cpu_read(tasklet_vec.tail) = t;
+               __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
@@ -461,9 +461,9 @@ static void tasklet_hi_action(struct softirq_action *a)
        struct tasklet_struct *list;
 
        local_irq_disable();
-       list = __get_cpu_var(tasklet_hi_vec).head;
-       __get_cpu_var(tasklet_hi_vec).head = NULL;
-       __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
+       list = __this_cpu_read(tasklet_hi_vec.head);
+       __this_cpu_write(tasklet_hi_vec.head, NULL);
+       __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();
 
        while (list) {
@@ -484,8 +484,8 @@ static void tasklet_hi_action(struct softirq_action *a)
 
                local_irq_disable();
                t->next = NULL;
-               *__get_cpu_var(tasklet_hi_vec).tail = t;
-               __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+               *__this_cpu_read(tasklet_hi_vec.tail) = t;
+               __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
@@ -802,16 +802,16 @@ static void takeover_tasklets(unsigned int cpu)
 
        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
-               *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
-               __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+               *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
+               __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
-               *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-               __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+               *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
+               __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
index b6b898d2eeefc1b0627c613b3d23a9bd4b21876e..051bc80a0c435cf47a8dfb8a0f5d2d49d20e188c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -49,7 +49,7 @@ struct tick_device *tick_get_device(int cpu)
  */
 int tick_is_oneshot_available(void)
 {
-       struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+       struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
        return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
 }
index aada0e52680ace6cc7d5e09a111a3879c1c6bda3..5cbc101f908b8483938c0153fc1ac023bcd1c784 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -95,7 +95,7 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int tick_program_event(ktime_t expires, int force)
 {
-       struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+       struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
        return tick_dev_program_event(dev, expires, force);
 }
@@ -167,7 +167,7 @@ int tick_oneshot_mode_active(void)
        int ret;
 
        local_irq_save(flags);
-       ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT;
+       ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;
        local_irq_restore(flags);
 
        return ret;
index 6e3c41a4024c1cc66be01218e2c37498498f2469..8037a86106ed5b071bd51711920bc393f10edbb2 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -116,12 +116,12 @@ static void __touch_watchdog(void)
 {
        int this_cpu = smp_processor_id();
 
-       __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+       __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-       __raw_get_cpu_var(watchdog_touch_ts) = 0;
+       __this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -165,12 +165,12 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-       unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+       unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-       if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+       if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;
 
-       __get_cpu_var(hrtimer_interrupts_saved) = hrint;
+       __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
 }
 #endif
@@ -203,8 +203,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;
 
-       if (__get_cpu_var(watchdog_nmi_touch) == true) {
-               __get_cpu_var(watchdog_nmi_touch) = false;
+       if (__this_cpu_read(watchdog_nmi_touch) == true) {
+               __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }
 
@@ -218,7 +218,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                int this_cpu = smp_processor_id();
 
                /* only print hardlockups once */
-               if (__get_cpu_var(hard_watchdog_warn) == true)
+               if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;
 
                if (hardlockup_panic)
@@ -226,16 +226,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-               __get_cpu_var(hard_watchdog_warn) = true;
+               __this_cpu_write(hard_watchdog_warn, true);
                return;
        }
 
-       __get_cpu_var(hard_watchdog_warn) = false;
+       __this_cpu_write(hard_watchdog_warn, false);
        return;
 }
 static void watchdog_interrupt_count(void)
 {
-       __get_cpu_var(hrtimer_interrupts)++;
+       __this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -244,7 +244,7 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-       unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+       unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
 
@@ -252,18 +252,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        watchdog_interrupt_count();
 
        /* kick the softlockup detector */
-       wake_up_process(__get_cpu_var(softlockup_watchdog));
+       wake_up_process(__this_cpu_read(softlockup_watchdog));
 
        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
        if (touch_ts == 0) {
-               if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+               if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
-                       __get_cpu_var(softlockup_touch_sync) = false;
+                       __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }
                __touch_watchdog();
@@ -279,7 +279,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /* only warn once */
-               if (__get_cpu_var(soft_watchdog_warn) == true)
+               if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;
 
                printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -294,9 +294,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
-               __get_cpu_var(soft_watchdog_warn) = true;
+               __this_cpu_write(soft_watchdog_warn, true);
        } else
-               __get_cpu_var(soft_watchdog_warn) = false;
+               __this_cpu_write(soft_watchdog_warn, false);
 
        return HRTIMER_RESTART;
 }
index 604678d7d06d9b101feafb31da72654d5f618500..28f2c33c6b537ac07f5d2692fe08b0d48a8dae91 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,18 +72,16 @@ EXPORT_SYMBOL(percpu_counter_set);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
        s64 count;
-       s32 *pcount;
 
        preempt_disable();
-       pcount = this_cpu_ptr(fbc->counters);
-       count = *pcount + amount;
+       count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                spin_lock(&fbc->lock);
                fbc->count += count;
-               *pcount = 0;
+               __this_cpu_write(*fbc->counters, 0);
                spin_unlock(&fbc->lock);
        } else {
-               *pcount = count;
+               __this_cpu_write(*fbc->counters, count);
        }
        preempt_enable();
 }
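
Here the temporary pcount pointer disappears: both the read and the write-back go straight through __this_cpu ops on *fbc->counters. preempt_disable() stays because the read-modify-write spans a branch and a possible spinlock, so the task must not migrate mid-sequence. The shape of the converted fast path, restated with stand-in state rather than the real struct percpu_counter:

static DEFINE_PER_CPU(s32, demo_pcount);	/* stand-in for *fbc->counters */
static s64 demo_count;				/* stand-in for fbc->count */
static DEFINE_SPINLOCK(demo_lock);

static void demo_counter_add(s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();		/* the RMW spans several statements */
	count = __this_cpu_read(demo_pcount) + amount;
	if (count >= batch || count <= -batch) {
		spin_lock(&demo_lock);
		demo_count += count;
		__this_cpu_write(demo_pcount, 0);
		spin_unlock(&demo_lock);
	} else {
		__this_cpu_write(demo_pcount, count);
	}
	preempt_enable();
}
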
index efe816856a9d777b284f8bf25cc7548154f374d6..9e16d1c9ebd5e6a05cf91ce21f4b9a248f7d3d4b 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -293,12 +293,8 @@ static void *pcpu_mem_alloc(size_t size)
 
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
-       else {
-               void *ptr = vmalloc(size);
-               if (ptr)
-                       memset(ptr, 0, size);
-               return ptr;
-       }
+       else
+               return vzalloc(size);
 }
 
 /**
index b1e40dafbab3cc6326a6913acf17155cbcd8e7f0..316d75596f3cb85ee3c31c0f54b0062e5d3f000d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -829,12 +829,12 @@ static void init_reap_node(int cpu)
 
 static void next_reap_node(void)
 {
-       int node = __get_cpu_var(slab_reap_node);
+       int node = __this_cpu_read(slab_reap_node);
 
        node = next_node(node, node_online_map);
        if (unlikely(node >= MAX_NUMNODES))
                node = first_node(node_online_map);
-       __get_cpu_var(slab_reap_node) = node;
+       __this_cpu_write(slab_reap_node, node);
 }
 
 #else
@@ -1012,7 +1012,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-       int node = __get_cpu_var(slab_reap_node);
+       int node = __this_cpu_read(slab_reap_node);
 
        if (l3->alien) {
                struct array_cache *ac = l3->alien[node];
index 8f62f17ee1c726fec7fc683082b0807a15b848aa..3ad909d9600f76ad2dc710d5940ad470a26f5d8a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -167,18 +167,20 @@ static void refresh_zone_stat_thresholds(void)
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                int delta)
 {
-       struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-
-       s8 *p = pcp->vm_stat_diff + item;
+       struct per_cpu_pageset __percpu *pcp = zone->pageset;
+       s8 __percpu *p = pcp->vm_stat_diff + item;
        long x;
+       long t;
+
+       x = delta + __this_cpu_read(*p);
 
-       x = delta + *p;
+       t = __this_cpu_read(pcp->stat_threshold);
 
-       if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
+       if (unlikely(x > t || x < -t)) {
                zone_page_state_add(x, zone, item);
                x = 0;
        }
-       *p = x;
+       __this_cpu_write(*p, x);
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
 
@@ -221,16 +223,19 @@ EXPORT_SYMBOL(mod_zone_page_state);
  */
 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-       struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-       s8 *p = pcp->vm_stat_diff + item;
+       struct per_cpu_pageset __percpu *pcp = zone->pageset;
+       s8 __percpu *p = pcp->vm_stat_diff + item;
+       s8 v, t;
 
-       (*p)++;
+       __this_cpu_inc(*p);
 
-       if (unlikely(*p > pcp->stat_threshold)) {
-               int overstep = pcp->stat_threshold / 2;
+       v = __this_cpu_read(*p);
+       t = __this_cpu_read(pcp->stat_threshold);
+       if (unlikely(v > t)) {
+               s8 overstep = t >> 1;
 
-               zone_page_state_add(*p + overstep, zone, item);
-               *p = -overstep;
+               zone_page_state_add(v + overstep, zone, item);
+               __this_cpu_write(*p, -overstep);
        }
 }
 
@@ -242,16 +247,19 @@ EXPORT_SYMBOL(__inc_zone_page_state);
 
 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-       struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-       s8 *p = pcp->vm_stat_diff + item;
+       struct per_cpu_pageset __percpu *pcp = zone->pageset;
+       s8 __percpu *p = pcp->vm_stat_diff + item;
+       s8 v, t;
 
-       (*p)--;
+       __this_cpu_dec(*p);
 
-       if (unlikely(*p < - pcp->stat_threshold)) {
-               int overstep = pcp->stat_threshold / 2;
+       v = __this_cpu_read(*p);
+       t = __this_cpu_read(pcp->stat_threshold);
+       if (unlikely(v < - t)) {
+               s8 overstep = t >> 1;
 
-               zone_page_state_add(*p - overstep, zone, item);
-               *p = overstep;
+               zone_page_state_add(v - overstep, zone, item);
+               __this_cpu_write(*p, overstep);
        }
 }
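
After this conversion the vmstat counter fold reads the delta and the threshold through __this_cpu ops as well, so the whole fast path touches only this CPU's pageset; as before, the callers hold off preemption or interrupts. The half-threshold overstep provides hysteresis so a counter oscillating at the boundary does not hit the global counter on every step. A stand-in sketch of the increment path (the real code operates on zone->pageset, not static per-cpu variables):

static DEFINE_PER_CPU(s8, demo_diff);		/* stand-in vm_stat_diff slot */
static DEFINE_PER_CPU(s8, demo_threshold);	/* stand-in stat_threshold */
static atomic_long_t demo_global = ATOMIC_LONG_INIT(0);

static void demo_inc_state(void)		/* caller holds off preemption */
{
	s8 v, t;

	__this_cpu_inc(demo_diff);
	v = __this_cpu_read(demo_diff);
	t = __this_cpu_read(demo_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;		/* half-threshold hysteresis */

		atomic_long_add(v + overstep, &demo_global);
		__this_cpu_write(demo_diff, -overstep);
	}
}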