Merge tag 'regmap-v4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie...
[linux-2.6-block.git] / kernel / time / timer.c
index 2ece3aa5069cade64b8c4982e920a45bea5ba232..520499dd85af42e96b2bbd8c729df36d238ad27a 100644 (file)
@@ -49,6 +49,8 @@
 #include <asm/timex.h>
 #include <asm/io.h>
 
+#include "tick-internal.h"
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/timer.h>
 
@@ -68,11 +70,11 @@ EXPORT_SYMBOL(jiffies_64);
 #define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
 
 struct tvec {
-       struct list_head vec[TVN_SIZE];
+       struct hlist_head vec[TVN_SIZE];
 };
 
 struct tvec_root {
-       struct list_head vec[TVR_SIZE];
+       struct hlist_head vec[TVR_SIZE];
 };
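
The switch from list_head to hlist_head above halves the per-CPU bucket
arrays. For reference, the abridged upstream definitions (from
<linux/types.h>):

    struct list_head  { struct list_head *next, *prev; };   /* two pointers */
    struct hlist_head { struct hlist_node *first; };        /* one pointer  */
    struct hlist_node { struct hlist_node *next, **pprev; };

The trade-off is that an hlist has no tail pointer, which is why
list_add_tail() becomes hlist_add_head() further down and timers expiring
in the same bucket lose their FIFO ordering.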
 
 struct tvec_base {
@@ -83,6 +85,8 @@ struct tvec_base {
        unsigned long active_timers;
        unsigned long all_timers;
        int cpu;
+       bool migration_enabled;
+       bool nohz_active;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
@@ -90,43 +94,60 @@ struct tvec_base {
        struct tvec tv5;
 } ____cacheline_aligned;
 
-/*
- * __TIMER_INITIALIZER() needs to set ->base to a valid pointer (because we've
- * made NULL special, hint: lock_timer_base()) and we cannot get a compile time
- * pointer to per-cpu entries because we don't know where we'll map the section,
- * even for the boot cpu.
- *
- * And so we use boot_tvec_bases for boot CPU and per-cpu __tvec_bases for the
- * rest of them.
- */
-struct tvec_base boot_tvec_bases;
-EXPORT_SYMBOL(boot_tvec_bases);
 
-static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
+static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+unsigned int sysctl_timer_migration = 1;
 
-/* Functions below help us manage 'deferrable' flag */
-static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
+void timers_update_migration(bool update_nohz)
 {
-       return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
+       bool on = sysctl_timer_migration && tick_nohz_active;
+       unsigned int cpu;
+
+       /* Avoid the loop if there is nothing to update */
+       if (this_cpu_read(tvec_bases.migration_enabled) == on)
+               return;
+
+       for_each_possible_cpu(cpu) {
+               per_cpu(tvec_bases.migration_enabled, cpu) = on;
+               per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
+               if (!update_nohz)
+                       continue;
+               per_cpu(tvec_bases.nohz_active, cpu) = true;
+               per_cpu(hrtimer_bases.nohz_active, cpu) = true;
+       }
 }
 
-static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
+int timer_migration_handler(struct ctl_table *table, int write,
+                           void __user *buffer, size_t *lenp,
+                           loff_t *ppos)
 {
-       return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
+       static DEFINE_MUTEX(mutex);
+       int ret;
+
+       mutex_lock(&mutex);
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       if (!ret && write)
+               timers_update_migration(false);
+       mutex_unlock(&mutex);
+       return ret;
 }
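
timer_migration_handler() serializes writes to the kernel.timer_migration
sysctl and re-evaluates the per-cpu state afterwards. A sketch of the
corresponding ctl_table entry, assuming the usual kernel/sysctl.c wiring
(field values quoted from memory):

    {
            .procname       = "timer_migration",
            .data           = &sysctl_timer_migration,
            .maxlen         = sizeof(unsigned int),
            .mode           = 0644,
            .proc_handler   = timer_migration_handler,
    },

From userspace, "echo 0 > /proc/sys/kernel/timer_migration" disables
migration; the static mutex keeps concurrent writers from racing the
per-cpu update loop.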
 
-static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
+static inline struct tvec_base *get_target_base(struct tvec_base *base,
+                                               int pinned)
 {
-       return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
+       if (pinned || !base->migration_enabled)
+               return this_cpu_ptr(&tvec_bases);
+       return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
 }
-
-static inline void
-timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
+#else
+static inline struct tvec_base *get_target_base(struct tvec_base *base,
+                                               int pinned)
 {
-       unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;
-
-       timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
+       return this_cpu_ptr(&tvec_bases);
 }
+#endif
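
get_target_base() is what __mod_timer() consults below: a pinned timer,
or any timer while migration is disabled, stays on the local base;
otherwise get_nohz_timer_target() may pick a busy CPU. A hedged usage
sketch with the 4.2-era API (my_timer is a hypothetical, already
initialized timer):

    /* Keep the timer on this CPU even when kernel.timer_migration is on: */
    mod_timer_pinned(&my_timer, jiffies + HZ / 10);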
 
 static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
@@ -349,26 +370,12 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
 }
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
-/*
- * If the list is empty, catch up ->timer_jiffies to the current time.
- * The caller must hold the tvec_base lock.  Returns true if the list
- * was empty and therefore ->timer_jiffies was updated.
- */
-static bool catchup_timer_jiffies(struct tvec_base *base)
-{
-       if (!base->all_timers) {
-               base->timer_jiffies = jiffies;
-               return true;
-       }
-       return false;
-}
-
 static void
 __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 {
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
-       struct list_head *vec;
+       struct hlist_head *vec;
 
        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
@@ -401,25 +408,25 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
-       /*
-        * Timers are FIFO:
-        */
-       list_add_tail(&timer->entry, vec);
+
+       hlist_add_head(&timer->entry, vec);
 }
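
A worked example of the slot selection above, assuming the default
CONFIG_BASE_FULL geometry (TVR_BITS = 8, TVR_SIZE = 256, TVN_BITS = 6,
TVN_MASK = 63):

    /* base->timer_jiffies = 1000, timer->expires = 1100:
     *   idx  = 1100 - 1000 = 100     -> idx < TVR_SIZE, first level
     *   slot = 1100 & TVR_MASK = 76  -> hlist_add_head() onto tv1.vec[76]
     *
     * With expires = 2000, idx = 1000 >= TVR_SIZE, so the timer lands
     * in tv2 at slot (2000 >> TVR_BITS) & TVN_MASK = 7. */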
 
 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 {
-       (void)catchup_timer_jiffies(base);
+       /* Advance base->timer_jiffies if the base is empty */
+       if (!base->all_timers++)
+               base->timer_jiffies = jiffies;
+
        __internal_add_timer(base, timer);
        /*
         * Update base->active_timers and base->next_timer
         */
-       if (!tbase_get_deferrable(timer->base)) {
+       if (!(timer->flags & TIMER_DEFERRABLE)) {
                if (!base->active_timers++ ||
                    time_before(timer->expires, base->next_timer))
                        base->next_timer = timer->expires;
        }
-       base->all_timers++;
 
        /*
         * Check whether the other CPU is in dynticks mode and needs
@@ -434,8 +441,11 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
         * require special care against races with idle_cpu(), let's deal
         * with that later.
         */
-       if (!tbase_get_deferrable(base) || tick_nohz_full_cpu(base->cpu))
-               wake_up_nohz_cpu(base->cpu);
+       if (base->nohz_active) {
+               if (!(timer->flags & TIMER_DEFERRABLE) ||
+                   tick_nohz_full_cpu(base->cpu))
+                       wake_up_nohz_cpu(base->cpu);
+       }
 }
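
The wakeup above is skipped for deferrable timers unless the target is a
nohz_full CPU. A minimal sketch of arming a deferrable timer with the
4.2-era API (my_timer and my_timer_fn are illustrative names):

    #include <linux/timer.h>

    static struct timer_list my_timer;

    static void my_timer_fn(unsigned long data)
    {
            /* runs from the timer softirq; may be delayed while idle */
    }

    static void arm_my_timer(void)
    {
            init_timer_deferrable(&my_timer);   /* sets TIMER_DEFERRABLE */
            my_timer.function = my_timer_fn;
            my_timer.data     = 0;
            mod_timer(&my_timer, jiffies + HZ); /* won't wake an idle CPU */
    }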
 
 #ifdef CONFIG_TIMER_STATS
@@ -451,15 +461,12 @@ void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
 
 static void timer_stats_account_timer(struct timer_list *timer)
 {
-       unsigned int flag = 0;
-
        if (likely(!timer->start_site))
                return;
-       if (unlikely(tbase_get_deferrable(timer->base)))
-               flag |= TIMER_STATS_FLAG_DEFERRABLE;
 
        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
-                                timer->function, timer->start_comm, flag);
+                                timer->function, timer->start_comm,
+                                timer->flags);
 }
 
 #else
@@ -516,8 +523,8 @@ static int timer_fixup_activate(void *addr, enum debug_obj_state state)
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
-               if (timer->entry.next == NULL &&
-                   timer->entry.prev == TIMER_ENTRY_STATIC) {
+               if (timer->entry.pprev == NULL &&
+                   timer->entry.next == TIMER_ENTRY_STATIC) {
                        debug_object_init(timer, &timer_debug_descr);
                        debug_object_activate(timer, &timer_debug_descr);
                        return 0;
@@ -563,7 +570,7 @@ static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
 
        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
-               if (timer->entry.prev == TIMER_ENTRY_STATIC) {
+               if (timer->entry.next == TIMER_ENTRY_STATIC) {
                        /*
                         * This is not really a fixup. The timer was
                         * statically initialized. We just make sure that it
@@ -648,7 +655,7 @@ static inline void
 debug_activate(struct timer_list *timer, unsigned long expires)
 {
        debug_timer_activate(timer);
-       trace_timer_start(timer, expires);
+       trace_timer_start(timer, expires, timer->flags);
 }
 
 static inline void debug_deactivate(struct timer_list *timer)
@@ -665,10 +672,8 @@ static inline void debug_assert_init(struct timer_list *timer)
 static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key)
 {
-       struct tvec_base *base = raw_cpu_read(tvec_bases);
-
-       timer->entry.next = NULL;
-       timer->base = (void *)((unsigned long)base | flags);
+       timer->entry.pprev = NULL;
+       timer->flags = flags | raw_smp_processor_id();
        timer->slack = -1;
 #ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
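
With timer->base gone, timer->flags packs the owning CPU and the flag
bits into one word, which is why do_init_timer() ORs in
raw_smp_processor_id(). The 4.2-era layout from <linux/timer.h> (values
quoted from memory, worth double-checking):

    #define TIMER_CPUMASK    0x0003FFFF  /* bits 0-17: CPU of the base */
    #define TIMER_MIGRATING  0x00040000  /* base switch in progress    */
    #define TIMER_BASEMASK   (TIMER_CPUMASK | TIMER_MIGRATING)
    #define TIMER_DEFERRABLE 0x00080000  /* don't wake an idle CPU     */
    #define TIMER_IRQSAFE    0x00100000  /* callback runs irqs-off     */
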
@@ -699,24 +704,23 @@ EXPORT_SYMBOL(init_timer_key);
 
 static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 {
-       struct list_head *entry = &timer->entry;
+       struct hlist_node *entry = &timer->entry;
 
        debug_deactivate(timer);
 
-       __list_del(entry->prev, entry->next);
+       __hlist_del(entry);
        if (clear_pending)
-               entry->next = NULL;
-       entry->prev = LIST_POISON2;
+               entry->pprev = NULL;
+       entry->next = LIST_POISON2;
 }
 
 static inline void
 detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
 {
        detach_timer(timer, true);
-       if (!tbase_get_deferrable(timer->base))
+       if (!(timer->flags & TIMER_DEFERRABLE))
                base->active_timers--;
        base->all_timers--;
-       (void)catchup_timer_jiffies(base);
 }
 
 static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
@@ -726,13 +730,14 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
                return 0;
 
        detach_timer(timer, clear_pending);
-       if (!tbase_get_deferrable(timer->base)) {
+       if (!(timer->flags & TIMER_DEFERRABLE)) {
                base->active_timers--;
                if (timer->expires == base->next_timer)
                        base->next_timer = base->timer_jiffies;
        }
-       base->all_timers--;
-       (void)catchup_timer_jiffies(base);
+       /* If this was the last timer, advance base->timer_jiffies */
+       if (!--base->all_timers)
+               base->timer_jiffies = jiffies;
        return 1;
 }
 
@@ -744,24 +749,22 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
  * So __run_timers/migrate_timers can safely modify all timers which could
  * be found on ->tvX lists.
  *
- * When the timer's base is locked, and the timer removed from list, it is
- * possible to set timer->base = NULL and drop the lock: the timer remains
- * locked.
+ * When a timer is migrating between bases, the TIMER_MIGRATING flag is
+ * set in timer->flags and lock_timer_base() spins until the flag is
+ * cleared and the final base can be locked.
  */
 static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
        __acquires(timer->base->lock)
 {
-       struct tvec_base *base;
-
        for (;;) {
-               struct tvec_base *prelock_base = timer->base;
-               base = tbase_get_base(prelock_base);
-               if (likely(base != NULL)) {
+               u32 tf = timer->flags;
+               struct tvec_base *base;
+
+               if (!(tf & TIMER_MIGRATING)) {
+                       base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
                        spin_lock_irqsave(&base->lock, *flags);
-                       if (likely(prelock_base == timer->base))
+                       if (timer->flags == tf)
                                return base;
-                       /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
@@ -770,11 +773,11 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 
 static inline int
 __mod_timer(struct timer_list *timer, unsigned long expires,
-                                               bool pending_only, int pinned)
+           bool pending_only, int pinned)
 {
        struct tvec_base *base, *new_base;
        unsigned long flags;
-       int ret = 0 , cpu;
+       int ret = 0;
 
        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);
@@ -787,8 +790,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
        debug_activate(timer, expires);
 
-       cpu = get_nohz_timer_target(pinned);
-       new_base = per_cpu(tvec_bases, cpu);
+       new_base = get_target_base(base, pinned);
 
        if (base != new_base) {
                /*
@@ -800,11 +802,13 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
-                       timer_set_base(timer, NULL);
+                       timer->flags |= TIMER_MIGRATING;
+
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
-                       timer_set_base(timer, base);
+                       timer->flags &= ~TIMER_BASEMASK;
+                       timer->flags |= base->cpu;
                }
        }
 
@@ -966,13 +970,13 @@ EXPORT_SYMBOL(add_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-       struct tvec_base *base = per_cpu(tvec_bases, cpu);
+       struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
        unsigned long flags;
 
        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
-       timer_set_base(timer, base);
+       timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
        debug_activate(timer, timer->expires);
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
@@ -1037,8 +1041,6 @@ int try_to_del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
 #ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
-
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
@@ -1093,7 +1095,7 @@ int del_timer_sync(struct timer_list *timer)
         * don't use it in hardirq context, because it
         * could lead to deadlock.
         */
-       WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
+       WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
@@ -1107,17 +1109,17 @@ EXPORT_SYMBOL(del_timer_sync);
 static int cascade(struct tvec_base *base, struct tvec *tv, int index)
 {
        /* cascade all the timers from tv up one level */
-       struct timer_list *timer, *tmp;
-       struct list_head tv_list;
+       struct timer_list *timer;
+       struct hlist_node *tmp;
+       struct hlist_head tv_list;
 
-       list_replace_init(tv->vec + index, &tv_list);
+       hlist_move_list(tv->vec + index, &tv_list);
 
        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
-       list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
-               BUG_ON(tbase_get_base(timer->base) != base);
+       hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                /* No accounting while moving them */
                __internal_add_timer(base, timer);
        }
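
For orientation, cascade() is driven from __run_timers() below through
the INDEX() helper (unchanged by this patch), which selects the
just-expired slot of wheel level N+2:

    #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

Each call empties one upper-level bucket via hlist_move_list() and
re-sorts every timer into a lower level; the separate hlist_node cursor
is needed because hlist_for_each_entry_safe() takes a node, not an
entry, as its scratch variable.
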
@@ -1182,14 +1184,18 @@ static inline void __run_timers(struct tvec_base *base)
        struct timer_list *timer;
 
        spin_lock_irq(&base->lock);
-       if (catchup_timer_jiffies(base)) {
-               spin_unlock_irq(&base->lock);
-               return;
-       }
+
        while (time_after_eq(jiffies, base->timer_jiffies)) {
-               struct list_head work_list;
-               struct list_head *head = &work_list;
-               int index = base->timer_jiffies & TVR_MASK;
+               struct hlist_head work_list;
+               struct hlist_head *head = &work_list;
+               int index;
+
+               if (!base->all_timers) {
+                       base->timer_jiffies = jiffies;
+                       break;
+               }
+
+               index = base->timer_jiffies & TVR_MASK;
 
                /*
                 * Cascade timers:
@@ -1200,16 +1206,16 @@ static inline void __run_timers(struct tvec_base *base)
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
-               list_replace_init(base->tv1.vec + index, head);
-               while (!list_empty(head)) {
+               hlist_move_list(base->tv1.vec + index, head);
+               while (!hlist_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
                        bool irqsafe;
 
-                       timer = list_first_entry(head, struct timer_list,entry);
+                       timer = hlist_entry(head->first, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;
-                       irqsafe = tbase_get_irqsafe(timer->base);
+                       irqsafe = timer->flags & TIMER_IRQSAFE;
 
                        timer_stats_account_timer(timer);
 
@@ -1248,8 +1254,8 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
-               list_for_each_entry(nte, base->tv1.vec + slot, entry) {
-                       if (tbase_get_deferrable(nte->base))
+               hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
+                       if (nte->flags & TIMER_DEFERRABLE)
                                continue;
 
                        found = 1;
@@ -1279,8 +1285,8 @@ cascade:
 
                index = slot = timer_jiffies & TVN_MASK;
                do {
-                       list_for_each_entry(nte, varp->vec + slot, entry) {
-                               if (tbase_get_deferrable(nte->base))
+                       hlist_for_each_entry(nte, varp->vec + slot, entry) {
+                               if (nte->flags & TIMER_DEFERRABLE)
                                        continue;
 
                                found = 1;
@@ -1311,54 +1317,48 @@ cascade:
  * Check if the next hrtimer event is before the next timer wheel
  * event:
  */
-static unsigned long cmp_next_hrtimer_event(unsigned long now,
-                                           unsigned long expires)
+static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
 {
-       ktime_t hr_delta = hrtimer_get_next_event();
-       struct timespec tsdelta;
-       unsigned long delta;
-
-       if (hr_delta.tv64 == KTIME_MAX)
-               return expires;
+       u64 nextevt = hrtimer_get_next_event();
 
        /*
-        * Expired timer available, let it expire in the next tick
+        * If high resolution timers are enabled,
+        * hrtimer_get_next_event() returns KTIME_MAX.
         */
-       if (hr_delta.tv64 <= 0)
-               return now + 1;
-
-       tsdelta = ktime_to_timespec(hr_delta);
-       delta = timespec_to_jiffies(&tsdelta);
+       if (expires <= nextevt)
+               return expires;
 
        /*
-        * Limit the delta to the max value, which is checked in
-        * tick_nohz_stop_sched_tick():
+        * If the next timer is already expired, return the tick base
+        * time so the tick is fired immediately.
         */
-       if (delta > NEXT_TIMER_MAX_DELTA)
-               delta = NEXT_TIMER_MAX_DELTA;
+       if (nextevt <= basem)
+               return basem;
 
        /*
-        * Take rounding errors in to account and make sure, that it
-        * expires in the next tick. Otherwise we go into an endless
-        * ping pong due to tick_nohz_stop_sched_tick() retriggering
-        * the timer softirq
+        * Round up to the next jiffy. High resolution timers are
+        * off, so the hrtimers are expired in the tick and we need to
+        * make sure that this tick really expires the timer to avoid
+        * a ping pong of the nohz stop code.
+        *
+        * Use DIV_ROUND_UP_ULL to prevent gcc from calling __divdi3
         */
-       if (delta < 1)
-               delta = 1;
-       now += delta;
-       if (time_before(now, expires))
-               return now;
-       return expires;
+       return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
 }
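
A quick arithmetic check of that rounding, taking HZ = 1000 (TICK_NSEC =
1,000,000 ns) as an illustrative configuration:

    /* nextevt = 2,300,000 ns:
     *   DIV_ROUND_UP_ULL(2300000, 1000000) = 3
     *   3 * TICK_NSEC = 3,000,000 ns
     * i.e. the first tick edge at or after the hrtimer expiry, so the
     * tick that fires there is guaranteed to expire the timer. */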
 
 /**
- * get_next_timer_interrupt - return the jiffy of the next pending timer
- * @now: current time (in jiffies)
+ * get_next_timer_interrupt - return the time (clock mono) of the next timer
+ * @basej:     base time jiffies
+ * @basem:     base time clock monotonic
+ *
+ * Returns the tick aligned clock monotonic time of the next pending
+ * timer or KTIME_MAX if no timer is pending.
  */
-unsigned long get_next_timer_interrupt(unsigned long now)
+u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
-       struct tvec_base *base = __this_cpu_read(tvec_bases);
-       unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
+       struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+       u64 expires = KTIME_MAX;
+       unsigned long nextevt;
 
        /*
         * Pretend that there is no timer pending if the cpu is offline.
@@ -1371,14 +1371,15 @@ unsigned long get_next_timer_interrupt(unsigned long now)
        if (base->active_timers) {
                if (time_before_eq(base->next_timer, base->timer_jiffies))
                        base->next_timer = __next_timer_interrupt(base);
-               expires = base->next_timer;
+               nextevt = base->next_timer;
+               if (time_before_eq(nextevt, basej))
+                       expires = basem;
+               else
+                       expires = basem + (nextevt - basej) * TICK_NSEC;
        }
        spin_unlock(&base->lock);
 
-       if (time_before_eq(expires, now))
-               return now;
-
-       return cmp_next_hrtimer_event(now, expires);
+       return cmp_next_hrtimer_event(basem, expires);
 }
 #endif
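
The jiffies-to-monotonic conversion above can be sanity-checked with
illustrative numbers, again taking HZ = 1000:

    /* basej = 5000, basem = 5,000,000,000 ns, base->next_timer = 5004:
     *   expires = basem + (5004 - 5000) * TICK_NSEC = 5,004,000,000 ns
     * An already-expired timer (nextevt <= basej) returns basem itself,
     * so the tick fires immediately. */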
 
@@ -1407,9 +1408,7 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-       struct tvec_base *base = __this_cpu_read(tvec_bases);
-
-       hrtimer_run_pending();
+       struct tvec_base *base = this_cpu_ptr(&tvec_bases);
 
        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
@@ -1545,15 +1544,16 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
+static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
 {
        struct timer_list *timer;
+       int cpu = new_base->cpu;
 
-       while (!list_empty(head)) {
-               timer = list_first_entry(head, struct timer_list, entry);
+       while (!hlist_empty(head)) {
+               timer = hlist_entry(head->first, struct timer_list, entry);
                /* We ignore the accounting on the dying cpu */
                detach_timer(timer, false);
-               timer_set_base(timer, new_base);
+               timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
                internal_add_timer(new_base, timer);
        }
 }
@@ -1565,8 +1565,8 @@ static void migrate_timers(int cpu)
        int i;
 
        BUG_ON(cpu_online(cpu));
-       old_base = per_cpu(tvec_bases, cpu);
-       new_base = get_cpu_var(tvec_bases);
+       old_base = per_cpu_ptr(&tvec_bases, cpu);
+       new_base = this_cpu_ptr(&tvec_bases);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
@@ -1590,7 +1590,6 @@ static void migrate_timers(int cpu)
 
        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
-       put_cpu_var(tvec_bases);
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
@@ -1616,52 +1615,27 @@ static inline void timer_register_cpu_notifier(void)
 static inline void timer_register_cpu_notifier(void) { }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static void __init init_timer_cpu(struct tvec_base *base, int cpu)
+static void __init init_timer_cpu(int cpu)
 {
-       int j;
-
-       BUG_ON(base != tbase_get_base(base));
+       struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
 
        base->cpu = cpu;
-       per_cpu(tvec_bases, cpu) = base;
        spin_lock_init(&base->lock);
 
-       for (j = 0; j < TVN_SIZE; j++) {
-               INIT_LIST_HEAD(base->tv5.vec + j);
-               INIT_LIST_HEAD(base->tv4.vec + j);
-               INIT_LIST_HEAD(base->tv3.vec + j);
-               INIT_LIST_HEAD(base->tv2.vec + j);
-       }
-       for (j = 0; j < TVR_SIZE; j++)
-               INIT_LIST_HEAD(base->tv1.vec + j);
-
        base->timer_jiffies = jiffies;
        base->next_timer = base->timer_jiffies;
 }
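
The INIT_LIST_HEAD() loops could be dropped because an hlist head is
empty when its single pointer is NULL, which the zeroed per-cpu storage
of tvec_bases already provides (a list_head, by contrast, must point at
itself). From <linux/list.h>:

    static inline int hlist_empty(const struct hlist_head *h)
    {
            return !h->first;
    }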
 
 static void __init init_timer_cpus(void)
 {
-       struct tvec_base *base;
-       int local_cpu = smp_processor_id();
        int cpu;
 
-       for_each_possible_cpu(cpu) {
-               if (cpu == local_cpu)
-                       base = &boot_tvec_bases;
-#ifdef CONFIG_SMP
-               else
-                       base = per_cpu_ptr(&__tvec_bases, cpu);
-#endif
-
-               init_timer_cpu(base, cpu);
-       }
+       for_each_possible_cpu(cpu)
+               init_timer_cpu(cpu);
 }
 
 void __init init_timers(void)
 {
-       /* ensure there are enough low bits for flags in timer->base pointer */
-       BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
-
        init_timer_cpus();
        init_timer_stats();
        timer_register_cpu_notifier();
@@ -1697,14 +1671,14 @@ unsigned long msleep_interruptible(unsigned int msecs)
 
 EXPORT_SYMBOL(msleep_interruptible);
 
-static int __sched do_usleep_range(unsigned long min, unsigned long max)
+static void __sched do_usleep_range(unsigned long min, unsigned long max)
 {
        ktime_t kmin;
        unsigned long delta;
 
        kmin = ktime_set(0, min * NSEC_PER_USEC);
        delta = (max - min) * NSEC_PER_USEC;
-       return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
+       schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
 }
 
 /**
@@ -1712,7 +1686,7 @@ static int __sched do_usleep_range(unsigned long min, unsigned long max)
  * @min: Minimum time in usecs to sleep
  * @max: Maximum time in usecs to sleep
  */
-void usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range(unsigned long min, unsigned long max)
 {
        __set_current_state(TASK_UNINTERRUPTIBLE);
        do_usleep_range(min, max);
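
A typical caller, for completeness (values illustrative): sleep at least
100 us while giving the scheduler up to 200 us of coalescing slack:

    usleep_range(100, 200);

The __sched annotation added above keeps usleep_range() out of the
wchan/stack-trace output for sleeping tasks, so they report their real
caller instead.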