Merge branch 'irq-fixes-for-linus-4' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 31 Dec 2008 17:00:59 +0000 (09:00 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 31 Dec 2008 17:00:59 +0000 (09:00 -0800)
* 'irq-fixes-for-linus-4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sparseirq: move __weak symbols into separate compilation unit
  sparseirq: work around __weak alias bug
  sparseirq: fix hang with !SPARSE_IRQ
  sparseirq: set lock_class for legacy irq when sparse_irq is selected
  sparseirq: work around compiler optimizing away __weak functions
  sparseirq: fix desc->lock init
  sparseirq: do not printk when migrating IRQ descriptors
  sparseirq: remove duplicated arch_early_irq_init()
  irq: simplify for_each_irq_desc() usage
  proc: remove ifdef CONFIG_SPARSE_IRQ from stat.c
  irq: for_each_irq_desc() move to irqnr.h
  hrtimer: remove #include <linux/irq.h>
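
Several of the sparseirq items above (moving the __weak symbols into a separate compilation unit, the __weak alias bug, and the compiler optimizing away __weak functions) are one fix seen from different angles: the setup hooks gain an int return value, and their __weak defaults move out of init/main.c into kernel/softirq.c (the last file in this diff). When a __weak default sits in the same translation unit as its caller, GCC can bind or even inline the call to the weak body at compile time, so an architecture's strong definition never takes effect; keeping the defaults in a separate compilation unit pushes the choice to link time, where a strong symbol beats a weak one. A minimal userspace illustration of that weak/strong mechanic (file names and the gcc command line are illustrative, not part of the commits):

/* weak_default.c -- fallback, analogous to the stubs added to kernel/softirq.c */
int __attribute__((weak)) arch_early_irq_init(void)
{
        return 0;
}

/* strong_override.c -- stand-in for an architecture's real implementation */
int arch_early_irq_init(void)
{
        return 42;
}

/* main.c */
#include <stdio.h>

int arch_early_irq_init(void);

int main(void)
{
        /* prints 42 when strong_override.o is linked in, 0 when only the weak default is */
        printf("%d\n", arch_early_irq_init());
        return 0;
}

Built as, say, gcc main.c weak_default.c strong_override.c, the strong definition wins; drop strong_override.c from the link and the weak default is used.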

1  2 
arch/x86/kernel/io_apic.c
include/linux/interrupt.h
init/main.c
kernel/hrtimer.c
kernel/softirq.c

index f6ea94b74da146072cca138aa824ed5a3d5eeec7,976039377846bc62cba1d1fd840a000a4db17ec3..74917658b004aea2eb08dca5c5c9ba8a775eb81e
@@@ -170,7 -170,7 +170,7 @@@ static struct irq_cfg irq_cfgx[NR_IRQS
        [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
  };
  
- void __init arch_early_irq_init(void)
+ int __init arch_early_irq_init(void)
  {
        struct irq_cfg *cfg;
        struct irq_desc *desc;
                desc = irq_to_desc(i);
                desc->chip_data = &cfg[i];
        }
+       return 0;
  }
  
  #ifdef CONFIG_SPARSE_IRQ
@@@ -212,7 -214,7 +214,7 @@@ static struct irq_cfg *get_one_free_irq
        return cfg;
  }
  
- void arch_init_chip_data(struct irq_desc *desc, int cpu)
+ int arch_init_chip_data(struct irq_desc *desc, int cpu)
  {
        struct irq_cfg *cfg;
  
                        BUG_ON(1);
                }
        }
+       return 0;
  }
  
  #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
@@@ -1345,8 -1349,6 +1349,6 @@@ void __setup_vector_irq(int cpu
  
        /* Mark the inuse vectors */
        for_each_irq_desc(irq, desc) {
-               if (!desc)
-                       continue;
                cfg = desc->chip_data;
                if (!cpu_isset(cpu, cfg->domain))
                        continue;
@@@ -1730,8 -1732,6 +1732,6 @@@ __apicdebuginit(void) print_IO_APIC(voi
        for_each_irq_desc(irq, desc) {
                struct irq_pin_list *entry;
  
-               if (!desc)
-                       continue;
                cfg = desc->chip_data;
                entry = cfg->irq_2_pin;
                if (!entry)
@@@ -2378,9 -2378,6 +2378,6 @@@ static void ir_irq_migration(struct wor
        struct irq_desc *desc;
  
        for_each_irq_desc(irq, desc) {
-               if (!desc)
-                       continue;
                if (desc->status & IRQ_MOVE_PENDING) {
                        unsigned long flags;
  
@@@ -2423,9 -2420,10 +2420,9 @@@ static void set_ir_ioapic_affinity_irq(
  asmlinkage void smp_irq_move_cleanup_interrupt(void)
  {
        unsigned vector, me;
 +
        ack_APIC_irq();
 -#ifdef CONFIG_X86_64
        exit_idle();
 -#endif
        irq_enter();
  
        me = smp_processor_id();
@@@ -2670,9 -2668,6 +2667,6 @@@ static inline void init_IO_APIC_traps(v
         * 0x80, because int 0x80 is hm, kind of importantish. ;)
         */
        for_each_irq_desc(irq, desc) {
-               if (!desc)
-                       continue;
                cfg = desc->chip_data;
                if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
                        /*
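
The NULL checks deleted throughout io_apic.c above come from the "simplify for_each_irq_desc() usage" change: the iterator no longer hands callers a NULL descriptor, so the per-loop guards go away. A minimal before/after sketch of the caller-side pattern (identifiers as in the hunks; not a complete function):

/* before: every walker guarded against holes in the descriptor array */
for_each_irq_desc(irq, desc) {
        if (!desc)
                continue;
        cfg = desc->chip_data;
        /* ... */
}

/* after: the iterator only yields allocated descriptors */
for_each_irq_desc(irq, desc) {
        cfg = desc->chip_data;
        /* ... */
}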
index be3c484b5242555082763718f4818eacdb37f020,d9a370325ae25ee584d0cc8023c47f560439d671..8cc8ef47f5b63ac3f7460214bdebe346a9b0de7d
@@@ -253,6 -253,9 +253,6 @@@ enu
        BLOCK_SOFTIRQ,
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
 -#ifdef CONFIG_HIGH_RES_TIMERS
 -      HRTIMER_SOFTIRQ,
 -#endif
        RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */
  
        NR_SOFTIRQS
@@@ -464,4 -467,10 +464,10 @@@ static inline void init_irq_proc(void
  
  int show_interrupts(struct seq_file *p, void *v);
  
+ struct irq_desc;
+ extern int early_irq_init(void);
+ extern int arch_early_irq_init(void);
+ extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
  #endif
diff --combined init/main.c
index 2a7ce0f8e45353af2204c017c1921afdd0ba5c91,2c183abbf61c47f14143ceef15f22ee54a97d966..f5e64f20d2b0f40bef664d3ffc2c1c11e384bdeb
@@@ -63,7 -63,6 +63,7 @@@
  #include <linux/signal.h>
  #include <linux/idr.h>
  #include <linux/ftrace.h>
 +#include <trace/boot.h>
  
  #include <asm/io.h>
  #include <asm/bugs.h>
@@@ -540,15 -539,6 +540,6 @@@ void __init __weak thread_info_cache_in
  {
  }
  
- void __init __weak arch_early_irq_init(void)
- {
- }
- void __init __weak early_irq_init(void)
- {
-       arch_early_irq_init();
- }
  asmlinkage void __init start_kernel(void)
  {
        char * command_line;
                efi_enter_virtual_mode();
  #endif
        thread_info_cache_init();
 +      cred_init();
        fork_init(num_physpages);
        proc_caches_init();
        buffer_init();
@@@ -716,35 -705,31 +707,35 @@@ core_param(initcall_debug, initcall_deb
  int do_one_initcall(initcall_t fn)
  {
        int count = preempt_count();
 -      ktime_t delta;
 +      ktime_t calltime, delta, rettime;
        char msgbuf[64];
 -      struct boot_trace it;
 +      struct boot_trace_call call;
 +      struct boot_trace_ret ret;
  
        if (initcall_debug) {
 -              it.caller = task_pid_nr(current);
 -              printk("calling  %pF @ %i\n", fn, it.caller);
 -              it.calltime = ktime_get();
 +              call.caller = task_pid_nr(current);
 +              printk("calling  %pF @ %i\n", fn, call.caller);
 +              calltime = ktime_get();
 +              trace_boot_call(&call, fn);
 +              enable_boot_trace();
        }
  
 -      it.result = fn();
 +      ret.result = fn();
  
        if (initcall_debug) {
 -              it.rettime = ktime_get();
 -              delta = ktime_sub(it.rettime, it.calltime);
 -              it.duration = (unsigned long long) delta.tv64 >> 10;
 +              disable_boot_trace();
 +              rettime = ktime_get();
 +              delta = ktime_sub(rettime, calltime);
 +              ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
 +              trace_boot_ret(&ret, fn);
                printk("initcall %pF returned %d after %Ld usecs\n", fn,
 -                      it.result, it.duration);
 -              trace_boot(&it, fn);
 +                      ret.result, ret.duration);
        }
  
        msgbuf[0] = 0;
  
 -      if (it.result && it.result != -ENODEV && initcall_debug)
 -              sprintf(msgbuf, "error code %d ", it.result);
 +      if (ret.result && ret.result != -ENODEV && initcall_debug)
 +              sprintf(msgbuf, "error code %d ", ret.result);
  
        if (preempt_count() != count) {
                strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
                printk("initcall %pF returned with %s\n", fn, msgbuf);
        }
  
 -      return it.result;
 +      return ret.result;
  }
  
  
@@@ -899,7 -884,7 +890,7 @@@ static int __init kernel_init(void * un
         * we're essentially up and running. Get rid of the
         * initmem segments and start the user-mode stuff..
         */
 -      stop_boot_trace();
 +
        init_post();
        return 0;
  }
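
A side note on the do_one_initcall() hunk above: the printed duration is computed as (unsigned long long) ktime_to_ns(delta) >> 10, i.e. nanoseconds divided by 1024 as a cheap approximation of microseconds (the same trick the old delta.tv64 >> 10 code used, just spelled via the accessor). The shift slightly under-reports, since it divides by 1024 rather than 1000. A standalone sketch of the arithmetic, with a made-up delta purely for illustration:

#include <stdio.h>

int main(void)
{
        long long delta_ns = 5000000;             /* pretend the initcall took 5 ms */
        long long usecs_approx = delta_ns >> 10;  /* what gets printed: ns / 1024 = 4882 */
        long long usecs_exact  = delta_ns / 1000; /* exact microseconds: 5000 */

        printf("approx %lld usecs, exact %lld usecs\n", usecs_approx, usecs_exact);
        return 0;
}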
diff --combined kernel/hrtimer.c
index bda9cb92427673a4c2e47676596e854d57d7dca3,0ad3f3d6d10d7ef0be062ad6c015d6f5686bfcf8..eb2bfefa6dcc5ac4f332b60af2e7d76f1f7de543
@@@ -32,7 -32,6 +32,6 @@@
   */
  
  #include <linux/cpu.h>
- #include <linux/irq.h>
  #include <linux/module.h>
  #include <linux/percpu.h>
  #include <linux/hrtimer.h>
@@@ -442,6 -441,22 +441,6 @@@ static inline void debug_hrtimer_activa
  static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
  #endif
  
 -/*
 - * Check, whether the timer is on the callback pending list
 - */
 -static inline int hrtimer_cb_pending(const struct hrtimer *timer)
 -{
 -      return timer->state & HRTIMER_STATE_PENDING;
 -}
 -
 -/*
 - * Remove a timer from the callback pending list
 - */
 -static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
 -{
 -      list_del_init(&timer->cb_entry);
 -}
 -
  /* High resolution timer related functions */
  #ifdef CONFIG_HIGH_RES_TIMERS
  
@@@ -635,8 -650,6 +634,8 @@@ static inline void hrtimer_init_timer_h
  {
  }
  
 +static void __run_hrtimer(struct hrtimer *timer);
 +
  /*
   * When High resolution timers are active, try to reprogram. Note, that in case
   * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@@ -647,14 -660,31 +646,14 @@@ static inline int hrtimer_enqueue_repro
                                            struct hrtimer_clock_base *base)
  {
        if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
 -
 -              /* Timer is expired, act upon the callback mode */
 -              switch(timer->cb_mode) {
 -              case HRTIMER_CB_IRQSAFE_PERCPU:
 -              case HRTIMER_CB_IRQSAFE_UNLOCKED:
 -                      /*
 -                       * This is solely for the sched tick emulation with
 -                       * dynamic tick support to ensure that we do not
 -                       * restart the tick right on the edge and end up with
 -                       * the tick timer in the softirq ! The calling site
 -                       * takes care of this. Also used for hrtimer sleeper !
 -                       */
 -                      debug_hrtimer_deactivate(timer);
 -                      return 1;
 -              case HRTIMER_CB_SOFTIRQ:
 -                      /*
 -                       * Move everything else into the softirq pending list !
 -                       */
 -                      list_add_tail(&timer->cb_entry,
 -                                    &base->cpu_base->cb_pending);
 -                      timer->state = HRTIMER_STATE_PENDING;
 -                      return 1;
 -              default:
 -                      BUG();
 -              }
 +              /*
 +               * XXX: recursion check?
 +               * hrtimer_forward() should round up with timer granularity
 +               * so that we never get into inf recursion here,
 +               * it doesn't do that though
 +               */
 +              __run_hrtimer(timer);
 +              return 1;
        }
        return 0;
  }
@@@ -693,6 -723,11 +692,6 @@@ static int hrtimer_switch_to_hres(void
        return 1;
  }
  
 -static inline void hrtimer_raise_softirq(void)
 -{
 -      raise_softirq(HRTIMER_SOFTIRQ);
 -}
 -
  #else
  
  static inline int hrtimer_hres_active(void) { return 0; }
@@@ -711,6 -746,7 +710,6 @@@ static inline int hrtimer_reprogram(str
  {
        return 0;
  }
 -static inline void hrtimer_raise_softirq(void) { }
  
  #endif /* CONFIG_HIGH_RES_TIMERS */
  
@@@ -853,7 -889,10 +852,7 @@@ static void __remove_hrtimer(struct hrt
                             struct hrtimer_clock_base *base,
                             unsigned long newstate, int reprogram)
  {
 -      /* High res. callback list. NOP for !HIGHRES */
 -      if (hrtimer_cb_pending(timer))
 -              hrtimer_remove_cb_pending(timer);
 -      else {
 +      if (timer->state & HRTIMER_STATE_ENQUEUED) {
                /*
                 * Remove the timer from the rbtree and replace the
                 * first entry pointer if necessary.
@@@ -913,7 -952,7 +912,7 @@@ hrtimer_start_range_ns(struct hrtimer *
  {
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
 -      int ret, raise;
 +      int ret;
  
        base = lock_hrtimer_base(timer, &flags);
  
        enqueue_hrtimer(timer, new_base,
                        new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
  
 -      /*
 -       * The timer may be expired and moved to the cb_pending
 -       * list. We can not raise the softirq with base lock held due
 -       * to a possible deadlock with runqueue lock.
 -       */
 -      raise = timer->state == HRTIMER_STATE_PENDING;
 -
 -      /*
 -       * We use preempt_disable to prevent this task from migrating after
 -       * setting up the softirq and raising it. Otherwise, if me migrate
 -       * we will raise the softirq on the wrong CPU.
 -       */
 -      preempt_disable();
 -
        unlock_hrtimer_base(timer, &flags);
  
 -      if (raise)
 -              hrtimer_raise_softirq();
 -      preempt_enable();
 -
        return ret;
  }
  EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@@ -1134,6 -1191,75 +1133,6 @@@ int hrtimer_get_res(const clockid_t whi
  }
  EXPORT_SYMBOL_GPL(hrtimer_get_res);
  
 -static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
 -{
 -      spin_lock_irq(&cpu_base->lock);
 -
 -      while (!list_empty(&cpu_base->cb_pending)) {
 -              enum hrtimer_restart (*fn)(struct hrtimer *);
 -              struct hrtimer *timer;
 -              int restart;
 -              int emulate_hardirq_ctx = 0;
 -
 -              timer = list_entry(cpu_base->cb_pending.next,
 -                                 struct hrtimer, cb_entry);
 -
 -              debug_hrtimer_deactivate(timer);
 -              timer_stats_account_hrtimer(timer);
 -
 -              fn = timer->function;
 -              /*
 -               * A timer might have been added to the cb_pending list
 -               * when it was migrated during a cpu-offline operation.
 -               * Emulate hardirq context for such timers.
 -               */
 -              if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
 -                  timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
 -                      emulate_hardirq_ctx = 1;
 -
 -              __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
 -              spin_unlock_irq(&cpu_base->lock);
 -
 -              if (unlikely(emulate_hardirq_ctx)) {
 -                      local_irq_disable();
 -                      restart = fn(timer);
 -                      local_irq_enable();
 -              } else
 -                      restart = fn(timer);
 -
 -              spin_lock_irq(&cpu_base->lock);
 -
 -              timer->state &= ~HRTIMER_STATE_CALLBACK;
 -              if (restart == HRTIMER_RESTART) {
 -                      BUG_ON(hrtimer_active(timer));
 -                      /*
 -                       * Enqueue the timer, allow reprogramming of the event
 -                       * device
 -                       */
 -                      enqueue_hrtimer(timer, timer->base, 1);
 -              } else if (hrtimer_active(timer)) {
 -                      /*
 -                       * If the timer was rearmed on another CPU, reprogram
 -                       * the event device.
 -                       */
 -                      struct hrtimer_clock_base *base = timer->base;
 -
 -                      if (base->first == &timer->node &&
 -                          hrtimer_reprogram(timer, base)) {
 -                              /*
 -                               * Timer is expired. Thus move it from tree to
 -                               * pending list again.
 -                               */
 -                              __remove_hrtimer(timer, base,
 -                                               HRTIMER_STATE_PENDING, 0);
 -                              list_add_tail(&timer->cb_entry,
 -                                            &base->cpu_base->cb_pending);
 -                      }
 -              }
 -      }
 -      spin_unlock_irq(&cpu_base->lock);
 -}
 -
  static void __run_hrtimer(struct hrtimer *timer)
  {
        struct hrtimer_clock_base *base = timer->base;
        enum hrtimer_restart (*fn)(struct hrtimer *);
        int restart;
  
 +      WARN_ON(!irqs_disabled());
 +
        debug_hrtimer_deactivate(timer);
        __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
        timer_stats_account_hrtimer(timer);
 -
        fn = timer->function;
 -      if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
 -          timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
 -              /*
 -               * Used for scheduler timers, avoid lock inversion with
 -               * rq->lock and tasklist_lock.
 -               *
 -               * These timers are required to deal with enqueue expiry
 -               * themselves and are not allowed to migrate.
 -               */
 -              spin_unlock(&cpu_base->lock);
 -              restart = fn(timer);
 -              spin_lock(&cpu_base->lock);
 -      } else
 -              restart = fn(timer);
 +
 +      /*
 +       * Because we run timers from hardirq context, there is no chance
 +       * they get migrated to another cpu, therefore its safe to unlock
 +       * the timer base.
 +       */
 +      spin_unlock(&cpu_base->lock);
 +      restart = fn(timer);
 +      spin_lock(&cpu_base->lock);
  
        /*
         * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
@@@ -1180,7 -1310,7 +1179,7 @@@ void hrtimer_interrupt(struct clock_eve
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base;
        ktime_t expires_next, now;
 -      int i, raise = 0;
 +      int i;
  
        BUG_ON(!cpu_base->hres_active);
        cpu_base->nr_events++;
                                break;
                        }
  
 -                      /* Move softirq callbacks to the pending list */
 -                      if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
 -                              __remove_hrtimer(timer, base,
 -                                               HRTIMER_STATE_PENDING, 0);
 -                              list_add_tail(&timer->cb_entry,
 -                                            &base->cpu_base->cb_pending);
 -                              raise = 1;
 -                              continue;
 -                      }
 -
                        __run_hrtimer(timer);
                }
                spin_unlock(&cpu_base->lock);
                if (tick_program_event(expires_next, 0))
                        goto retry;
        }
 -
 -      /* Raise softirq ? */
 -      if (raise)
 -              raise_softirq(HRTIMER_SOFTIRQ);
  }
  
  /**
@@@ -1268,6 -1412,11 +1267,6 @@@ void hrtimer_peek_ahead_timers(void
        local_irq_restore(flags);
  }
  
 -static void run_hrtimer_softirq(struct softirq_action *h)
 -{
 -      run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
 -}
 -
  #endif        /* CONFIG_HIGH_RES_TIMERS */
  
  /*
   */
  void hrtimer_run_pending(void)
  {
 -      struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 -
        if (hrtimer_hres_active())
                return;
  
         */
        if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
                hrtimer_switch_to_hres();
 -
 -      run_hrtimer_pending(cpu_base);
  }
  
  /*
@@@ -1328,6 -1481,14 +1327,6 @@@ void hrtimer_run_queues(void
                                        hrtimer_get_expires_tv64(timer))
                                break;
  
 -                      if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
 -                              __remove_hrtimer(timer, base,
 -                                      HRTIMER_STATE_PENDING, 0);
 -                              list_add_tail(&timer->cb_entry,
 -                                      &base->cpu_base->cb_pending);
 -                              continue;
 -                      }
 -
                        __run_hrtimer(timer);
                }
                spin_unlock(&cpu_base->lock);
@@@ -1354,6 -1515,9 +1353,6 @@@ void hrtimer_init_sleeper(struct hrtime
  {
        sl->timer.function = hrtimer_wakeup;
        sl->task = task;
 -#ifdef CONFIG_HIGH_RES_TIMERS
 -      sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 -#endif
  }
  
  static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@@ -1490,22 -1654,36 +1489,22 @@@ static void __cpuinit init_hrtimers_cpu
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                cpu_base->clock_base[i].cpu_base = cpu_base;
  
 -      INIT_LIST_HEAD(&cpu_base->cb_pending);
        hrtimer_init_hres(cpu_base);
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
  
 -static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 -                              struct hrtimer_clock_base *new_base, int dcpu)
 +static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 +                              struct hrtimer_clock_base *new_base)
  {
        struct hrtimer *timer;
        struct rb_node *node;
 -      int raise = 0;
  
        while ((node = rb_first(&old_base->active))) {
                timer = rb_entry(node, struct hrtimer, node);
                BUG_ON(hrtimer_callback_running(timer));
                debug_hrtimer_deactivate(timer);
  
 -              /*
 -               * Should not happen. Per CPU timers should be
 -               * canceled _before_ the migration code is called
 -               */
 -              if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
 -                      __remove_hrtimer(timer, old_base,
 -                                       HRTIMER_STATE_INACTIVE, 0);
 -                      WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
 -                           timer, timer->function, dcpu);
 -                      continue;
 -              }
 -
                /*
                 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
                 * timer could be seen as !active and just vanish away
                __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
                timer->base = new_base;
                /*
 -               * Enqueue the timer. Allow reprogramming of the event device
 +               * Enqueue the timers on the new cpu, but do not reprogram 
 +               * the timer as that would enable a deadlock between
 +               * hrtimer_enqueue_reprogramm() running the timer and us still
 +               * holding a nested base lock.
 +               *
 +               * Instead we tickle the hrtimer interrupt after the migration
 +               * is done, which will run all expired timers and re-programm
 +               * the timer device.
                 */
 -              enqueue_hrtimer(timer, new_base, 1);
 +              enqueue_hrtimer(timer, new_base, 0);
  
 -#ifdef CONFIG_HIGH_RES_TIMERS
 -              /*
 -               * Happens with high res enabled when the timer was
 -               * already expired and the callback mode is
 -               * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
 -               * enqueue code does not move them to the soft irq
 -               * pending list for performance/latency reasons, but
 -               * in the migration state, we need to do that
 -               * otherwise we end up with a stale timer.
 -               */
 -              if (timer->state == HRTIMER_STATE_MIGRATE) {
 -                      timer->state = HRTIMER_STATE_PENDING;
 -                      list_add_tail(&timer->cb_entry,
 -                                    &new_base->cpu_base->cb_pending);
 -                      raise = 1;
 -              }
 -#endif
                /* Clear the migration state bit */
                timer->state &= ~HRTIMER_STATE_MIGRATE;
        }
 -      return raise;
 -}
 -
 -#ifdef CONFIG_HIGH_RES_TIMERS
 -static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
 -                                 struct hrtimer_cpu_base *new_base)
 -{
 -      struct hrtimer *timer;
 -      int raise = 0;
 -
 -      while (!list_empty(&old_base->cb_pending)) {
 -              timer = list_entry(old_base->cb_pending.next,
 -                                 struct hrtimer, cb_entry);
 -
 -              __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
 -              timer->base = &new_base->clock_base[timer->base->index];
 -              list_add_tail(&timer->cb_entry, &new_base->cb_pending);
 -              raise = 1;
 -      }
 -      return raise;
 -}
 -#else
 -static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
 -                                 struct hrtimer_cpu_base *new_base)
 -{
 -      return 0;
  }
 -#endif
  
 -static void migrate_hrtimers(int cpu)
 +static int migrate_hrtimers(int scpu)
  {
        struct hrtimer_cpu_base *old_base, *new_base;
 -      int i, raise = 0;
 +      int dcpu, i;
  
 -      BUG_ON(cpu_online(cpu));
 -      old_base = &per_cpu(hrtimer_bases, cpu);
 +      BUG_ON(cpu_online(scpu));
 +      old_base = &per_cpu(hrtimer_bases, scpu);
        new_base = &get_cpu_var(hrtimer_bases);
  
 -      tick_cancel_sched_timer(cpu);
 +      dcpu = smp_processor_id();
 +
 +      tick_cancel_sched_timer(scpu);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
  
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 -              if (migrate_hrtimer_list(&old_base->clock_base[i],
 -                                       &new_base->clock_base[i], cpu))
 -                      raise = 1;
 +              migrate_hrtimer_list(&old_base->clock_base[i],
 +                                   &new_base->clock_base[i]);
        }
  
 -      if (migrate_hrtimer_pending(old_base, new_base))
 -              raise = 1;
 -
        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
        put_cpu_var(hrtimer_bases);
  
 -      if (raise)
 -              hrtimer_raise_softirq();
 +      return dcpu;
 +}
 +
 +static void tickle_timers(void *arg)
 +{
 +      hrtimer_peek_ahead_timers();
  }
 +
  #endif /* CONFIG_HOTPLUG_CPU */
  
  static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
  {
 -      unsigned int cpu = (long)hcpu;
 +      int scpu = (long)hcpu;
  
        switch (action) {
  
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
 -              init_hrtimers_cpu(cpu);
 +              init_hrtimers_cpu(scpu);
                break;
  
  #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
 -              clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
 -              migrate_hrtimers(cpu);
 +      {
 +              int dcpu;
 +
 +              clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
 +              dcpu = migrate_hrtimers(scpu);
 +              smp_call_function_single(dcpu, tickle_timers, NULL, 0);
                break;
 +      }
  #endif
  
        default:
@@@ -1609,6 -1816,9 +1608,6 @@@ void __init hrtimers_init(void
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                          (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
 -#ifdef CONFIG_HIGH_RES_TIMERS
 -      open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
 -#endif
  }
  
  /**
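
The CPU-hotplug part of the hrtimer rework above boils down to one pattern: migrate_hrtimers() moves every timer to the destination CPU's base while both base locks are held, deliberately without reprogramming the clock event device (the comment in the migration hunk explains the deadlock it avoids), and the destination CPU is then poked so anything that expired during the move runs and the device gets reprogrammed. A condensed sketch of the resulting CPU_DEAD notifier path, using the same identifiers as the hunks (surrounding notifier boilerplate omitted):

case CPU_DEAD:
case CPU_DEAD_FROZEN:
{
        int dcpu;

        clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
        /* move all hrtimers off the dead CPU; returns the CPU they landed on */
        dcpu = migrate_hrtimers(scpu);
        /* kick that CPU: tickle_timers() -> hrtimer_peek_ahead_timers() runs
         * expired timers and reprograms the local clock event device */
        smp_call_function_single(dcpu, tickle_timers, NULL, 0);
        break;
}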
diff --combined kernel/softirq.c
index 466e75ce271aa0795ac50266d9d3bc2acdde7261,daf46358d2dd8a1d3bf57f93d14bd4cc31343001..670c1eca47ec9964e2a8bef73dc3aa4840c427b4
@@@ -102,6 -102,20 +102,6 @@@ void local_bh_disable(void
  
  EXPORT_SYMBOL(local_bh_disable);
  
 -void __local_bh_enable(void)
 -{
 -      WARN_ON_ONCE(in_irq());
 -
 -      /*
 -       * softirqs should never be enabled by __local_bh_enable(),
 -       * it always nests inside local_bh_enable() sections:
 -       */
 -      WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
 -
 -      sub_preempt_count(SOFTIRQ_OFFSET);
 -}
 -EXPORT_SYMBOL_GPL(__local_bh_enable);
 -
  /*
   * Special-case - softirqs can safely be enabled in
   * cond_resched_softirq(), or by __do_softirq(),
@@@ -255,7 -269,6 +255,7 @@@ void irq_enter(void
  {
        int cpu = smp_processor_id();
  
 +      rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
                __irq_enter();
                tick_check_idle(cpu);
@@@ -282,9 -295,9 +282,9 @@@ void irq_exit(void
  
  #ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
 -      if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
 -              tick_nohz_stop_sched_tick(0);
        rcu_irq_exit();
 +      if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
 +              tick_nohz_stop_sched_tick(0);
  #endif
        preempt_enable_no_resched();
  }
@@@ -784,3 -797,23 +784,23 @@@ int on_each_cpu(void (*func) (void *inf
  }
  EXPORT_SYMBOL(on_each_cpu);
  #endif
+ /*
+  * [ These __weak aliases are kept in a separate compilation unit, so that
+  *   GCC does not inline them incorrectly. ]
+  */
+ int __init __weak early_irq_init(void)
+ {
+       return 0;
+ }
+ int __init __weak arch_early_irq_init(void)
+ {
+       return 0;
+ }
+ int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
+ {
+       return 0;
+ }