Merge branch 'generic-ipi' into generic-ipi-for-linus
author		Ingo Molnar <mingo@elte.hu>	Tue, 15 Jul 2008 19:55:59 +0000 (21:55 +0200)
committer	Ingo Molnar <mingo@elte.hu>	Tue, 15 Jul 2008 19:55:59 +0000 (21:55 +0200)
Conflicts:

arch/powerpc/Kconfig
arch/s390/kernel/time.c
arch/x86/kernel/apic_32.c
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/i8259_64.c
arch/x86/kernel/ldt.c
arch/x86/kernel/nmi_64.c
arch/x86/kernel/smpboot.c
arch/x86/xen/smp.c
include/asm-x86/hw_irq_32.h
include/asm-x86/hw_irq_64.h
include/asm-x86/mach-default/irq_vectors.h
include/asm-x86/mach-voyager/irq_vectors.h
include/asm-x86/smp.h
kernel/Makefile

Signed-off-by: Ingo Molnar <mingo@elte.hu>
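
The substance of the generic-ipi branch being merged: the cross-CPU call
primitives move into kernel/smp.c and lose their ignored nonatomic/retry
argument, and a dedicated IPI is added for single-CPU function calls. A
sketch of the signature change, as the call-site conversions below suggest
(exact prototypes are in linux/smp.h of the respective trees; the extra
argument was named nonatomic or retry depending on the architecture):

/* before the merge: extra nonatomic/retry flag, ignored by all callers */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait);
int smp_call_function_single(int cpu, void (*func)(void *info),
			     void *info, int nonatomic, int wait);
int on_each_cpu(void (*func)(void *info), void *info,
		int nonatomic, int wait);

/* after the merge: generic helpers in kernel/smp.c, three arguments */
int smp_call_function(void (*func)(void *info), void *info, int wait);
int smp_call_function_single(int cpu, void (*func)(void *info),
			     void *info, int wait);
int on_each_cpu(void (*func)(void *info), void *info, int wait);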
40 files changed:
arch/arm/Kconfig
arch/mips/Kconfig
arch/powerpc/Kconfig
arch/s390/appldata/appldata_base.c
arch/s390/kernel/time.c
arch/x86/Kconfig
arch/x86/kernel/apic_32.c
arch/x86/kernel/cpu/mcheck/mce_64.c
arch/x86/kernel/cpu/mtrr/main.c
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/cpuid.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/io_apic_32.c
arch/x86/kernel/io_apic_64.c
arch/x86/kernel/irqinit_64.c
arch/x86/kernel/ldt.c
arch/x86/kernel/nmi.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tlb_64.c
arch/x86/kernel/vsyscall_64.c
arch/x86/mach-voyager/voyager_smp.c
arch/x86/mm/pageattr.c
arch/x86/oprofile/nmi_int.c
arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/smp.c
arch/x86/xen/xen-ops.h
fs/buffer.c
include/asm-x86/hw_irq.h
include/asm-x86/irq_vectors.h
include/asm-x86/smp.h
kernel/Makefile
kernel/hrtimer.c
kernel/softirq.c
kernel/time/tick-broadcast.c
mm/page_alloc.c
mm/slab.c
mm/slub.c
net/iucv/iucv.c

Simple merge
Simple merge
index a5e9912e2d3773fdab23a4bc52ec5e147f47e1cf,852d40c29637ac199e5e539b843e60f9668b932d..20eacf2a842474ab0c4d5c3c39aadd6a85ad5ae7
@@@ -111,7 -109,8 +111,8 @@@ config PP
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_LMB
+       select USE_GENERIC_SMP_HELPERS if SMP
 +      select HAVE_OPROFILE
  
  config EARLY_PRINTK
        bool
index 9cb3d92447a35651c5c67d5f495a5d1fa5981b2f,837a3b3e77599938d4467573b2ea32a1fc8f9d32..a7f8979fb92584bcf45ef94001557b2fe02d9674
@@@ -203,9 -209,10 +203,9 @@@ __appldata_vtimer_setup(int cmd
                        per_cpu(appldata_timer, i).expires = per_cpu_interval;
                        smp_call_function_single(i, add_virt_timer_periodic,
                                                 &per_cpu(appldata_timer, i),
-                                                0, 1);
+                                                1);
                }
                appldata_timer_active = 1;
 -              P_INFO("Monitoring timer started.\n");
                break;
        case APPLDATA_DEL_TIMER:
                for_each_online_cpu(i)
index 7418bebb547fad641cb79a8b209e8d12d8b842c7,6037ed2b747121b6de869d3e7287e58e39a8d306..8051e9326dfcd0b8ba58b6fa44bf356637e7d2d8
@@@ -707,9 -690,9 +707,9 @@@ static int etr_sync_clock(struct etr_ai
         */
        memset(&etr_sync, 0, sizeof(etr_sync));
        preempt_disable();
-       smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0);
 -      smp_call_function(etr_sync_cpu_start, NULL, 0);
++      smp_call_function(clock_sync_cpu_start, &etr_sync, 0);
        local_irq_disable();
 -      etr_enable_sync_clock();
 +      enable_sync_clock();
  
        /* Set clock to next OTE. */
        __ctl_set_bit(14, 21);
                rc = -EAGAIN;
        }
        local_irq_enable();
-       smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
 -      smp_call_function(etr_sync_cpu_end,NULL,0);
++      smp_call_function(clock_sync_cpu_end, NULL, 0);
        preempt_enable();
        return rc;
  }
@@@ -926,10 -909,10 +926,10 @@@ static void etr_work_fn(struct work_str
        if (!eacr.ea) {
                /* Both ports offline. Reset everything. */
                eacr.dp = eacr.es = eacr.sl = 0;
-               on_each_cpu(disable_sync_clock, NULL, 0, 1);
 -              on_each_cpu(etr_disable_sync_clock, NULL, 1);
++              on_each_cpu(disable_sync_clock, NULL, 1);
                del_timer_sync(&etr_timer);
                etr_update_eacr(eacr);
 -              set_bit(ETR_FLAG_EACCES, &etr_flags);
 +              clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
                return;
        }
  
Simple merge
index 3e58b676d23b8fd96a87823e30fcc4566d60aad6,71017f71f4bc547f2cb92fae69cb0b3f1b765a57..a437d027f20b6d8d7ba3dc88400220e796afe41e
@@@ -1333,13 -1351,17 +1333,17 @@@ void __init smp_intr_init(void
         * The reschedule interrupt is a CPU-to-CPU reschedule-helper
         * IPI, driven by wakeup.
         */
 -      set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 +      alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
  
        /* IPI for invalidation */
 -      set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
 +      alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
  
        /* IPI for generic function call */
 -      set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 +      alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+       /* IPI for single call function */
+       set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+                               call_function_single_interrupt);
  }
  #endif
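
The hunk above routes the new CALL_FUNCTION_SINGLE_VECTOR to
call_function_single_interrupt. A sketch of what such a handler does,
assuming it funnels into the generic dispatcher this series adds to
kernel/smp.c (the exact x86 body may differ slightly):

/* sketch: ack the APIC, then let the generic code run the queued call */
void smp_call_function_single_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}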
  
Simple merge
Simple merge
index 2e9bef6e3aa3d8ee0f778164725f024b6b7c388f,58043f06d7e22ed2f4ad727c7b6a9e9b5fb902fb..6d4bdc02388a2abf86eed64b2d596138f96225c4
@@@ -189,10 -180,8 +189,10 @@@ void disable_lapic_nmi_watchdog(void
        if (atomic_read(&nmi_active) <= 0)
                return;
  
-       on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
+       on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
 -      wd_ops->unreserve();
 +
 +      if (wd_ops)
 +              wd_ops->unreserve();
  
        BUG_ON(atomic_read(&nmi_active) != 0);
  }
Simple merge
Simple merge
index 603261a5885cf2a1e1f55be2845197641e1a2d61,720640ff36ca0676614b659aa9176ea67fc6676f..558abf4c796afa0d7dd7ad2622e3bd42f28e8d39
@@@ -1567,9 -1563,9 +1567,9 @@@ void /*__init*/ print_local_APIC(void *
        printk("\n");
  }
  
 -void print_all_local_APICs (void)
 +void print_all_local_APICs(void)
  {
-       on_each_cpu(print_local_APIC, NULL, 1, 1);
+       on_each_cpu(print_local_APIC, NULL, 1);
  }
  
  void /*__init*/ print_PIC(void)
Simple merge
index 31f49e8f46a7997aa2fbf8280f545ce318868c9f,0000000000000000000000000000000000000000..0373e88de95ab5be0760b9adb34ace988d253fcc
mode 100644,000000..100644
--- /dev/null
@@@ -1,217 -1,0 +1,221 @@@
 +#include <linux/linkage.h>
 +#include <linux/errno.h>
 +#include <linux/signal.h>
 +#include <linux/sched.h>
 +#include <linux/ioport.h>
 +#include <linux/interrupt.h>
 +#include <linux/timex.h>
 +#include <linux/slab.h>
 +#include <linux/random.h>
 +#include <linux/init.h>
 +#include <linux/kernel_stat.h>
 +#include <linux/sysdev.h>
 +#include <linux/bitops.h>
 +
 +#include <asm/acpi.h>
 +#include <asm/atomic.h>
 +#include <asm/system.h>
 +#include <asm/io.h>
 +#include <asm/hw_irq.h>
 +#include <asm/pgtable.h>
 +#include <asm/delay.h>
 +#include <asm/desc.h>
 +#include <asm/apic.h>
 +#include <asm/i8259.h>
 +
 +/*
 + * Common place to define all x86 IRQ vectors
 + *
 + * This builds up the IRQ handler stubs using some ugly macros in irq.h
 + *
 + * These macros create the low-level assembly IRQ routines that save
 + * register context and call do_IRQ(). do_IRQ() then does all the
 + * operations that are needed to keep the AT (or SMP IOAPIC)
 + * interrupt-controller happy.
 + */
 +
 +#define IRQ_NAME2(nr) nr##_interrupt(void)
 +#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
 +
 +/*
 + *    SMP has a few special interrupts for IPI messages
 + */
 +
 +#define BUILD_IRQ(nr)                         \
 +      asmlinkage void IRQ_NAME(nr);           \
 +      asm("\n.p2align\n"                      \
 +          "IRQ" #nr "_interrupt:\n\t"         \
 +          "push $~(" #nr ") ; "               \
 +          "jmp common_interrupt");
 +
 +#define BI(x,y) \
 +      BUILD_IRQ(x##y)
 +
 +#define BUILD_16_IRQS(x) \
 +      BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
 +      BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
 +      BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
 +      BI(x,c) BI(x,d) BI(x,e) BI(x,f)
 +
 +/*
 + * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
 + * (these are usually mapped to vectors 0x30-0x3f)
 + */
 +
 +/*
 + * The IO-APIC gives us many more interrupt sources. Most of these
 + * are unused but an SMP system is supposed to have enough memory ...
 + * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
 + * across the spectrum, so we really want to be prepared to get all
 + * of these. Plus, more powerful systems might have more than 64
 + * IO-APIC registers.
 + *
 + * (these are usually mapped into the 0x30-0xff vector range)
 + */
 +                                    BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
 +BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
 +BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
 +BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
 +
 +#undef BUILD_16_IRQS
 +#undef BI
 +
 +
 +#define IRQ(x,y) \
 +      IRQ##x##y##_interrupt
 +
 +#define IRQLIST_16(x) \
 +      IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
 +      IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
 +      IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
 +      IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
 +
 +/* for the irq vectors */
 +static void (*__initdata interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
 +                                        IRQLIST_16(0x2), IRQLIST_16(0x3),
 +      IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
 +      IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
 +      IRQLIST_16(0xc), IRQLIST_16(0xd), IRQLIST_16(0xe), IRQLIST_16(0xf)
 +};
 +
 +#undef IRQ
 +#undef IRQLIST_16
 +
 +
 +
 +
 +/*
 + * IRQ2 is cascade interrupt to second interrupt controller
 + */
 +
 +static struct irqaction irq2 = {
 +      .handler = no_action,
 +      .mask = CPU_MASK_NONE,
 +      .name = "cascade",
 +};
 +DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 +      [0 ... IRQ0_VECTOR - 1] = -1,
 +      [IRQ0_VECTOR] = 0,
 +      [IRQ1_VECTOR] = 1,
 +      [IRQ2_VECTOR] = 2,
 +      [IRQ3_VECTOR] = 3,
 +      [IRQ4_VECTOR] = 4,
 +      [IRQ5_VECTOR] = 5,
 +      [IRQ6_VECTOR] = 6,
 +      [IRQ7_VECTOR] = 7,
 +      [IRQ8_VECTOR] = 8,
 +      [IRQ9_VECTOR] = 9,
 +      [IRQ10_VECTOR] = 10,
 +      [IRQ11_VECTOR] = 11,
 +      [IRQ12_VECTOR] = 12,
 +      [IRQ13_VECTOR] = 13,
 +      [IRQ14_VECTOR] = 14,
 +      [IRQ15_VECTOR] = 15,
 +      [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
 +};
 +
 +static void __init init_ISA_irqs (void)
 +{
 +      int i;
 +
 +      init_bsp_APIC();
 +      init_8259A(0);
 +
 +      for (i = 0; i < NR_IRQS; i++) {
 +              irq_desc[i].status = IRQ_DISABLED;
 +              irq_desc[i].action = NULL;
 +              irq_desc[i].depth = 1;
 +
 +              if (i < 16) {
 +                      /*
 +                       * 16 old-style INTA-cycle interrupts:
 +                       */
 +                      set_irq_chip_and_handler_name(i, &i8259A_chip,
 +                                                    handle_level_irq, "XT");
 +              } else {
 +                      /*
 +                       * 'high' PCI IRQs filled in on demand
 +                       */
 +                      irq_desc[i].chip = &no_irq_chip;
 +              }
 +      }
 +}
 +
 +void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
 +
 +void __init native_init_IRQ(void)
 +{
 +      int i;
 +
 +      init_ISA_irqs();
 +      /*
 +       * Cover the whole vector space, no vector can escape
 +       * us. (some of these will be overridden and become
 +       * 'special' SMP interrupts)
 +       */
 +      for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
 +              int vector = FIRST_EXTERNAL_VECTOR + i;
 +              if (vector != IA32_SYSCALL_VECTOR)
 +                      set_intr_gate(vector, interrupt[i]);
 +      }
 +
 +#ifdef CONFIG_SMP
 +      /*
 +       * The reschedule interrupt is a CPU-to-CPU reschedule-helper
 +       * IPI, driven by wakeup.
 +       */
 +      alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 +
 +      /* IPIs for invalidation */
 +      alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
 +      alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
 +      alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
 +      alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
 +      alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
 +      alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
 +      alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
 +      alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
 +
 +      /* IPI for generic function call */
 +      alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
 +
++      /* IPI for generic single function call */
++      alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
++                      call_function_single_interrupt);
++
 +      /* Low priority IPI to cleanup after moving an irq */
 +      set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 +#endif
 +      alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 +      alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
 +
 +      /* self generated IPI for local APIC timer */
 +      alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 +
 +      /* IPI vectors for APIC spurious and error interrupts */
 +      alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
 +      alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
 +
 +      if (!acpi_ioapic)
 +              setup_irq(2, &irq2);
 +}
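
Note that nearly every vector above is installed with alloc_intr_gate()
rather than plain set_intr_gate(): besides installing the gate, it records
the vector in the used_vectors bitmap so the device-vector allocator will
not hand it out again. Roughly (a sketch; the real inline lives in
include/asm-x86/desc.h):

/* sketch: reserve the vector, then install the handler */
static inline void alloc_intr_gate(unsigned int n, void *addr)
{
	set_bit(n, used_vectors);	/* hide n from device IRQ allocation */
	set_intr_gate(n, addr);
}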
index 21f2bae98c157730eff158c1f921ce3aa23882b3,cb0a6398c64baa5678160d1e818ce9951b13d77c..a8449571858ae9dae444076d7c6b28daff795ee9
@@@ -68,7 -68,7 +68,7 @@@ static int alloc_ldt(mm_context_t *pc, 
                load_LDT(pc);
                mask = cpumask_of_cpu(smp_processor_id());
                if (!cpus_equal(current->mm->cpu_vm_mask, mask))
-                       smp_call_function(flush_ldt, current->mm, 1, 1);
 -                      smp_call_function(flush_ldt, NULL, 1);
++                      smp_call_function(flush_ldt, current->mm, 1);
                preempt_enable();
  #else
                load_LDT(pc);
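
The resolution keeps current->mm as the call argument instead of NULL, so
the receiving side compares against the modified mm explicitly rather than
assuming it. The 2.6.27-era helper is close to this sketch:

/* sketch: reload the LDT only where the modified mm is actually active */
static void flush_ldt(void *current_mm)
{
	if (current->active_mm == current_mm)
		load_LDT(&current->active_mm->context);
}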
index 716b89284be02841cf74e81a73ad0d0a203cbfca,0000000000000000000000000000000000000000..ec024b3baad0764821c036d0aa2552397f76f017
mode 100644,000000..100644
--- /dev/null
@@@ -1,516 -1,0 +1,516 @@@
-               smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 +/*
 + *  NMI watchdog support on APIC systems
 + *
 + *  Started by Ingo Molnar <mingo@redhat.com>
 + *
 + *  Fixes:
 + *  Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
 + *  Mikael Pettersson : Power Management for local APIC NMI watchdog.
 + *  Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog.
 + *  Pavel Machek and
 + *  Mikael Pettersson : PM converted to driver model. Disable/enable API.
 + */
 +
 +#include <asm/apic.h>
 +
 +#include <linux/nmi.h>
 +#include <linux/mm.h>
 +#include <linux/delay.h>
 +#include <linux/interrupt.h>
 +#include <linux/module.h>
 +#include <linux/sysdev.h>
 +#include <linux/sysctl.h>
 +#include <linux/percpu.h>
 +#include <linux/kprobes.h>
 +#include <linux/cpumask.h>
 +#include <linux/kernel_stat.h>
 +#include <linux/kdebug.h>
 +#include <linux/smp.h>
 +
 +#include <asm/i8259.h>
 +#include <asm/io_apic.h>
 +#include <asm/smp.h>
 +#include <asm/nmi.h>
 +#include <asm/proto.h>
 +#include <asm/timer.h>
 +
 +#include <asm/mce.h>
 +
 +#include <mach_traps.h>
 +
 +int unknown_nmi_panic;
 +int nmi_watchdog_enabled;
 +
 +static cpumask_t backtrace_mask = CPU_MASK_NONE;
 +
 +/* nmi_active:
 + * >0: the lapic NMI watchdog is active, but can be disabled
 + * <0: the lapic NMI watchdog has not been set up, and cannot
 + *     be enabled
 + *  0: the lapic NMI watchdog is disabled, but can be enabled
 + */
 +atomic_t nmi_active = ATOMIC_INIT(0);         /* oprofile uses this */
 +EXPORT_SYMBOL(nmi_active);
 +
 +unsigned int nmi_watchdog = NMI_NONE;
 +EXPORT_SYMBOL(nmi_watchdog);
 +
 +static int panic_on_timeout;
 +
 +static unsigned int nmi_hz = HZ;
 +static DEFINE_PER_CPU(short, wd_enabled);
 +static int endflag __initdata;
 +
 +static inline unsigned int get_nmi_count(int cpu)
 +{
 +#ifdef CONFIG_X86_64
 +      return cpu_pda(cpu)->__nmi_count;
 +#else
 +      return nmi_count(cpu);
 +#endif
 +}
 +
 +static inline int mce_in_progress(void)
 +{
 +#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
 +      return atomic_read(&mce_entry) > 0;
 +#endif
 +      return 0;
 +}
 +
 +/*
 + * Take the local apic timer and PIT/HPET into account. We don't
 + * know which one is active, when we have highres/dyntick on
 + */
 +static inline unsigned int get_timer_irqs(int cpu)
 +{
 +#ifdef CONFIG_X86_64
 +      return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
 +#else
 +      return per_cpu(irq_stat, cpu).apic_timer_irqs +
 +              per_cpu(irq_stat, cpu).irq0_irqs;
 +#endif
 +}
 +
 +#ifdef CONFIG_SMP
 +/*
 + * The performance counters used by NMI_LOCAL_APIC don't trigger when
 + * the CPU is idle. To make sure the NMI watchdog really ticks on all
 + * CPUs during the test make them busy.
 + */
 +static __init void nmi_cpu_busy(void *data)
 +{
 +      local_irq_enable_in_hardirq();
 +      /*
 +       * Intentionally don't use cpu_relax here. This is
 +       * to make sure that the performance counter really ticks,
 +       * even if there is a simulator or similar that catches the
 +       * pause instruction. On a real HT machine this is fine because
 +       * all other CPUs are busy with "useless" delay loops and don't
 +       * care if they get somewhat less cycles.
 +       */
 +      while (endflag == 0)
 +              mb();
 +}
 +#endif
 +
 +int __init check_nmi_watchdog(void)
 +{
 +      unsigned int *prev_nmi_count;
 +      int cpu;
 +
 +      if (!nmi_watchdog_active() || !atomic_read(&nmi_active))
 +              return 0;
 +
 +      prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
 +      if (!prev_nmi_count)
 +              goto error;
 +
 +      printk(KERN_INFO "Testing NMI watchdog ... ");
 +
 +#ifdef CONFIG_SMP
 +      if (nmi_watchdog == NMI_LOCAL_APIC)
-               on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
++              smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
 +#endif
 +
 +      for_each_possible_cpu(cpu)
 +              prev_nmi_count[cpu] = get_nmi_count(cpu);
 +      local_irq_enable();
 +      mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
 +
 +      for_each_online_cpu(cpu) {
 +              if (!per_cpu(wd_enabled, cpu))
 +                      continue;
 +              if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
 +                      printk(KERN_WARNING "WARNING: CPU#%d: NMI "
 +                              "appears to be stuck (%d->%d)!\n",
 +                              cpu,
 +                              prev_nmi_count[cpu],
 +                              get_nmi_count(cpu));
 +                      per_cpu(wd_enabled, cpu) = 0;
 +                      atomic_dec(&nmi_active);
 +              }
 +      }
 +      endflag = 1;
 +      if (!atomic_read(&nmi_active)) {
 +              kfree(prev_nmi_count);
 +              atomic_set(&nmi_active, -1);
 +              goto error;
 +      }
 +      printk("OK.\n");
 +
 +      /*
 +       * now that we know it works we can reduce NMI frequency to
 +       * something more reasonable; makes a difference in some configs
 +       */
 +      if (nmi_watchdog == NMI_LOCAL_APIC)
 +              nmi_hz = lapic_adjust_nmi_hz(1);
 +
 +      kfree(prev_nmi_count);
 +      return 0;
 +error:
 +      if (nmi_watchdog == NMI_IO_APIC && !timer_through_8259)
 +              disable_8259A_irq(0);
 +#ifdef CONFIG_X86_32
 +      timer_ack = 0;
 +#endif
 +      return -1;
 +}
 +
 +static int __init setup_nmi_watchdog(char *str)
 +{
 +      unsigned int nmi;
 +
 +      if (!strncmp(str, "panic", 5)) {
 +              panic_on_timeout = 1;
 +              str = strchr(str, ',');
 +              if (!str)
 +                      return 1;
 +              ++str;
 +      }
 +
 +      get_option(&str, &nmi);
 +
 +      if (nmi >= NMI_INVALID)
 +              return 0;
 +
 +      nmi_watchdog = nmi;
 +      return 1;
 +}
 +__setup("nmi_watchdog=", setup_nmi_watchdog);
 +
 +/*
 + * Suspend/resume support
 + */
 +#ifdef CONFIG_PM
 +
 +static int nmi_pm_active; /* nmi_active before suspend */
 +
 +static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
 +{
 +      /* only CPU0 goes here, other CPUs should be offline */
 +      nmi_pm_active = atomic_read(&nmi_active);
 +      stop_apic_nmi_watchdog(NULL);
 +      BUG_ON(atomic_read(&nmi_active) != 0);
 +      return 0;
 +}
 +
 +static int lapic_nmi_resume(struct sys_device *dev)
 +{
 +      /* only CPU0 goes here, other CPUs should be offline */
 +      if (nmi_pm_active > 0) {
 +              setup_apic_nmi_watchdog(NULL);
 +              touch_nmi_watchdog();
 +      }
 +      return 0;
 +}
 +
 +static struct sysdev_class nmi_sysclass = {
 +      .name           = "lapic_nmi",
 +      .resume         = lapic_nmi_resume,
 +      .suspend        = lapic_nmi_suspend,
 +};
 +
 +static struct sys_device device_lapic_nmi = {
 +      .id     = 0,
 +      .cls    = &nmi_sysclass,
 +};
 +
 +static int __init init_lapic_nmi_sysfs(void)
 +{
 +      int error;
 +
 +      /*
 +       * should really be a BUG_ON but b/c this is an
 +       * init call, it just doesn't work.  -dcz
 +       */
 +      if (nmi_watchdog != NMI_LOCAL_APIC)
 +              return 0;
 +
 +      if (atomic_read(&nmi_active) < 0)
 +              return 0;
 +
 +      error = sysdev_class_register(&nmi_sysclass);
 +      if (!error)
 +              error = sysdev_register(&device_lapic_nmi);
 +      return error;
 +}
 +
 +/* must come after the local APIC's device_initcall() */
 +late_initcall(init_lapic_nmi_sysfs);
 +
 +#endif        /* CONFIG_PM */
 +
 +static void __acpi_nmi_enable(void *__unused)
 +{
 +      apic_write_around(APIC_LVT0, APIC_DM_NMI);
 +}
 +
 +/*
 + * Enable timer based NMIs on all CPUs:
 + */
 +void acpi_nmi_enable(void)
 +{
 +      if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-               on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
++              on_each_cpu(__acpi_nmi_enable, NULL, 1);
 +}
 +
 +static void __acpi_nmi_disable(void *__unused)
 +{
 +      apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
 +}
 +
 +/*
 + * Disable timer based NMIs on all CPUs:
 + */
 +void acpi_nmi_disable(void)
 +{
 +      if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
++              on_each_cpu(__acpi_nmi_disable, NULL, 1);
 +}
 +
 +void setup_apic_nmi_watchdog(void *unused)
 +{
 +      if (__get_cpu_var(wd_enabled))
 +              return;
 +
 +      /* cheap hack to support suspend/resume */
 +      /* if cpu0 is not active neither should the other cpus */
 +      if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
 +              return;
 +
 +      switch (nmi_watchdog) {
 +      case NMI_LOCAL_APIC:
 +               /* enable it before to avoid race with handler */
 +              __get_cpu_var(wd_enabled) = 1;
 +              if (lapic_watchdog_init(nmi_hz) < 0) {
 +                      __get_cpu_var(wd_enabled) = 0;
 +                      return;
 +              }
 +              /* FALL THROUGH */
 +      case NMI_IO_APIC:
 +              __get_cpu_var(wd_enabled) = 1;
 +              atomic_inc(&nmi_active);
 +      }
 +}
 +
 +void stop_apic_nmi_watchdog(void *unused)
 +{
 +      /* only support LOCAL and IO APICs for now */
 +      if (!nmi_watchdog_active())
 +              return;
 +      if (__get_cpu_var(wd_enabled) == 0)
 +              return;
 +      if (nmi_watchdog == NMI_LOCAL_APIC)
 +              lapic_watchdog_stop();
 +      __get_cpu_var(wd_enabled) = 0;
 +      atomic_dec(&nmi_active);
 +}
 +
 +/*
 + * the best way to detect whether a CPU has a 'hard lockup' problem
 + * is to check it's local APIC timer IRQ counts. If they are not
 + * changing then that CPU has some problem.
 + *
 + * as these watchdog NMI IRQs are generated on every CPU, we only
 + * have to check the current processor.
 + *
 + * since NMIs don't listen to _any_ locks, we have to be extremely
 + * careful not to rely on unsafe variables. The printk might lock
 + * up though, so we have to break up any console locks first ...
 + * [when there will be more tty-related locks, break them up here too!]
 + */
 +
 +static DEFINE_PER_CPU(unsigned, last_irq_sum);
 +static DEFINE_PER_CPU(local_t, alert_counter);
 +static DEFINE_PER_CPU(int, nmi_touch);
 +
 +void touch_nmi_watchdog(void)
 +{
 +      if (nmi_watchdog_active()) {
 +              unsigned cpu;
 +
 +              /*
 +               * Tell other CPUs to reset their alert counters. We cannot
 +               * do it ourselves because the alert count increase is not
 +               * atomic.
 +               */
 +              for_each_present_cpu(cpu) {
 +                      if (per_cpu(nmi_touch, cpu) != 1)
 +                              per_cpu(nmi_touch, cpu) = 1;
 +              }
 +      }
 +
 +      /*
 +       * Tickle the softlockup detector too:
 +       */
 +      touch_softlockup_watchdog();
 +}
 +EXPORT_SYMBOL(touch_nmi_watchdog);
 +
 +notrace __kprobes int
 +nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 +{
 +      /*
 +       * Since current_thread_info()-> is always on the stack, and we
 +       * always switch the stack NMI-atomically, it's safe to use
 +       * smp_processor_id().
 +       */
 +      unsigned int sum;
 +      int touched = 0;
 +      int cpu = smp_processor_id();
 +      int rc = 0;
 +
 +      /* check for other users first */
 +      if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
 +                      == NOTIFY_STOP) {
 +              rc = 1;
 +              touched = 1;
 +      }
 +
 +      sum = get_timer_irqs(cpu);
 +
 +      if (__get_cpu_var(nmi_touch)) {
 +              __get_cpu_var(nmi_touch) = 0;
 +              touched = 1;
 +      }
 +
 +      if (cpu_isset(cpu, backtrace_mask)) {
 +              static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
 +
 +              spin_lock(&lock);
 +              printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
 +              dump_stack();
 +              spin_unlock(&lock);
 +              cpu_clear(cpu, backtrace_mask);
 +      }
 +
 +      /* Could check oops_in_progress here too, but it's safer not to */
 +      if (mce_in_progress())
 +              touched = 1;
 +
 +      /* if none of the timers is firing, this cpu isn't doing much */
 +      if (!touched && __get_cpu_var(last_irq_sum) == sum) {
 +              /*
 +               * Ayiee, looks like this CPU is stuck ...
 +               * wait a few IRQs (5 seconds) before doing the oops ...
 +               */
 +              local_inc(&__get_cpu_var(alert_counter));
 +              if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
 +                      /*
 +                       * die_nmi will return ONLY if NOTIFY_STOP happens..
 +                       */
 +                      die_nmi("BUG: NMI Watchdog detected LOCKUP",
 +                              regs, panic_on_timeout);
 +      } else {
 +              __get_cpu_var(last_irq_sum) = sum;
 +              local_set(&__get_cpu_var(alert_counter), 0);
 +      }
 +
 +      /* see if the nmi watchdog went off */
 +      if (!__get_cpu_var(wd_enabled))
 +              return rc;
 +      switch (nmi_watchdog) {
 +      case NMI_LOCAL_APIC:
 +              rc |= lapic_wd_event(nmi_hz);
 +              break;
 +      case NMI_IO_APIC:
 +              /*
 +               * don't know how to accurately check for this.
 +               * just assume it was a watchdog timer interrupt
 +               * This matches the old behaviour.
 +               */
 +              rc = 1;
 +              break;
 +      }
 +      return rc;
 +}
 +
 +#ifdef CONFIG_SYSCTL
 +
 +static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
 +{
 +      unsigned char reason = get_nmi_reason();
 +      char buf[64];
 +
 +      sprintf(buf, "NMI received for unknown reason %02x\n", reason);
 +      die_nmi(buf, regs, 1); /* Always panic here */
 +      return 0;
 +}
 +
 +/*
 + * proc handler for /proc/sys/kernel/nmi
 + */
 +int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
 +                      void __user *buffer, size_t *length, loff_t *ppos)
 +{
 +      int old_state;
 +
 +      nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
 +      old_state = nmi_watchdog_enabled;
 +      proc_dointvec(table, write, file, buffer, length, ppos);
 +      if (!!old_state == !!nmi_watchdog_enabled)
 +              return 0;
 +
 +      if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
 +              printk(KERN_WARNING
 +                      "NMI watchdog is permanently disabled\n");
 +              return -EIO;
 +      }
 +
 +      if (nmi_watchdog == NMI_LOCAL_APIC) {
 +              if (nmi_watchdog_enabled)
 +                      enable_lapic_nmi_watchdog();
 +              else
 +                      disable_lapic_nmi_watchdog();
 +      } else {
 +              printk(KERN_WARNING
 +                      "NMI watchdog doesn't know what hardware to touch\n");
 +              return -EIO;
 +      }
 +      return 0;
 +}
 +
 +#endif /* CONFIG_SYSCTL */
 +
 +int do_nmi_callback(struct pt_regs *regs, int cpu)
 +{
 +#ifdef CONFIG_SYSCTL
 +      if (unknown_nmi_panic)
 +              return unknown_nmi_panic_callback(regs, cpu);
 +#endif
 +      return 0;
 +}
 +
 +void __trigger_all_cpu_backtrace(void)
 +{
 +      int i;
 +
 +      backtrace_mask = cpu_online_map;
 +      /* Wait for up to 10 seconds for all CPUs to do the backtrace */
 +      for (i = 0; i < 10 * 1000; i++) {
 +              if (cpus_empty(backtrace_mask))
 +                      break;
 +              mdelay(1);
 +      }
 +}
Simple merge
index f35c2d8016ac412c1c0433bf4382829c1d2cef31,89647898f54606686171e33cccfe53f2e948edf7..687376ab07e82ece4ab1eeb609d738d76245d226
@@@ -327,12 -345,19 +327,12 @@@ static void __cpuinit start_secondary(v
         * lock helps us to not include this cpu in a currently in progress
         * smp_call_function().
         */
-       lock_ipi_call_lock();
+       ipi_call_lock_irq();
 -#ifdef CONFIG_X86_64
 -      spin_lock(&vector_lock);
 -
 -      /* Setup the per cpu irq handling data structures */
 -      __setup_vector_irq(smp_processor_id());
 -      /*
 -       * Allow the master to continue.
 -       */
 -      spin_unlock(&vector_lock);
 +#ifdef CONFIG_X86_IO_APIC
 +      setup_vector_irq(smp_processor_id());
  #endif
        cpu_set(smp_processor_id(), cpu_online_map);
-       unlock_ipi_call_lock();
+       ipi_call_unlock_irq();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
  
        setup_secondary_clock();
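
lock_ipi_call_lock()/unlock_ipi_call_lock() were x86-private; the generic
helpers export ipi_call_lock_irq()/ipi_call_unlock_irq() instead. A booting
CPU holds the lock while adding itself to cpu_online_map so it cannot be
missed by an in-flight smp_call_function(). A sketch of the wrappers,
assuming kernel/smp.c names its queue lock call_function_lock:

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}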
Simple merge
Simple merge
index 8dedd01e909fe4efeb2ff1501518856b23b6e967,abea08459a73b0984f66ae656c546668315c17ca..ee0fba0921572ba89ad56e45e5757e9eb9351385
@@@ -1129,9 -1072,18 +1067,9 @@@ static void do_flush_tlb_all(void *info
  /* flush the TLB of every active CPU in the system */
  void flush_tlb_all(void)
  {
-       on_each_cpu(do_flush_tlb_all, 0, 1, 1);
+       on_each_cpu(do_flush_tlb_all, 0, 1);
  }
  
 -/* used to set up the trampoline for other CPUs when the memory manager
 - * is sorted out */
 -void __init smp_alloc_memory(void)
 -{
 -      trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
 -      if (__pa(trampoline_base) >= 0x93000)
 -              BUG();
 -}
 -
  /* send a reschedule CPI to one CPU by physical CPU number*/
  static void voyager_smp_send_reschedule(int cpu)
  {
Simple merge
index 2b6ad5b9f9d53f0012629f5c255f3d12e6d87f82,3238ad32ffd8a1f2c7c75c53696a2fde8b65f321..7f3329b55d2e3b76eb60886844d7ee846b7a0c60
@@@ -269,9 -269,9 +269,9 @@@ static void nmi_cpu_shutdown(void *dumm
  
  static void nmi_shutdown(void)
  {
 -      struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);
 +      struct op_msrs *msrs = &get_cpu_var(cpu_msrs);
        nmi_enabled = 0;
-       on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+       on_each_cpu(nmi_cpu_shutdown, NULL, 1);
        unregister_die_notifier(&profile_exceptions_nb);
        model->shutdown(msrs);
        free_msrs();
Simple merge
Simple merge
index d2e3c20127d7608fd0bb5735256994bb56fd01b2,a1651d029ea83fdda9ac8a716c541d13d8de5475..233156f39b7f39f605fdb2c0cbd266da60783e28
  #include "xen-ops.h"
  #include "mmu.h"
  
 -static cpumask_t xen_cpu_initialized_map;
 +cpumask_t xen_cpu_initialized_map;
- static DEFINE_PER_CPU(int, resched_irq) = -1;
- static DEFINE_PER_CPU(int, callfunc_irq) = -1;
- static DEFINE_PER_CPU(int, debug_irq) = -1;
- /*
-  * Structure and data for smp_call_function(). This is designed to minimise
-  * static memory requirements. It also looks cleaner.
-  */
- static DEFINE_SPINLOCK(call_lock);
  
- struct call_data_struct {
-       void (*func) (void *info);
-       void *info;
-       atomic_t started;
-       atomic_t finished;
-       int wait;
- };
+ static DEFINE_PER_CPU(int, resched_irq);
+ static DEFINE_PER_CPU(int, callfunc_irq);
+ static DEFINE_PER_CPU(int, callfuncsingle_irq);
+ static DEFINE_PER_CPU(int, debug_irq) = -1;
  
  static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
- static struct call_data_struct *call_data;
+ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
  
  /*
   * Reschedule call back. Nothing to do,
index d852ddbb3448bf61245f4c16ecd88b8f14a2c5aa,a636ab5e13411835e0de75fbcb43e1bdf96451e3..6f4b1045c1c20768015d1fc7987f9ee3a73b4475
@@@ -55,16 -46,9 +55,11 @@@ void xen_smp_cpus_done(unsigned int max
  
  void xen_smp_send_stop(void);
  void xen_smp_send_reschedule(int cpu);
- int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-                          int wait);
- int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-                                int nonatomic, int wait);
- int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-                              void *info, int wait);
+ void xen_smp_send_call_function_ipi(cpumask_t mask);
+ void xen_smp_send_call_function_single_ipi(int cpu);
  
 +extern cpumask_t xen_cpu_initialized_map;
 +
  
  /* Declare an asm function, along with symbols needed to make it
     inlineable */
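
With queueing and dispatch handled by kernel/smp.c, the Xen backend is
reduced to the two send hooks declared above. A hedged sketch, assuming the
Xen IPI enum gains XEN_CALL_FUNCTION_SINGLE_VECTOR alongside
XEN_CALL_FUNCTION_VECTOR (the real bodies also yield and bump IRQ stats):

void xen_smp_send_call_function_ipi(cpumask_t mask)
{
	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of_cpu(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}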
diff --cc fs/buffer.c
Simple merge
index 18f067c310f7880be0f8629e45a91add4b380abb,bf025399d9392ed7910d34c8c0b7309c125f92e9..77ba51df56680fcd9e28b4529eb647c0eea07292
 +#ifndef _ASM_HW_IRQ_H
 +#define _ASM_HW_IRQ_H
 +
 +/*
 + * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
 + *
 + * moved some of the old arch/i386/kernel/irq.h to here. VY
 + *
 + * IRQ/IPI changes taken from work by Thomas Radke
 + * <tomsoft@informatik.tu-chemnitz.de>
 + *
 + * hacked by Andi Kleen for x86-64.
 + * unified by tglx
 + */
 +
 +#include <asm/irq_vectors.h>
 +
 +#ifndef __ASSEMBLY__
 +
 +#include <linux/percpu.h>
 +#include <linux/profile.h>
 +#include <linux/smp.h>
 +
 +#include <asm/atomic.h>
 +#include <asm/irq.h>
 +#include <asm/sections.h>
 +
 +#define platform_legacy_irq(irq)      ((irq) < 16)
 +
 +/* Interrupt handlers registered during init_IRQ */
 +extern void apic_timer_interrupt(void);
 +extern void error_interrupt(void);
 +extern void spurious_interrupt(void);
 +extern void thermal_interrupt(void);
 +extern void reschedule_interrupt(void);
 +
 +extern void invalidate_interrupt(void);
 +extern void invalidate_interrupt0(void);
 +extern void invalidate_interrupt1(void);
 +extern void invalidate_interrupt2(void);
 +extern void invalidate_interrupt3(void);
 +extern void invalidate_interrupt4(void);
 +extern void invalidate_interrupt5(void);
 +extern void invalidate_interrupt6(void);
 +extern void invalidate_interrupt7(void);
 +
 +extern void irq_move_cleanup_interrupt(void);
 +extern void threshold_interrupt(void);
 +
 +extern void call_function_interrupt(void);
++extern void call_function_single_interrupt(void);
 +
 +/* PIC specific functions */
 +extern void disable_8259A_irq(unsigned int irq);
 +extern void enable_8259A_irq(unsigned int irq);
 +extern int i8259A_irq_pending(unsigned int irq);
 +extern void make_8259A_irq(unsigned int irq);
 +extern void init_8259A(int aeoi);
 +
 +/* IOAPIC */
 +#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
 +extern unsigned long io_apic_irqs;
 +
 +extern void init_VISWS_APIC_irqs(void);
 +extern void setup_IO_APIC(void);
 +extern void disable_IO_APIC(void);
 +extern void print_IO_APIC(void);
 +extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
 +extern void setup_ioapic_dest(void);
 +
 +#ifdef CONFIG_X86_64
 +extern void enable_IO_APIC(void);
 +#endif
 +
 +/* IPI functions */
 +extern void send_IPI_self(int vector);
 +extern void send_IPI(int dest, int vector);
 +
 +/* Statistics */
 +extern atomic_t irq_err_count;
 +extern atomic_t irq_mis_count;
 +
 +/* EISA */
 +extern void eisa_set_level_irq(unsigned int irq);
 +
 +/* Voyager functions */
 +extern asmlinkage void vic_cpi_interrupt(void);
 +extern asmlinkage void vic_sys_interrupt(void);
 +extern asmlinkage void vic_cmn_interrupt(void);
 +extern asmlinkage void qic_timer_interrupt(void);
 +extern asmlinkage void qic_invalidate_interrupt(void);
 +extern asmlinkage void qic_reschedule_interrupt(void);
 +extern asmlinkage void qic_enable_irq_interrupt(void);
 +extern asmlinkage void qic_call_function_interrupt(void);
 +
  #ifdef CONFIG_X86_32
 -# include "hw_irq_32.h"
 +extern void (*const interrupt[NR_IRQS])(void);
  #else
 -# include "hw_irq_64.h"
 +typedef int vector_irq_t[NR_VECTORS];
 +DECLARE_PER_CPU(vector_irq_t, vector_irq);
 +extern spinlock_t vector_lock;
 +#endif
 +extern void setup_vector_irq(int cpu);
 +
 +#endif /* !__ASSEMBLY__ */
 +
  #endif
index 0ac864ef3cd4f112dd03c9e2b6b9600dbb094277,0000000000000000000000000000000000000000..90b1d1f12f08d652d39fef01e241ed321f38c415
mode 100644,000000..100644
--- /dev/null
@@@ -1,169 -1,0 +1,173 @@@
- #define VIC_END_FAKE_CPI              VIC_CALL_FUNCTION_CPI
 +#ifndef _ASM_IRQ_VECTORS_H
 +#define _ASM_IRQ_VECTORS_H
 +
 +#include <linux/threads.h>
 +
 +#define NMI_VECTOR            0x02
 +
 +/*
 + * IDT vectors usable for external interrupt sources start
 + * at 0x20:
 + */
 +#define FIRST_EXTERNAL_VECTOR 0x20
 +
 +#ifdef CONFIG_X86_32
 +# define SYSCALL_VECTOR               0x80
 +#else
 +# define IA32_SYSCALL_VECTOR  0x80
 +#endif
 +
 +/*
 + * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
 + * cleanup after irq migration on 64 bit.
 + */
 +#define IRQ_MOVE_CLEANUP_VECTOR       FIRST_EXTERNAL_VECTOR
 +
 +/*
 + * Vectors 0x20-0x2f are used for ISA interrupts on 32 bit.
 + * Vectors 0x30-0x3f are used for ISA interrupts on 64 bit.
 + */
 +#ifdef CONFIG_X86_32
 +#define IRQ0_VECTOR           (FIRST_EXTERNAL_VECTOR)
 +#else
 +#define IRQ0_VECTOR           (FIRST_EXTERNAL_VECTOR + 0x10)
 +#endif
 +#define IRQ1_VECTOR           (IRQ0_VECTOR + 1)
 +#define IRQ2_VECTOR           (IRQ0_VECTOR + 2)
 +#define IRQ3_VECTOR           (IRQ0_VECTOR + 3)
 +#define IRQ4_VECTOR           (IRQ0_VECTOR + 4)
 +#define IRQ5_VECTOR           (IRQ0_VECTOR + 5)
 +#define IRQ6_VECTOR           (IRQ0_VECTOR + 6)
 +#define IRQ7_VECTOR           (IRQ0_VECTOR + 7)
 +#define IRQ8_VECTOR           (IRQ0_VECTOR + 8)
 +#define IRQ9_VECTOR           (IRQ0_VECTOR + 9)
 +#define IRQ10_VECTOR          (IRQ0_VECTOR + 10)
 +#define IRQ11_VECTOR          (IRQ0_VECTOR + 11)
 +#define IRQ12_VECTOR          (IRQ0_VECTOR + 12)
 +#define IRQ13_VECTOR          (IRQ0_VECTOR + 13)
 +#define IRQ14_VECTOR          (IRQ0_VECTOR + 14)
 +#define IRQ15_VECTOR          (IRQ0_VECTOR + 15)
 +
 +/*
 + * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
 + *
 + *  some of the following vectors are 'rare', they are merged
 + *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
 + *  TLB, reschedule and local APIC vectors are performance-critical.
 + *
 + *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
 + */
 +#ifdef CONFIG_X86_32
 +
 +# define SPURIOUS_APIC_VECTOR         0xff
 +# define ERROR_APIC_VECTOR            0xfe
 +# define INVALIDATE_TLB_VECTOR                0xfd
 +# define RESCHEDULE_VECTOR            0xfc
 +# define CALL_FUNCTION_VECTOR         0xfb
++# define CALL_FUNCTION_SINGLE_VECTOR  0xfa
 +# define THERMAL_APIC_VECTOR          0xf0
 +
 +#else
 +
 +#define SPURIOUS_APIC_VECTOR          0xff
 +#define ERROR_APIC_VECTOR             0xfe
 +#define RESCHEDULE_VECTOR             0xfd
 +#define CALL_FUNCTION_VECTOR          0xfc
++#define CALL_FUNCTION_SINGLE_VECTOR   0xfb
 +#define THERMAL_APIC_VECTOR           0xfa
 +#define THRESHOLD_APIC_VECTOR         0xf9
 +#define INVALIDATE_TLB_VECTOR_END     0xf7
 +#define INVALIDATE_TLB_VECTOR_START   0xf0    /* f0-f7 used for TLB flush */
 +
 +#define NUM_INVALIDATE_TLB_VECTORS    8
 +
 +#endif
 +
 +/*
 + * Local APIC timer IRQ vector is on a different priority level,
 + * to work around the 'lost local interrupt if more than 2 IRQ
 + * sources per level' errata.
 + */
 +#define LOCAL_TIMER_VECTOR    0xef
 +
 +/*
 + * First APIC vector available to drivers: (vectors 0x30-0xee) we
 + * start at 0x31(0x41) to spread out vectors evenly between priority
 + * levels. (0x80 is the syscall vector)
 + */
 +#ifdef CONFIG_X86_32
 +# define FIRST_DEVICE_VECTOR  0x31
 +#else
 +# define FIRST_DEVICE_VECTOR  (IRQ15_VECTOR + 2)
 +#endif
 +
 +#define NR_VECTORS            256
 +
 +#define FPU_IRQ                       13
 +
 +#define       FIRST_VM86_IRQ          3
 +#define LAST_VM86_IRQ         15
 +#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
 +
 +#if !defined(CONFIG_X86_VOYAGER)
 +
 +# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
 +
 +#  define NR_IRQS             224
 +
 +#  if (224 >= 32 * NR_CPUS)
 +#   define NR_IRQ_VECTORS     NR_IRQS
 +#  else
 +#   define NR_IRQ_VECTORS     (32 * NR_CPUS)
 +#  endif
 +
 +# else /* IO_APIC || PARAVIRT */
 +
 +#  define NR_IRQS             16
 +#  define NR_IRQ_VECTORS      NR_IRQS
 +
 +# endif
 +
 +#else /* !VISWS && !VOYAGER */
 +
 +# define NR_IRQS              224
 +# define NR_IRQ_VECTORS               NR_IRQS
 +
 +#endif /* VISWS */
 +
 +/* Voyager specific defines */
 +/* These define the CPIs we use in linux */
 +#define VIC_CPI_LEVEL0                        0
 +#define VIC_CPI_LEVEL1                        1
 +/* now the fake CPIs */
 +#define VIC_TIMER_CPI                 2
 +#define VIC_INVALIDATE_CPI            3
 +#define VIC_RESCHEDULE_CPI            4
 +#define VIC_ENABLE_IRQ_CPI            5
 +#define VIC_CALL_FUNCTION_CPI         6
++#define VIC_CALL_FUNCTION_SINGLE_CPI  7
 +
 +/* Now the QIC CPIs:  Since we don't need the two initial levels,
 + * these are 2 less than the VIC CPIs */
 +#define QIC_CPI_OFFSET                        1
 +#define QIC_TIMER_CPI                 (VIC_TIMER_CPI - QIC_CPI_OFFSET)
 +#define QIC_INVALIDATE_CPI            (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
 +#define QIC_RESCHEDULE_CPI            (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
 +#define QIC_ENABLE_IRQ_CPI            (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
 +#define QIC_CALL_FUNCTION_CPI         (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
++#define QIC_CALL_FUNCTION_SINGLE_CPI  (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
 +
 +#define VIC_START_FAKE_CPI            VIC_TIMER_CPI
++#define VIC_END_FAKE_CPI              VIC_CALL_FUNCTION_SINGLE_CPI
 +
 +/* this is the SYS_INT CPI. */
 +#define VIC_SYS_INT                   8
 +#define VIC_CMN_INT                   15
 +
 +/* This is the boot CPI for alternate processors.  It gets overwritten
 + * by the above once the system has activated all available processors */
 +#define VIC_CPU_BOOT_CPI              VIC_CPI_LEVEL0
 +#define VIC_CPU_BOOT_ERRATA_CPI               (VIC_CPI_LEVEL0 + 8)
 +
 +
 +#endif /* _ASM_IRQ_VECTORS_H */
index 2e221f1ce0b21a4ed5cc8c715332455496f34b6a,e3c24807b59b114928b110373a88e952eb031e2b..c2784b3e0b77e23269a1c61407aaeb2db39cdc9d
@@@ -197,7 -205,7 +202,5 @@@ static inline int hard_smp_processor_id
  extern void cpu_uninit(void);
  #endif
  
- extern void lock_ipi_call_lock(void);
- extern void unlock_ipi_call_lock(void);
 -extern void smp_alloc_memory(void);
  #endif /* __ASSEMBLY__ */
  #endif
diff --cc kernel/Makefile
index f6328e16dfdde5749b05279e503f2172a2831544,9fa57976f252fe5604047f39981b0dbf90be357a..0a7ed838984b034a0fcb843dc765f6941c769e72
@@@ -39,7 -27,8 +39,8 @@@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.
  obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
  obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
  obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 -obj-$(CONFIG_SMP) += cpu.o spinlock.o
+ obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
 +obj-$(CONFIG_SMP) += spinlock.o
  obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
  obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
  obj-$(CONFIG_UID16) += uid16.o
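
smp.o here is the generic helper code itself: any architecture selecting
CONFIG_USE_GENERIC_SMP_HELPERS (powerpc above, x86 elsewhere in this merge)
now builds kernel/smp.c instead of carrying a private smp_call_function()
implementation. A minimal consumer of the converted API, as an illustrative
(hypothetical) module:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>

static void say_hello(void *info)
{
	/* runs on every online CPU, in IPI context */
	printk(KERN_INFO "IPI handled on CPU %d\n", smp_processor_id());
}

static int __init ipi_demo_init(void)
{
	on_each_cpu(say_hello, NULL, 1);	/* func, info, wait */
	return 0;
}

static void __exit ipi_demo_exit(void)
{
}

module_init(ipi_demo_init);
module_exit(ipi_demo_exit);
MODULE_LICENSE("GPL");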
Simple merge
Simple merge
Simple merge
diff --cc mm/page_alloc.c
Simple merge
diff --cc mm/slab.c
Simple merge
diff --cc mm/slub.c
Simple merge
diff --cc net/iucv/iucv.c
Simple merge