seq_printf(p, "%3d: ", i);
for_each_present_cpu(cpu)
-- - seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
++ + seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
/* Handle bad interrupts */
static struct irq_desc bad_irq_desc = {
.handle_irq = handle_bad_irq,
-- .lock = SPIN_LOCK_UNLOCKED
++ .lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};
/*
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/trace.h>
++ #include <asm/pda.h>
static atomic_t irq_err_count;
static spinlock_t irq_controller_lock;
goto skip;
seq_printf(p, "%3d: ", i);
for_each_online_cpu(j)
-- - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++ + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
seq_printf(p, " %8s", irq_desc[i].chip->name);
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-- } else if (i == NR_IRQS)
++ } else if (i == NR_IRQS) {
++ seq_printf(p, "NMI: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
++ seq_printf(p, " CORE Non Maskable Interrupt\n");
seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
++ }
return 0;
}
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for_each_online_cpu(j)
-- - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++ + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
seq_printf(p, " %14s", irq_desc[i].chip->name);
+ seq_printf(p, "-%-8s", irq_desc[i].name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for_each_online_cpu(j)
-- - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++ + seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
seq_printf(p, " %9s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++ } else if (i == NR_IRQS) {
++ seq_printf(p, "NMI: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
++ seq_printf(p, " Non-maskable interrupts\n");
}
return 0;
}
local_irq_restore(flags);
}
-- static void unhandled_perf_irq(struct pt_regs *regs)
-- {
-- unsigned long pcr, pic;
--
-- read_pcr(pcr);
-- read_pic(pic);
--
-- write_pcr(0);
--
-- printk(KERN_EMERG "CPU %d: Got unexpected perf counter IRQ.\n",
-- smp_processor_id());
-- printk(KERN_EMERG "CPU %d: PCR[%016lx] PIC[%016lx]\n",
-- smp_processor_id(), pcr, pic);
-- }
--
-- /* Almost a direct copy of the powerpc PMC code. */
-- static DEFINE_SPINLOCK(perf_irq_lock);
-- static void *perf_irq_owner_caller; /* mostly for debugging */
-- static void (*perf_irq)(struct pt_regs *regs) = unhandled_perf_irq;
--
-- /* Invoked from level 15 PIL handler in trap table. */
-- void perfctr_irq(int irq, struct pt_regs *regs)
-- {
-- clear_softint(1 << irq);
-- perf_irq(regs);
-- }
--
-- int register_perfctr_intr(void (*handler)(struct pt_regs *))
-- {
-- int ret;
--
-- if (!handler)
-- return -EINVAL;
--
-- spin_lock(&perf_irq_lock);
-- if (perf_irq != unhandled_perf_irq) {
-- printk(KERN_WARNING "register_perfctr_intr: "
-- "perf IRQ busy (reserved by caller %p)\n",
-- perf_irq_owner_caller);
-- ret = -EBUSY;
-- goto out;
-- }
--
-- perf_irq_owner_caller = __builtin_return_address(0);
-- perf_irq = handler;
--
-- ret = 0;
-- out:
-- spin_unlock(&perf_irq_lock);
--
-- return ret;
-- }
-- EXPORT_SYMBOL_GPL(register_perfctr_intr);
--
-- void release_perfctr_intr(void (*handler)(struct pt_regs *))
-- {
-- spin_lock(&perf_irq_lock);
-- perf_irq_owner_caller = NULL;
-- perf_irq = unhandled_perf_irq;
-- spin_unlock(&perf_irq_lock);
-- }
-- EXPORT_SYMBOL_GPL(release_perfctr_intr);
--
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
#include <linux/clocksource.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
++ +#include <linux/irq.h>
#include <asm/oplib.h>
#include <asm/timer.h>
-- -#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/starfire.h>
};
struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
+ EXPORT_SYMBOL(tick_ops);
static void stick_disable_irq(void)
{
return ft->clock_tick_ref;
return cpu_data(cpu).clock_tick;
}
+ EXPORT_SYMBOL(sparc64_get_clock_tick);
#ifdef CONFIG_CPU_FREQ
desc->handle_irq = handle_bad_irq;
desc->chip = &no_irq_chip;
desc->name = NULL;
++ + clear_kstat_irqs(desc);
spin_unlock_irqrestore(&desc->lock, flags);
}
desc->chip->mask_ack(irq);
else {
desc->chip->mask(irq);
- -- desc->chip->ack(irq);
+ ++ if (desc->chip->ack)
+ ++ desc->chip->ack(irq);
}
}
out_unlock:
spin_unlock(&desc->lock);
}
++ EXPORT_SYMBOL_GPL(handle_level_irq);
/**
* handle_fasteoi_irq - irq handler for transparent controllers
kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
- -- desc->chip->ack(irq);
+ ++ if (desc->chip->ack)
+ ++ desc->chip->ack(irq);
desc = irq_remap_to_desc(irq, desc);
/* Mark the IRQ currently in progress.*/
}
spin_unlock_irqrestore(&desc->lock, flags);
}
++ EXPORT_SYMBOL_GPL(__set_irq_handler);
void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
ack_bad_irq(irq);
}
++ #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
++ static void __init init_irq_default_affinity(void)
++ {
++ alloc_bootmem_cpumask_var(&irq_default_affinity);
++ cpumask_setall(irq_default_affinity);
++ }
++ #else
++ static void __init init_irq_default_affinity(void)
++ {
++ }
++ #endif
++
/*
* Linux has a controller-independent interrupt architecture.
* Every controller has a 'controller-template', that is used
/*
 * init_kstat_irqs - allocate the per-CPU interrupt statistics array
 * @desc: irq descriptor to attach the array to
 * @cpu:  cpu whose NUMA node the allocation should come from
 * @nr:   number of counters to allocate (one per possible CPU)
 *
 * Allocates node-local, zeroed storage for desc->kstat_irqs.  GFP_ATOMIC
 * because this can be called from contexts that must not sleep.
 */
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
	int node;
	void *ptr;

	node = cpu_to_node(cpu);
	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);

	/*
	 * Don't overwrite if we could not get a new array:
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
			cpu, node);
		desc->kstat_irqs = ptr;
	}
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
int legacy_count;
int i;
++ init_irq_default_affinity();
++
desc = irq_desc_legacy;
legacy_count = ARRAY_SIZE(irq_desc_legacy);
}
};
++ +static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
struct irq_desc *desc;
int count;
int i;
++ init_irq_default_affinity();
++
desc = irq_desc;
count = ARRAY_SIZE(irq_desc);
-- - for (i = 0; i < count; i++)
++ + for (i = 0; i < count; i++) {
desc[i].irq = i;
++ + desc[i].kstat_irqs = kstat_irqs_all[i];
++ + }
return arch_early_irq_init();
}
}
#endif /* !CONFIG_SPARSE_IRQ */
++ +void clear_kstat_irqs(struct irq_desc *desc)
++ +{
++ + memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
++ +}
++ +
/*
* What should we do if we get a hw irq event on an illegal vector?
* Each architecture has to answer this themself.
}
}
-- -#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
return desc ? desc->kstat_irqs[cpu] : 0;
}
-- -#endif
EXPORT_SYMBOL(kstat_irqs_cpu);
#include "internals.h"
-- #ifdef CONFIG_SMP
++ #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
cpumask_var_t irq_default_affinity;
-- static int init_irq_default_affinity(void)
-- {
-- alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
-- cpumask_setall(irq_default_affinity);
-- return 0;
-- }
-- core_initcall(init_irq_default_affinity);
--
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
/*
* Generic version of the affinity autoselector.
*/
--- int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+++ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
if (!irq_can_set_affinity(irq))
return 0;
return 0;
}
#else
/*
 * Non-balancing configuration: defer the default affinity choice to the
 * architecture's irq_select_affinity().  Renamed from
 * do_irq_select_affinity() to setup_affinity() for a static, file-local
 * identity consistent with the other variants.
 */
static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
int ret;
spin_lock_irqsave(&desc->lock, flags);
--- ret = do_irq_select_affinity(irq, desc);
+++ ret = setup_affinity(irq, desc);
spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
#else
/*
 * No-SMP stub: there is no affinity to set up, so always succeed.
 * Signature matches the SMP variant (irq is unsigned int) so callers
 * compile identically in both configurations.
 */
static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	return 0;
}
desc->status |= IRQ_NO_BALANCING;
/* Set default affinity mask once everything is setup */
--- do_irq_select_affinity(irq, desc);
+++ setup_affinity(irq, desc);
} else if ((new->flags & IRQF_TRIGGER_MASK)
&& (new->flags & IRQF_TRIGGER_MASK)
if (!handler)
return -EINVAL;
- -- action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ ++ action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
struct irq_desc *desc,
int cpu, int nr)
{
-- - unsigned long bytes;
-- -
init_kstat_irqs(desc, cpu, nr);
-- - if (desc->kstat_irqs != old_desc->kstat_irqs) {
-- - /* Compute how many bytes we need per irq and allocate them */
-- - bytes = nr * sizeof(unsigned int);
-- -
-- - memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
-- - }
++ + if (desc->kstat_irqs != old_desc->kstat_irqs)
++ + memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
++ + nr * sizeof(*desc->kstat_irqs));
}
static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
desc = irq_desc_ptrs[irq];
if (desc && old_desc != desc)
-- goto out_unlock;
++ goto out_unlock;
node = cpu_to_node(cpu);
desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
init_copy_one_irq_desc(irq, old_desc, desc, cpu);
irq_desc_ptrs[irq] = desc;
++ spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
free_one_irq_desc(old_desc, desc);
++ spin_unlock(&old_desc->lock);
kfree(old_desc);
++ spin_lock(&desc->lock);
++
++ return desc;
out_unlock:
spin_unlock_irqrestore(&sparse_irq_lock, flags);