VERSION = 2
PATCHLEVEL = 6
-SUBLEVEL = 38
-EXTRAVERSION =
+SUBLEVEL = 39
+EXTRAVERSION = -rc1
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select AUTO_IRQ_AFFINITY if SMP
- select GENERIC_HARDIRQS_NO_DEPRECATED
+ select GENERIC_IRQ_SHOW
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
}
#endif /* CONFIG_SMP */
-int
-show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
- int irq = *(loff_t *) v;
- struct irqaction * action;
- struct irq_desc *desc;
- unsigned long flags;
#ifdef CONFIG_SMP
- if (irq == 0) {
- seq_puts(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%d ", j);
- seq_putc(p, '\n');
- }
-#endif
-
- if (irq < ACTUAL_NR_IRQS) {
- desc = irq_to_desc(irq);
-
- if (!desc)
- return 0;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- action = desc->action;
- if (!action)
- goto unlock;
- seq_printf(p, "%3d: ", irq);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(irq));
-#else
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
+ seq_puts(p, "IPI: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10lu ", cpu_data[j].ipi_count);
+ seq_putc(p, '\n');
#endif
- seq_printf(p, " %14s", get_irq_desc_chip(desc)->name);
- seq_printf(p, " %c%s",
- (action->flags & IRQF_DISABLED)?'+':' ',
- action->name);
-
- for (action=action->next; action; action = action->next) {
- seq_printf(p, ", %c%s",
- (action->flags & IRQF_DISABLED)?'+':' ',
- action->name);
- }
-
- seq_putc(p, '\n');
-unlock:
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- } else if (irq == ACTUAL_NR_IRQS) {
-#ifdef CONFIG_SMP
- seq_puts(p, "IPI: ");
- for_each_online_cpu(j)
- seq_printf(p, "%10lu ", cpu_data[j].ipi_count);
- seq_putc(p, '\n');
-#endif
- seq_puts(p, "PMI: ");
- for_each_online_cpu(j)
- seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j));
- seq_puts(p, " Performance Monitoring\n");
- seq_printf(p, "ERR: %10lu\n", irq_err_count);
- }
+ seq_puts(p, "PMI: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j));
+ seq_puts(p, " Performance Monitoring\n");
+ seq_printf(p, "ERR: %10lu\n", irq_err_count);
return 0;
}
void __init
init_rtc_irq(void)
{
- set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+ irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
handle_simple_irq, "RTC");
setup_irq(RTC_IRQ, &timer_irqaction);
}
outb(0xff, 0xA1); /* mask all of 8259A-2 */
for (i = 0; i < 16; i++) {
- set_irq_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &i8259a_irq_type, handle_level_irq);
}
setup_irq(2, &cascade);
for (i = 16; i < 48; ++i) {
if ((ignore_mask >> i) & 1)
continue;
- set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
for (i = 16; i < max; ++i) {
if (i < 64 && ((ignore_mask >> i) & 1))
continue;
- set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &srm_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
on while IRQ probing. */
if (i >= 16+20 && i <= 16+30)
continue;
- set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
outb(0xff, 0x806);
for (i = 16; i < 35; ++i) {
- set_irq_chip_and_handler(i, &cabriolet_irq_type,
- handle_level_irq);
+ irq_set_chip_and_handler(i, &cabriolet_irq_type,
+ handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
{
long i;
for (i = imin; i <= imax; ++i) {
- set_irq_chip_and_handler(i, ops, handle_level_irq);
+ irq_set_chip_and_handler(i, ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
init_i8259a_irqs();
for (i = 16; i < 32; ++i) {
- set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
init_i8259a_irqs();
for (i = 16; i < 128; ++i) {
- set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
{
init_i8259a_irqs();
- set_irq_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq);
- set_irq_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq);
- set_irq_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq);
- set_irq_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq);
- set_irq_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq);
common_init_isa_dma();
}
/* Set up the lsi irqs. */
for (i = 0; i < 128; ++i) {
- set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+ irq_set_chip_and_handler(base + i, lsi_ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
/* Set up the msi irqs. */
for (i = 128; i < (128 + 512); ++i) {
- set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+ irq_set_chip_and_handler(base + i, msi_ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
/* Reserve the legacy irqs. */
for (i = 0; i < 16; ++i) {
- set_irq_chip_and_handler(i, &marvel_legacy_irq_type,
- handle_level_irq);
+ irq_set_chip_and_handler(i, &marvel_legacy_irq_type,
+ handle_level_irq);
}
/* Init the io7 irqs. */
mikasa_update_irq_hw(0);
for (i = 16; i < 32; ++i) {
- set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &mikasa_irq_type,
+ handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
outw(0, 0x54c);
for (i = 16; i < 48; ++i) {
- set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &noritake_irq_type,
+ handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
for (i = 16; i < 128; ++i) {
- set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &rawhide_irq_type,
+ handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
rx164_update_irq_hw(0);
for (i = 16; i < 40; ++i) {
- set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
long i;
for (i = 0; i < nr_of_irqs; ++i) {
- set_irq_chip_and_handler(i, &sable_lynx_irq_type,
- handle_level_irq);
+ irq_set_chip_and_handler(i, &sable_lynx_irq_type,
+ handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
takara_update_irq_hw(i, -1);
for (i = 16; i < 128; ++i) {
- set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &takara_irq_type,
+ handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
{
long i;
for (i = imin; i <= imax; ++i) {
- set_irq_chip_and_handler(i, ops, handle_level_irq);
+ irq_set_chip_and_handler(i, ops, handle_level_irq);
irq_set_status_flags(i, IRQ_LEVEL);
}
}
for (i = 0; i < 16; ++i) {
if (i == 2)
continue;
- set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
- handle_level_irq);
+ irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type,
+ handle_level_irq);
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
- set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
- handle_level_irq);
+ irq_set_chip_and_handler(36 + irq_bias, &wildfire_irq_type,
+ handle_level_irq);
irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
for (i = 40; i < 64; ++i) {
- set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
- handle_level_irq);
+ irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type,
+ handle_level_irq);
irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
}
select HAVE_C_RECORDMCOUNT
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
+ select GENERIC_IRQ_SHOW
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
+ select HAVE_SCHED_CLOCK
help
Support for Freescale MXC/iMX-based family of processors
#if defined(CONFIG_DEBUG_ICEDCC)
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
.macro loadsp, rb, tmp
.endm
.macro writeb, ch, rb
mcr p14, 0, \ch, c0, c5, 0
.endm
-#elif defined(CONFIG_CPU_V7)
- .macro loadsp, rb, tmp
- .endm
- .macro writeb, ch, rb
-wait: mrc p14, 0, pc, c0, c1, 0
- bcs wait
- mcr p14, 0, \ch, c0, c5, 0
- .endm
#elif defined(CONFIG_CPU_XSCALE)
.macro loadsp, rb, tmp
.endm
#ifdef CONFIG_DEBUG_ICEDCC
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
static void icedcc_putc(int ch)
{
asm("mcr p14, 0, %0, c0, c5, 0" : : "r" (ch));
}
-#elif defined(CONFIG_CPU_V7)
-
-static void icedcc_putc(int ch)
-{
- asm(
- "wait: mrc p14, 0, pc, c0, c1, 0 \n\
- bcs wait \n\
- mcr p14, 0, %0, c0, c5, 0 "
- : : "r" (ch));
-}
#elif defined(CONFIG_CPU_XSCALE)
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
- struct gic_chip_data *chip_data = get_irq_data(irq);
- struct irq_chip *chip = get_irq_chip(irq);
+ struct gic_chip_data *chip_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
unsigned int cascade_irq, gic_irq;
unsigned long status;
{
if (gic_nr >= MAX_GIC_NR)
BUG();
- if (set_irq_data(irq, &gic_data[gic_nr]) != 0)
+ if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
BUG();
- set_irq_chained_handler(irq, gic_handle_cascade_irq);
+ irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
static void __init gic_dist_init(struct gic_chip_data *gic,
* Setup the Linux IRQ subsystem.
*/
for (i = irq_start; i < irq_limit; i++) {
- set_irq_chip(i, &gic_chip);
- set_irq_chip_data(i, gic);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &gic_chip, handle_level_irq);
+ irq_set_chip_data(i, gic);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
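
For reference, a minimal sketch of the conversion pattern applied throughout this series (hypothetical chip, cookie and IRQ range; header names assumed as in the ARM tree): the old set_irq_chip()/set_irq_handler() pair collapses into a single irq_set_chip_and_handler() call, with the per-IRQ cookie installed through irq_set_chip_data().

    #include <linux/init.h>
    #include <linux/irq.h>
    #include <asm/mach/irq.h>	/* set_irq_flags(), IRQF_VALID, IRQF_PROBE */

    static struct irq_chip example_chip;	/* hypothetical chip */

    static void __init example_init_irqs(void *cookie, unsigned int first,
    				     unsigned int count)
    {
    	unsigned int irq;

    	for (irq = first; irq < first + count; irq++) {
    		/* one call instead of set_irq_chip() + set_irq_handler() */
    		irq_set_chip_and_handler(irq, &example_chip, handle_level_irq);
    		irq_set_chip_data(irq, cookie);
    		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
    	}
    }
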
unsigned long flags;
local_irq_save(flags);
- irq_to_desc(irq)->status |= IRQ_NOPROBE;
+ irq_set_status_flags(irq, IRQ_NOPROBE);
gic_unmask_irq(irq_get_irq_data(irq));
local_irq_restore(flags);
}
__raw_writel((0), IT8152_INTC_LDCNIRR);
for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) {
- set_irq_chip(irq, &it8152_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &it8152_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
static void locomo_handler(unsigned int irq, struct irq_desc *desc)
{
- struct locomo *lchip = get_irq_chip_data(irq);
+ struct locomo *lchip = irq_get_chip_data(irq);
int req, i;
/* Acknowledge the parent IRQ */
/*
* Install handler for IRQ_LOCOMO_HW.
*/
- set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
- set_irq_chip_data(lchip->irq, lchip);
- set_irq_chained_handler(lchip->irq, locomo_handler);
+ irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
+ irq_set_chip_data(lchip->irq, lchip);
+ irq_set_chained_handler(lchip->irq, locomo_handler);
/* Install handlers for IRQ_LOCOMO_* */
for ( ; irq <= lchip->irq_base + 3; irq++) {
- set_irq_chip(irq, &locomo_chip);
- set_irq_chip_data(irq, lchip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq);
+ irq_set_chip_data(irq, lchip);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
device_for_each_child(lchip->dev, NULL, locomo_remove_child);
if (lchip->irq != NO_IRQ) {
- set_irq_chained_handler(lchip->irq, NULL);
- set_irq_data(lchip->irq, NULL);
+ irq_set_chained_handler(lchip->irq, NULL);
+ irq_set_handler_data(lchip->irq, NULL);
}
iounmap(lchip->base);
sa1111_irq_handler(unsigned int irq, struct irq_desc *desc)
{
unsigned int stat0, stat1, i;
- struct sa1111 *sachip = get_irq_data(irq);
+ struct sa1111 *sachip = irq_get_handler_data(irq);
void __iomem *mapbase = sachip->base + SA1111_INTC;
stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0);
sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1);
for (irq = IRQ_GPAIN0; irq <= SSPROR; irq++) {
- set_irq_chip(irq, &sa1111_low_chip);
- set_irq_chip_data(irq, sachip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &sa1111_low_chip,
+ handle_edge_irq);
+ irq_set_chip_data(irq, sachip);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
for (irq = AUDXMTDMADONEA; irq <= IRQ_S1_BVD1_STSCHG; irq++) {
- set_irq_chip(irq, &sa1111_high_chip);
- set_irq_chip_data(irq, sachip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &sa1111_high_chip,
+ handle_edge_irq);
+ irq_set_chip_data(irq, sachip);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
/*
* Register SA1111 interrupt
*/
- set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
- set_irq_data(sachip->irq, sachip);
- set_irq_chained_handler(sachip->irq, sa1111_irq_handler);
+ irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_handler_data(sachip->irq, sachip);
+ irq_set_chained_handler(sachip->irq, sa1111_irq_handler);
}
/*
clk_disable(sachip->clk);
if (sachip->irq != NO_IRQ) {
- set_irq_chained_handler(sachip->irq, NULL);
- set_irq_data(sachip->irq, NULL);
+ irq_set_chained_handler(sachip->irq, NULL);
+ irq_set_handler_data(sachip->irq, NULL);
release_mem_region(sachip->phys + SA1111_INTC, 512);
}
if (vic_sources & (1 << i)) {
unsigned int irq = irq_start + i;
- set_irq_chip(irq, &vic_chip);
- set_irq_chip_data(irq, base);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &vic_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, base);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
irq_err_count++;
}
-/*
- * Obsolete inline function for calling irq descriptor handlers.
- */
-static inline void desc_handle_irq(unsigned int irq, struct irq_desc *desc)
-{
- desc->handle_irq(irq, desc);
-}
-
void set_irq_flags(unsigned int irq, unsigned int flags);
#define IRQF_VALID (1 << 0)
* VBUS IRQ and omit the methods above. Store the GPIO number
* here. Note that sometimes the signals go through inverters...
*/
- bool gpio_vbus_inverted;
- int gpio_vbus; /* high == vbus present */
bool gpio_pullup_inverted;
int gpio_pullup; /* high == pullup activated */
};
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285);
-/*
- * Same as above. The PrPMC800 carrier board for the PrPMC1100
- * card maps the host-bridge @ 00:01:00 for some reason and it
- * ends up getting scanned. Note that we only want to do this
- * fixup when we find the IXP4xx on a PrPMC system, which is why
- * we check the machine type. We could be running on a board
- * with an IXP4xx target device and we don't want to kill the
- * resources in that case.
- */
-static void __devinit pci_fixup_prpmc1100(struct pci_dev *dev)
-{
- int i;
-
- if (machine_is_prpmc1100()) {
- dev->class &= 0xff;
- dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- dev->resource[i].start = 0;
- dev->resource[i].end = 0;
- dev->resource[i].flags = 0;
- }
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IXP4XX, pci_fixup_prpmc1100);
-
/*
* PCI IDE controllers use non-standard I/O port decoding, respect it.
*/
.macro addruart, rp, rv
.endm
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
.macro senduart, rd, rx
mcr p14, 0, \rd, c0, c5, 0
1002:
.endm
-#elif defined(CONFIG_CPU_V7)
-
- .macro senduart, rd, rx
- mcr p14, 0, \rd, c0, c5, 0
- .endm
-
- .macro busyuart, rd, rx
-busy: mrc p14, 0, pc, c0, c1, 0
- bcs busy
- .endm
-
- .macro waituart, rd, rx
-wait: mrc p14, 0, pc, c0, c1, 0
- bcs wait
-
- .endm
-
#elif defined(CONFIG_CPU_XSCALE)
.macro senduart, rd, rx
*/
if (slot < 8) {
ec->irq = 32 + slot;
- set_irq_chip(ec->irq, &ecard_chip);
- set_irq_handler(ec->irq, handle_level_irq);
+ irq_set_chip_and_handler(ec->irq, &ecard_chip,
+ handle_level_irq);
set_irq_flags(ec->irq, IRQF_VALID);
}
irqhw = ecard_probeirqhw();
- set_irq_chained_handler(IRQ_EXPANSIONCARD,
+ irq_set_chained_handler(IRQ_EXPANSIONCARD,
irqhw ? ecard_irqexp_handler : ecard_irq_handler);
ecard_proc_init();
.fops = &etb_fops,
};
-static int __init etb_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
static struct kobj_attribute trace_mode_attr =
__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
-static int __init etm_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
unsigned long irq_err_count;
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- int i = *(loff_t *) v, cpu;
- struct irq_desc *desc;
- struct irqaction * action;
- unsigned long flags;
- int prec, n;
-
- for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
- n *= 10;
-
-#ifdef CONFIG_SMP
- if (prec < 4)
- prec = 4;
-#endif
-
- if (i == 0) {
- char cpuname[12];
-
- seq_printf(p, "%*s ", prec, "");
- for_each_present_cpu(cpu) {
- sprintf(cpuname, "CPU%d", cpu);
- seq_printf(p, " %10s", cpuname);
- }
- seq_putc(p, '\n');
- }
-
- if (i < nr_irqs) {
- desc = irq_to_desc(i);
- raw_spin_lock_irqsave(&desc->lock, flags);
- action = desc->action;
- if (!action)
- goto unlock;
-
- seq_printf(p, "%*d: ", prec, i);
- for_each_present_cpu(cpu)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
- seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-");
- seq_printf(p, " %s", action->name);
- for (action = action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-unlock:
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- } else if (i == nr_irqs) {
#ifdef CONFIG_FIQ
- show_fiq_list(p, prec);
+ show_fiq_list(p, prec);
#endif
#ifdef CONFIG_SMP
- show_ipi_list(p, prec);
+ show_ipi_list(p, prec);
#endif
#ifdef CONFIG_LOCAL_TIMERS
- show_local_irqs(p, prec);
+ show_local_irqs(p, prec);
#endif
- seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
- }
+ seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
return 0;
}
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
- struct irq_desc *desc;
- unsigned long flags;
+ unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
if (irq >= nr_irqs) {
printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
return;
}
- desc = irq_to_desc(irq);
- raw_spin_lock_irqsave(&desc->lock, flags);
- desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
if (iflags & IRQF_VALID)
- desc->status &= ~IRQ_NOREQUEST;
+ clr |= IRQ_NOREQUEST;
if (iflags & IRQF_PROBE)
- desc->status &= ~IRQ_NOPROBE;
+ clr |= IRQ_NOPROBE;
if (!(iflags & IRQF_NOAUTOEN))
- desc->status &= ~IRQ_NOAUTOEN;
- raw_spin_unlock_irqrestore(&desc->lock, flags);
+ clr |= IRQ_NOAUTOEN;
+ /* Order is clear bits in "clr" then set bits in "set" */
+ irq_modify_status(irq, clr, set & ~clr);
}
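
Spelled out for the common call sites (hypothetical callers), since irq_modify_status() first clears the bits in its second argument and then sets those in its third, the rewritten helper reduces to:

    #include <linux/irq.h>

    /* set_irq_flags(irq, IRQF_VALID | IRQF_PROBE) now amounts to: */
    static void example_mark_valid_probe(unsigned int irq)
    {
    	irq_modify_status(irq, IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0);
    }

    /* while set_irq_flags(irq, IRQF_VALID) keeps the IRQ out of autoprobing: */
    static void example_mark_valid(unsigned int irq)
    {
    	irq_modify_status(irq, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
    }
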
void __init init_IRQ(void)
long cpsr = regs->ARM_cpsr;
fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
- regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
+ if (rn != 15)
+ regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
rdv = fnr.r1;
if (rd == 15) {
long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd];
long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn];
long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
+ long rnv_wb;
- /* Save Rn in case of writeback. */
- regs->uregs[rn] =
- insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+ rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+ if (rn != 15)
+ regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. */
}
static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs)
void (*write_counter)(int idx, u32 val);
void (*start)(void);
void (*stop)(void);
+ void (*reset)(void *);
const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
static u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx)
+ int idx, int overflow)
{
- int shift = 64 - 32;
- s64 prev_raw_count, new_raw_count;
- u64 delta;
+ u64 delta, prev_raw_count, new_raw_count;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count) != prev_raw_count)
goto again;
- delta = (new_raw_count << shift) - (prev_raw_count << shift);
- delta >>= shift;
+ new_raw_count &= armpmu->max_period;
+ prev_raw_count &= armpmu->max_period;
+
+ if (overflow)
+ delta = armpmu->max_period - prev_raw_count + new_raw_count;
+ else
+ delta = new_raw_count - prev_raw_count;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
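
A standalone worked example of the new delta computation (made-up counter values, assuming the 32-bit max_period used by armv7pmu below): when an overflow interrupt was taken, the counter has wrapped once, so the delta is the distance to max_period plus the new count.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only -- not kernel code.  Mirrors the formula above. */
    int main(void)
    {
    	uint64_t max_period = 0xffffffffULL;	/* (1LLU << 32) - 1 */
    	uint64_t prev = 0xfffffff0ULL;		/* last snapshot */
    	uint64_t new  = 0x00000010ULL;		/* counter after wrapping */
    	int overflow = 1;			/* overflow IRQ was taken */
    	uint64_t delta;

    	if (overflow)
    		delta = max_period - prev + new;	/* wrapped once */
    	else
    		delta = new - prev;

    	printf("delta = %llu\n", (unsigned long long)delta);	/* 31 */
    	return 0;
    }
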
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx);
+ armpmu_event_update(event, hwc, hwc->idx, 0);
}
static void
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx);
+ armpmu_event_update(event, hwc, hwc->idx, 0);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
#include "perf_event_v6.c"
#include "perf_event_v7.c"
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+armpmu_reset(void)
+{
+ if (armpmu && armpmu->reset)
+ return on_each_cpu(armpmu->reset, NULL, 1);
+ return 0;
+}
+arch_initcall(armpmu_reset);
+
static int __init
init_hw_perf_events(void)
{
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
static inline void armv7_pmnc_write(unsigned long val)
{
val &= ARMV7_PMNC_MASK;
+ isb();
asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+ isb();
return idx;
}
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
}
}
+static void armv7pmu_reset(void *info)
+{
+ u32 idx, nb_cnt = armpmu->num_events;
+
+ /* The counter and interrupt enable registers are unknown at reset. */
+ for (idx = 1; idx < nb_cnt; ++idx)
+ armv7pmu_disable_event(NULL, idx);
+
+ /* Initialize & Reset PMNC: C and P bits */
+ armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
static struct arm_pmu armv7pmu = {
.handle_irq = armv7pmu_handle_irq,
.enable = armv7pmu_enable_event,
.get_event_idx = armv7pmu_get_event_idx,
.start = armv7pmu_start,
.stop = armv7pmu_stop,
+ .reset = armv7pmu_reset,
.raw_event_mask = 0xFF,
.max_period = (1LLU << 32) - 1,
};
-static u32 __init armv7_reset_read_pmnc(void)
+static u32 __init armv7_read_num_pmnc_events(void)
{
u32 nb_cnt;
- /* Initialize & Reset PMNC: C and P bits */
- armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
-
/* Read the nb of CNTx counters supported from PMNC */
nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
armv7pmu.name = "ARMv7 Cortex-A8";
armv7pmu.cache_map = &armv7_a8_perf_cache_map;
armv7pmu.event_map = &armv7_a8_perf_map;
- armv7pmu.num_events = armv7_reset_read_pmnc();
+ armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
armv7pmu.name = "ARMv7 Cortex-A9";
armv7pmu.cache_map = &armv7_a9_perf_cache_map;
armv7pmu.event_map = &armv7_a9_perf_map;
- armv7pmu.num_events = armv7_reset_read_pmnc();
+ armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
#else
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
#else
ldr r0, sleep_save_sp @ stack phys addr
#endif
- msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
+ setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off
#ifdef MULTI_CPU
- ldmia r0!, {r1, sp, lr, pc} @ load v:p, stack, return fn, resume fn
+ @ load v:p, stack, return fn, resume fn
+ ARM( ldmia r0!, {r1, sp, lr, pc} )
+THUMB( ldmia r0!, {r1, r2, r3, r4} )
+THUMB( mov sp, r2 )
+THUMB( mov lr, r3 )
+THUMB( bx r4 )
#else
- ldmia r0!, {r1, sp, lr} @ load v:p, stack, return fn
+ @ load v:p, stack, return fn
+ ARM( ldmia r0!, {r1, sp, lr} )
+THUMB( ldmia r0!, {r1, r2, lr} )
+THUMB( mov sp, r2 )
b cpu_do_resume
#endif
ENDPROC(cpu_resume)
return;
if (cpu_is_at91cap9_revB())
- set_irq_type(AT91CAP9_ID_UHP, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_irq_type(AT91CAP9_ID_UHP, IRQ_TYPE_LEVEL_HIGH);
/* Enable VBus control for UHP ports */
for (i = 0; i < data->ports; i++) {
void __init at91_add_device_usba(struct usba_platform_data *data)
{
if (cpu_is_at91cap9_revB()) {
- set_irq_type(AT91CAP9_ID_UDPHS, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_irq_type(AT91CAP9_ID_UDPHS, IRQ_TYPE_LEVEL_HIGH);
at91_sys_write(AT91_MATRIX_UDPHS, AT91_MATRIX_SELECT_UDPHS |
AT91_MATRIX_UDPHS_BYPASS_LOCK);
}
return;
if (cpu_is_at91cap9_revB())
- set_irq_type(AT91CAP9_ID_LCDC, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_irq_type(AT91CAP9_ID_LCDC, IRQ_TYPE_LEVEL_HIGH);
at91_set_A_periph(AT91_PIN_PC1, 0); /* LCDHSYNC */
at91_set_A_periph(AT91_PIN_PC2, 0); /* LCDDOTCK */
else
wakeups[bank] &= ~mask;
- set_irq_wake(gpio_chip[bank].bank->id, state);
+ irq_set_irq_wake(gpio_chip[bank].bank->id, state);
return 0;
}
static struct irq_chip gpio_irqchip = {
.name = "GPIO",
+ .irq_disable = gpio_irq_mask,
.irq_mask = gpio_irq_mask,
.irq_unmask = gpio_irq_unmask,
.irq_set_type = gpio_irq_type,
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
unsigned pin;
- struct irq_desc *gpio;
- struct at91_gpio_chip *at91_gpio;
- void __iomem *pio;
+ struct irq_data *idata = irq_desc_get_irq_data(desc);
+ struct irq_chip *chip = irq_data_get_irq_chip(idata);
+ struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(idata);
+ void __iomem *pio = at91_gpio->regbase;
u32 isr;
- at91_gpio = get_irq_chip_data(irq);
- pio = at91_gpio->regbase;
-
/* temporarily mask (level sensitive) parent IRQ */
- desc->irq_data.chip->irq_ack(&desc->irq_data);
+ chip->irq_ack(idata);
for (;;) {
/* Reading ISR acks pending (edge triggered) GPIO interrupts.
 * When there are none pending, we're finished unless we need
}
pin = at91_gpio->chip.base;
- gpio = &irq_desc[pin];
while (isr) {
- if (isr & 1) {
- if (unlikely(gpio->depth)) {
- /*
- * The core ARM interrupt handler lazily disables IRQs so
- * another IRQ must be generated before it actually gets
- * here to be disabled on the GPIO controller.
- */
- gpio_irq_mask(irq_get_irq_data(pin));
- }
- else
- generic_handle_irq(pin);
- }
+ if (isr & 1)
+ generic_handle_irq(pin);
pin++;
- gpio++;
isr >>= 1;
}
}
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
+ chip->irq_unmask(idata);
/* now it may re-trigger */
}
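
The same demux shape, reduced to a minimal sketch for a hypothetical GPIO bank (struct, register offset and names invented): everything the handler needs now comes from the descriptor's irq_data accessors rather than from poking irq_desc[], and each pending child is simply handed to generic_handle_irq().

    #include <linux/io.h>
    #include <linux/irq.h>
    #include <linux/types.h>

    #define EXAMPLE_ISR	0x4c		/* hypothetical status register */

    struct example_bank {			/* hypothetical per-bank cookie */
    	void __iomem	*regs;
    	unsigned int	irq_base;
    };

    static void example_bank_demux(unsigned int irq, struct irq_desc *desc)
    {
    	struct irq_data *idata = irq_desc_get_irq_data(desc);
    	struct irq_chip *chip = irq_data_get_irq_chip(idata);
    	struct example_bank *bank = irq_data_get_irq_chip_data(idata);
    	unsigned int child = bank->irq_base;
    	u32 pending;

    	chip->irq_ack(idata);			/* mask/ack the parent */
    	pending = readl(bank->regs + EXAMPLE_ISR); /* read + clear children */
    	for (; pending; pending >>= 1, child++)
    		if (pending & 1)
    			generic_handle_irq(child);
    	chip->irq_unmask(idata);		/* parent may retrigger now */
    }
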
__raw_writel(~0, this->regbase + PIO_IDR);
for (i = 0, pin = this->chip.base; i < 32; i++, pin++) {
- lockdep_set_class(&irq_desc[pin].lock, &gpio_lock_class);
+ irq_set_lockdep_class(pin, &gpio_lock_class);
/*
* Can use the "simple" and not "edge" handler since it's
* shorter, and the AIC handles interrupts sanely.
*/
- set_irq_chip(pin, &gpio_irqchip);
- set_irq_handler(pin, handle_simple_irq);
+ irq_set_chip_and_handler(pin, &gpio_irqchip,
+ handle_simple_irq);
set_irq_flags(pin, IRQF_VALID);
}
if (prev && prev->next == this)
continue;
- set_irq_chip_data(id, this);
- set_irq_chained_handler(id, gpio_irq_handler);
+ irq_set_chip_data(id, this);
+ irq_set_chained_handler(id, gpio_irq_handler);
}
pr_info("AT91: %d gpio irqs in %d banks\n", pin - PIN_BASE, gpio_banks);
}
/*
* System Peripherals (offset from AT91_BASE_SYS)
*/
-#define AT91_SDRAMC (0xffffea00 - AT91_BASE_SYS)
+#define AT91_SDRAMC0 (0xffffea00 - AT91_BASE_SYS)
#define AT91_SMC (0xffffec00 - AT91_BASE_SYS)
#define AT91_MATRIX (0xffffee00 - AT91_BASE_SYS)
#define AT91_AIC (0xfffff000 - AT91_BASE_SYS)
/* Active Low interrupt, with the specified priority */
at91_sys_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
- set_irq_chip(i, &at91_aic_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &at91_aic_chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
/* Perform 8 End Of Interrupt Command to make sure AIC will not Lock out nIRQ */
unsigned int i;
for (i = 0; i < 32; i++) {
unsigned int irq = irq_start + i;
- set_irq_chip(irq, chip);
- set_irq_chip_data(irq, base);
+ irq_set_chip(irq, chip);
+ irq_set_chip_data(irq, base);
if (vic_sources & (1 << i)) {
- set_irq_handler(irq, handle_level_irq);
+ irq_set_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
/* special cases */
if (INTCHW_INTC1_GPIO0 & IRQ_INTC1_VALID_MASK) {
- set_irq_handler(IRQ_GPIO0, handle_simple_irq);
+ irq_set_handler(IRQ_GPIO0, handle_simple_irq);
}
if (INTCHW_INTC1_GPIO1 & IRQ_INTC1_VALID_MASK) {
- set_irq_handler(IRQ_GPIO1, handle_simple_irq);
+ irq_set_handler(IRQ_GPIO1, handle_simple_irq);
}
}
for (i = 0; i < NR_IRQS; i++) {
if (INT1_IRQS & (1 << i)) {
- set_irq_handler(i, handle_level_irq);
- set_irq_chip(i, &int1_chip);
+ irq_set_chip_and_handler(i, &int1_chip,
+ handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
if (INT2_IRQS & (1 << i)) {
- set_irq_handler(i, handle_level_irq);
- set_irq_chip(i, &int2_chip);
+ irq_set_chip_and_handler(i, &int2_chip,
+ handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
}
/* Set up genirq dispatching for cp_intc */
for (i = 0; i < num_irq; i++) {
- set_irq_chip(i, &cp_intc_irq_chip);
+ irq_set_chip(i, &cp_intc_irq_chip);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
- set_irq_handler(i, handle_edge_irq);
+ irq_set_handler(i, handle_edge_irq);
}
/* Enable global interrupt */
{
struct davinci_gpio_regs __iomem *g;
- g = (__force struct davinci_gpio_regs __iomem *)get_irq_chip_data(irq);
+ g = (__force struct davinci_gpio_regs __iomem *)irq_get_chip_data(irq);
return g;
}
static void gpio_irq_disable(struct irq_data *d)
{
struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
- u32 mask = (u32) irq_data_get_irq_data(d);
+ u32 mask = (u32) irq_data_get_irq_handler_data(d);
__raw_writel(mask, &g->clr_falling);
__raw_writel(mask, &g->clr_rising);
static void gpio_irq_enable(struct irq_data *d)
{
struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
- u32 mask = (u32) irq_data_get_irq_data(d);
- unsigned status = irq_desc[d->irq].status;
+ u32 mask = (u32) irq_data_get_irq_handler_data(d);
+ unsigned status = irqd_get_trigger_type(d);
status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING;
if (!status)
static int gpio_irq_type(struct irq_data *d, unsigned trigger)
{
struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
- u32 mask = (u32) irq_data_get_irq_data(d);
+ u32 mask = (u32) irq_data_get_irq_handler_data(d);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
- irq_desc[d->irq].status &= ~IRQ_TYPE_SENSE_MASK;
- irq_desc[d->irq].status |= trigger;
-
- /* don't enable the IRQ if it's currently disabled */
- if (irq_desc[d->irq].depth == 0) {
- __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_FALLING)
- ? &g->set_falling : &g->clr_falling);
- __raw_writel(mask, (trigger & IRQ_TYPE_EDGE_RISING)
- ? &g->set_rising : &g->clr_rising);
- }
return 0;
}
.irq_enable = gpio_irq_enable,
.irq_disable = gpio_irq_disable,
.irq_set_type = gpio_irq_type,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
};
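
The flag addition is what lets the removed depth/enable juggling go away: IRQCHIP_SET_TYPE_MASKED asks the genirq core to mask the line around the irq_set_type() callback, so the chip only has to program its trigger registers. A stripped-down sketch (hypothetical names; mask/unmask callbacks omitted for brevity):

    #include <linux/irq.h>

    static int example_set_type(struct irq_data *d, unsigned int trigger)
    {
    	if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
    		return -EINVAL;
    	/* program the edge-select registers for d->irq here */
    	return 0;
    }

    static struct irq_chip example_edge_chip = {
    	.name		= "example",
    	/* .irq_mask / .irq_unmask / .irq_ack as usual for a real chip */
    	.irq_set_type	= example_set_type,
    	.flags		= IRQCHIP_SET_TYPE_MASKED,
    };
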
static void
status >>= 16;
/* now demux them to the right lowlevel handler */
- n = (int)get_irq_data(irq);
+ n = (int)irq_get_handler_data(irq);
while (status) {
res = ffs(status);
n += res;
static int gpio_irq_type_unbanked(struct irq_data *d, unsigned trigger)
{
struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
- u32 mask = (u32) irq_data_get_irq_data(d);
+ u32 mask = (u32) irq_data_get_irq_handler_data(d);
if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
return -EINVAL;
/* AINTC handles mask/unmask; GPIO handles triggering */
irq = bank_irq;
- gpio_irqchip_unbanked = *get_irq_desc_chip(irq_to_desc(irq));
+ gpio_irqchip_unbanked = *irq_get_chip(irq);
gpio_irqchip_unbanked.name = "GPIO-AINTC";
gpio_irqchip_unbanked.irq_set_type = gpio_irq_type_unbanked;
/* set the direct IRQs up to use that irqchip */
for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) {
- set_irq_chip(irq, &gpio_irqchip_unbanked);
- set_irq_data(irq, (void *) __gpio_mask(gpio));
- set_irq_chip_data(irq, (__force void *) g);
- irq_desc[irq].status |= IRQ_TYPE_EDGE_BOTH;
+ irq_set_chip(irq, &gpio_irqchip_unbanked);
+ irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
+ irq_set_chip_data(irq, (__force void *)g);
+ irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
}
goto done;
__raw_writel(~0, &g->clr_rising);
/* set up all irqs in this bank */
- set_irq_chained_handler(bank_irq, gpio_irq_handler);
- set_irq_chip_data(bank_irq, (__force void *) g);
- set_irq_data(bank_irq, (void *) irq);
+ irq_set_chained_handler(bank_irq, gpio_irq_handler);
+ irq_set_chip_data(bank_irq, (__force void *)g);
+ irq_set_handler_data(bank_irq, (void *)irq);
for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
- set_irq_chip(irq, &gpio_irqchip);
- set_irq_chip_data(irq, (__force void *) g);
- set_irq_data(irq, (void *) __gpio_mask(gpio));
- set_irq_handler(irq, handle_simple_irq);
+ irq_set_chip(irq, &gpio_irqchip);
+ irq_set_chip_data(irq, (__force void *)g);
+ irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
+ irq_set_handler(irq, handle_simple_irq);
set_irq_flags(irq, IRQF_VALID);
}
/* set up genirq dispatch for ARM INTC */
for (i = 0; i < davinci_soc_info.intc_irq_num; i++) {
- set_irq_chip(i, &davinci_irq_chip_0);
+ irq_set_chip(i, &davinci_irq_chip_0);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
if (i != IRQ_TINT1_TINT34)
- set_irq_handler(i, handle_edge_irq);
+ irq_set_handler(i, handle_edge_irq);
else
- set_irq_handler(i, handle_level_irq);
+ irq_set_handler(i, handle_level_irq);
}
}
#define DOVE_MPP_GENERAL_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xe803c)
#define DOVE_AU1_SPDIFO_GPIO_EN (1 << 1)
#define DOVE_NAND_GPIO_EN (1 << 0)
-#define DOVE_MPP_CTRL4_VIRT_BASE (DOVE_GPIO_VIRT_BASE + 0x40)
+#define DOVE_MPP_CTRL4_VIRT_BASE (DOVE_GPIO_LO_VIRT_BASE + 0x40)
#define DOVE_SPI_GPIO_SEL (1 << 5)
#define DOVE_UART1_GPIO_SEL (1 << 4)
#define DOVE_AU1_GPIO_SEL (1 << 3)
if (!(cause & (1 << irq)))
continue;
irq = pmu_to_irq(irq);
- desc = irq_desc + irq;
- desc_handle_irq(irq, desc);
+ generic_handle_irq(irq);
}
}
*/
orion_gpio_init(0, 32, DOVE_GPIO_LO_VIRT_BASE, 0,
IRQ_DOVE_GPIO_START);
- set_irq_chained_handler(IRQ_DOVE_GPIO_0_7, gpio_irq_handler);
- set_irq_chained_handler(IRQ_DOVE_GPIO_8_15, gpio_irq_handler);
- set_irq_chained_handler(IRQ_DOVE_GPIO_16_23, gpio_irq_handler);
- set_irq_chained_handler(IRQ_DOVE_GPIO_24_31, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_DOVE_GPIO_0_7, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_DOVE_GPIO_8_15, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_DOVE_GPIO_16_23, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_DOVE_GPIO_24_31, gpio_irq_handler);
orion_gpio_init(32, 32, DOVE_GPIO_HI_VIRT_BASE, 0,
IRQ_DOVE_GPIO_START + 32);
- set_irq_chained_handler(IRQ_DOVE_HIGH_GPIO, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_DOVE_HIGH_GPIO, gpio_irq_handler);
orion_gpio_init(64, 8, DOVE_GPIO2_VIRT_BASE, 0,
IRQ_DOVE_GPIO_START + 64);
writel(0, PMU_INTERRUPT_CAUSE);
for (i = IRQ_DOVE_PMU_START; i < NR_IRQS; i++) {
- set_irq_chip(i, &pmu_irq_chip);
- set_irq_handler(i, handle_level_irq);
- irq_desc[i].status |= IRQ_LEVEL;
+ irq_set_chip_and_handler(i, &pmu_irq_chip, handle_level_irq);
+ irq_set_status_flags(i, IRQ_LEVEL);
set_irq_flags(i, IRQF_VALID);
}
- set_irq_chained_handler(IRQ_DOVE_PMU, pmu_irq_handler);
+ irq_set_chained_handler(IRQ_DOVE_PMU, pmu_irq_handler);
}
u32 pmu_sig_ctrl[PMU_SIG_REGS];
int i;
- /* Initialize gpiolib. */
- orion_gpio_init();
-
for (i = 0; i < MPP_NR_REGS; i++)
mpp_ctrl[i] = readl(MPP_CTRL(i));
local_irq_restore(flags);
for (irq = 0; irq < NR_IRQS; irq++) {
- set_irq_chip(irq, &ebsa110_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &ebsa110_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
int port = line >> 3;
int port_mask = 1 << (line & 7);
- if ((irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
gpio_int_type2[port] ^= port_mask; /* switch edge direction */
ep93xx_gpio_update_int_params(port);
}
int port = line >> 3;
int port_mask = 1 << (line & 7);
- if ((irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
+ if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
gpio_int_type2[port] ^= port_mask; /* switch edge direction */
gpio_int_unmasked[port] &= ~port_mask;
*/
static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
{
- struct irq_desc *desc = irq_desc + d->irq;
const int gpio = irq_to_gpio(d->irq);
const int port = gpio >> 3;
const int port_mask = 1 << (gpio & 7);
+ irq_flow_handler_t handler;
gpio_direction_input(gpio);
case IRQ_TYPE_EDGE_RISING:
gpio_int_type1[port] |= port_mask;
gpio_int_type2[port] |= port_mask;
- desc->handle_irq = handle_edge_irq;
+ handler = handle_edge_irq;
break;
case IRQ_TYPE_EDGE_FALLING:
gpio_int_type1[port] |= port_mask;
gpio_int_type2[port] &= ~port_mask;
- desc->handle_irq = handle_edge_irq;
+ handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
gpio_int_type1[port] &= ~port_mask;
gpio_int_type2[port] |= port_mask;
- desc->handle_irq = handle_level_irq;
+ handler = handle_level_irq;
break;
case IRQ_TYPE_LEVEL_LOW:
gpio_int_type1[port] &= ~port_mask;
gpio_int_type2[port] &= ~port_mask;
- desc->handle_irq = handle_level_irq;
+ handler = handle_level_irq;
break;
case IRQ_TYPE_EDGE_BOTH:
gpio_int_type1[port] |= port_mask;
gpio_int_type2[port] &= ~port_mask; /* falling */
else
gpio_int_type2[port] |= port_mask; /* rising */
- desc->handle_irq = handle_edge_irq;
+ handler = handle_edge_irq;
break;
default:
pr_err("failed to set irq type %d for gpio %d\n", type, gpio);
return -EINVAL;
}
- gpio_int_enabled[port] |= port_mask;
+ __irq_set_handler_locked(d->irq, handler);
- desc->status &= ~IRQ_TYPE_SENSE_MASK;
- desc->status |= type & IRQ_TYPE_SENSE_MASK;
+ gpio_int_enabled[port] |= port_mask;
ep93xx_gpio_update_int_params(port);
for (gpio_irq = gpio_to_irq(0);
gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
- set_irq_chip(gpio_irq, &ep93xx_gpio_irq_chip);
- set_irq_handler(gpio_irq, handle_level_irq);
+ irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
+ handle_level_irq);
set_irq_flags(gpio_irq, IRQF_VALID);
}
- set_irq_chained_handler(IRQ_EP93XX_GPIO_AB, ep93xx_gpio_ab_irq_handler);
- set_irq_chained_handler(IRQ_EP93XX_GPIO0MUX, ep93xx_gpio_f_irq_handler);
- set_irq_chained_handler(IRQ_EP93XX_GPIO1MUX, ep93xx_gpio_f_irq_handler);
- set_irq_chained_handler(IRQ_EP93XX_GPIO2MUX, ep93xx_gpio_f_irq_handler);
- set_irq_chained_handler(IRQ_EP93XX_GPIO3MUX, ep93xx_gpio_f_irq_handler);
- set_irq_chained_handler(IRQ_EP93XX_GPIO4MUX, ep93xx_gpio_f_irq_handler);
- set_irq_chained_handler(IRQ_EP93XX_GPIO5MUX, ep93xx_gpio_f_irq_handler);
- set_irq_chained_handler(IRQ_EP93XX_GPIO6MUX, ep93xx_gpio_f_irq_handler);
- set_irq_chained_handler(IRQ_EP93XX_GPIO7MUX, ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
+ ep93xx_gpio_ab_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX,
+ ep93xx_gpio_f_irq_handler);
}
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
- struct combiner_chip_data *chip_data = get_irq_data(irq);
- struct irq_chip *chip = get_irq_chip(irq);
+ struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
unsigned int cascade_irq, combiner_irq;
unsigned long status;
{
if (combiner_nr >= MAX_COMBINER_NR)
BUG();
- if (set_irq_data(irq, &combiner_data[combiner_nr]) != 0)
+ if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
BUG();
- set_irq_chained_handler(irq, combiner_handle_cascade_irq);
+ irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}
void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
+ MAX_IRQ_IN_COMBINER; i++) {
- set_irq_chip(i, &combiner_chip);
- set_irq_chip_data(i, &combiner_data[combiner_nr]);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
+ irq_set_chip_data(i, &combiner_data[combiner_nr]);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
}
static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
- u32 *irq_data = get_irq_data(irq);
- struct irq_chip *chip = get_irq_chip(irq);
+ u32 *irq_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
chip->irq_mask(&desc->irq_data);
int irq;
for (irq = 0 ; irq <= 31 ; irq++) {
- set_irq_chip(IRQ_EINT(irq), &exynos4_irq_eint);
- set_irq_handler(IRQ_EINT(irq), handle_level_irq);
+ irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
+ handle_level_irq);
set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
}
- set_irq_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);
+ irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);
for (irq = 0 ; irq <= 15 ; irq++) {
eint0_15_data[irq] = IRQ_EINT(irq);
- set_irq_data(exynos4_get_irq_nr(irq), &eint0_15_data[irq]);
- set_irq_chained_handler(exynos4_get_irq_nr(irq),
+ irq_set_handler_data(exynos4_get_irq_nr(irq),
+ &eint0_15_data[irq]);
+ irq_set_chained_handler(exynos4_get_irq_nr(irq),
exynos4_irq_eint0_15);
}
*CSR_FIQ_DISABLE = -1;
for (irq = _DC21285_IRQ(0); irq < _DC21285_IRQ(20); irq++) {
- set_irq_chip(irq, &fb_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &fb_chip, handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
return 0;
}
-static int cksrc_dc21285_disable(struct clocksource *cs)
+static void cksrc_dc21285_disable(struct clocksource *cs)
{
*CSR_TIMER2_CNTL = 0;
}
if (host_irq != (unsigned int)-1) {
for (irq = _ISA_IRQ(0); irq < _ISA_IRQ(8); irq++) {
- set_irq_chip(irq, &isa_lo_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &isa_lo_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
for (irq = _ISA_IRQ(8); irq < _ISA_IRQ(16); irq++) {
- set_irq_chip(irq, &isa_hi_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &isa_hi_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
request_resource(&ioport_resource, &pic2_resource);
setup_irq(IRQ_ISA_CASCADE, &irq_cascade);
- set_irq_chained_handler(host_irq, isa_irq_handler);
+ irq_set_chained_handler(host_irq, isa_irq_handler);
/*
* On the NetWinder, don't automatically
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
+ unsigned int port = (unsigned int)irq_desc_get_handler_data(desc);
unsigned int gpio_irq_no, irq_stat;
- unsigned int port = (unsigned int)get_irq_data(irq);
irq_stat = __raw_readl(GPIO_BASE(port) + GPIO_INT_STAT);
if ((irq_stat & 1) == 0)
continue;
- BUG_ON(!(irq_desc[gpio_irq_no].handle_irq));
- irq_desc[gpio_irq_no].handle_irq(gpio_irq_no,
- &irq_desc[gpio_irq_no]);
+ generic_handle_irq(gpio_irq_no);
}
}
for (j = GPIO_IRQ_BASE + i * 32;
j < GPIO_IRQ_BASE + (i + 1) * 32; j++) {
- set_irq_chip(j, &gpio_irq_chip);
- set_irq_handler(j, handle_edge_irq);
+ irq_set_chip_and_handler(j, &gpio_irq_chip,
+ handle_edge_irq);
set_irq_flags(j, IRQF_VALID);
}
- set_irq_chained_handler(IRQ_GPIO(i), gpio_irq_handler);
- set_irq_data(IRQ_GPIO(i), (void *)i);
+ irq_set_chained_handler(IRQ_GPIO(i), gpio_irq_handler);
+ irq_set_handler_data(IRQ_GPIO(i), (void *)i);
}
BUG_ON(gpiochip_add(&gemini_gpio_chip));
request_resource(&iomem_resource, &irq_resource);
for (i = 0; i < NR_IRQS; i++) {
- set_irq_chip(i, &gemini_irq_chip);
+ irq_set_chip(i, &gemini_irq_chip);
if((i >= IRQ_TIMER1 && i <= IRQ_TIMER3) || (i >= IRQ_SERIRQ0 && i <= IRQ_SERIRQ1)) {
- set_irq_handler(i, handle_edge_irq);
+ irq_set_handler(i, handle_edge_irq);
mode |= 1 << i;
level |= 1 << i;
} else {
- set_irq_handler(i, handle_level_irq);
+ irq_set_handler(i, handle_level_irq);
}
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
/* Initialize global IRQ's, fast path */
for (irq = 0; irq < NR_GLBL_IRQS; irq++) {
- set_irq_chip(irq, &h720x_global_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &h720x_global_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
/* Initialize multiplexed IRQ's, slow path */
for (irq = IRQ_CHAINED_GPIOA(0) ; irq <= IRQ_CHAINED_GPIOD(31); irq++) {
- set_irq_chip(irq, &h720x_gpio_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &h720x_gpio_chip,
+ handle_edge_irq);
set_irq_flags(irq, IRQF_VALID );
}
- set_irq_chained_handler(IRQ_GPIOA, h720x_gpioa_demux_handler);
- set_irq_chained_handler(IRQ_GPIOB, h720x_gpiob_demux_handler);
- set_irq_chained_handler(IRQ_GPIOC, h720x_gpioc_demux_handler);
- set_irq_chained_handler(IRQ_GPIOD, h720x_gpiod_demux_handler);
+ irq_set_chained_handler(IRQ_GPIOA, h720x_gpioa_demux_handler);
+ irq_set_chained_handler(IRQ_GPIOB, h720x_gpiob_demux_handler);
+ irq_set_chained_handler(IRQ_GPIOC, h720x_gpioc_demux_handler);
+ irq_set_chained_handler(IRQ_GPIOD, h720x_gpiod_demux_handler);
#ifdef CONFIG_CPU_H7202
for (irq = IRQ_CHAINED_GPIOE(0) ; irq <= IRQ_CHAINED_GPIOE(31); irq++) {
- set_irq_chip(irq, &h720x_gpio_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &h720x_gpio_chip,
+ handle_edge_irq);
set_irq_flags(irq, IRQF_VALID );
}
- set_irq_chained_handler(IRQ_GPIOE, h720x_gpioe_demux_handler);
+ irq_set_chained_handler(IRQ_GPIOE, h720x_gpioe_demux_handler);
#endif
/* Enable multiplexed irq's */
/*
* mask multiplexed timer IRQs
*/
-static void inline mask_timerx_irq(struct irq_data *d)
+static void inline __mask_timerx_irq(unsigned int irq)
{
unsigned int bit;
- bit = 2 << ((d->irq == IRQ_TIMER64B) ? 4 : (d->irq - IRQ_TIMER1));
+ bit = 2 << ((irq == IRQ_TIMER64B) ? 4 : (irq - IRQ_TIMER1));
CPU_REG (TIMER_VIRT, TIMER_TOPCTRL) &= ~bit;
}
+static void inline mask_timerx_irq(struct irq_data *d)
+{
+ __mask_timerx_irq(d->irq);
+}
+
/*
* unmask multiplexed timer IRQs
*/
for (irq = IRQ_TIMER1;
irq < IRQ_CHAINED_TIMERX(NR_TIMERX_IRQS); irq++) {
- mask_timerx_irq(irq);
- set_irq_chip(irq, &h7202_timerx_chip);
- set_irq_handler(irq, handle_edge_irq);
+ __mask_timerx_irq(irq);
+ irq_set_chip_and_handler(irq, &h7202_timerx_chip,
+ handle_edge_irq);
set_irq_flags(irq, IRQF_VALID );
}
- set_irq_chained_handler(IRQ_TIMERX, h7202_timerx_demux_handler);
+ irq_set_chained_handler(IRQ_TIMERX, h7202_timerx_demux_handler);
h720x_init_irq();
}
bool "Vista Silicon i.MX27 Visstrim_m10"
select SOC_IMX27
select IMX_HAVE_PLATFORM_IMX_I2C
+ select IMX_HAVE_PLATFORM_IMX_SSI
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_MXC_MMC
select IMX_HAVE_PLATFORM_MXC_EHCI
#include <mach/mx25.h>
#include <mach/imx-uart.h>
#include <mach/audmux.h>
+#include <mach/esdhc.h>
#include "devices-imx25.h"
.flags = IMX_SSI_SYN | IMX_SSI_NET | IMX_SSI_USE_I2S_SLAVE,
};
+static struct esdhc_platform_data sd1_pdata = {
+ .cd_gpio = GPIO_SD1CD,
+ .wp_gpio = -EINVAL,
+};
+
/*
* system init for baseboard usage. Will be called by cpuimx25 init.
*
imx25_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata);
imx25_add_flexcan1(NULL);
- imx25_add_sdhci_esdhc_imx(0, NULL);
+ imx25_add_sdhci_esdhc_imx(0, &sd1_pdata);
gpio_request(GPIO_LED1, "LED1");
gpio_direction_output(GPIO_LED1, 1);
for(i = 0; i <= IRQ_IOP13XX_HPI; i++) {
if (i < 32)
- set_irq_chip(i, &iop13xx_irqchip1);
+ irq_set_chip(i, &iop13xx_irqchip1);
else if (i < 64)
- set_irq_chip(i, &iop13xx_irqchip2);
+ irq_set_chip(i, &iop13xx_irqchip2);
else if (i < 96)
- set_irq_chip(i, &iop13xx_irqchip3);
+ irq_set_chip(i, &iop13xx_irqchip3);
else
- set_irq_chip(i, &iop13xx_irqchip4);
+ irq_set_chip(i, &iop13xx_irqchip4);
- set_irq_handler(i, handle_level_irq);
+ irq_set_handler(i, handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
void __init iop13xx_msi_init(void)
{
- set_irq_chained_handler(IRQ_IOP13XX_INBD_MSI, iop13xx_msi_handler);
+ irq_set_chained_handler(IRQ_IOP13XX_INBD_MSI, iop13xx_msi_handler);
}
/*
if (irq < 0)
return irq;
- set_irq_msi(irq, desc);
+ irq_set_msi_desc(irq, desc);
msg.address_hi = 0x0;
msg.address_lo = IOP13XX_MU_MIMR_PCI;
msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f);
write_msi_msg(irq, &msg);
- set_irq_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq);
return 0;
}
*IOP3XX_PCIIRSR = 0x0f;
for (i = 0; i < NR_IRQS; i++) {
- set_irq_chip(i, &ext_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &ext_chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
}
*IOP3XX_PCIIRSR = 0x0f;
for (i = 0; i < NR_IRQS; i++) {
- set_irq_chip(i, (i < 32) ? &iop33x_irqchip1 : &iop33x_irqchip2);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i,
+ (i < 32) ? &iop33x_irqchip1 : &iop33x_irqchip2,
+ handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
}
*/
for (irq = IRQ_IXP2000_SOFT_INT; irq <= IRQ_IXP2000_THDB3; irq++) {
if ((1 << irq) & IXP2000_VALID_IRQ_MASK) {
- set_irq_chip(irq, &ixp2000_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &ixp2000_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
} else set_irq_flags(irq, 0);
}
for (irq = IRQ_IXP2000_DRAM0_MIN_ERR; irq <= IRQ_IXP2000_SP_INT; irq++) {
if((1 << (irq - IRQ_IXP2000_DRAM0_MIN_ERR)) &
IXP2000_VALID_ERR_IRQ_MASK) {
- set_irq_chip(irq, &ixp2000_err_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &ixp2000_err_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
else
set_irq_flags(irq, 0);
}
- set_irq_chained_handler(IRQ_IXP2000_ERRSUM, ixp2000_err_irq_handler);
+ irq_set_chained_handler(IRQ_IXP2000_ERRSUM, ixp2000_err_irq_handler);
for (irq = IRQ_IXP2000_GPIO0; irq <= IRQ_IXP2000_GPIO7; irq++) {
- set_irq_chip(irq, &ixp2000_GPIO_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &ixp2000_GPIO_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
- set_irq_chained_handler(IRQ_IXP2000_GPIO, ixp2000_GPIO_irq_handler);
+ irq_set_chained_handler(IRQ_IXP2000_GPIO, ixp2000_GPIO_irq_handler);
/*
* Enable PCI irqs. The actual PCI[AB] decoding is done in
*/
ixp2000_reg_write(IXP2000_IRQ_ENABLE_SET, (1 << IRQ_IXP2000_PCI));
for (irq = IRQ_IXP2000_PCIA; irq <= IRQ_IXP2000_PCIB; irq++) {
- set_irq_chip(irq, &ixp2000_pci_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &ixp2000_pci_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
}
*board_irq_mask = 0xffffffff;
for(irq = IXP2000_BOARD_IRQ(0); irq < IXP2000_BOARD_IRQ(board_irq_count); irq++) {
- set_irq_chip(irq, &ixdp2x00_cpld_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &ixdp2x00_cpld_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
/* Hook into PCI interrupt */
- set_irq_chained_handler(IRQ_IXP2000_PCIB, ixdp2x00_irq_handler);
+ irq_set_chained_handler(IRQ_IXP2000_PCIB, ixdp2x00_irq_handler);
}
/*************************************************************************
for (irq = NR_IXP2000_IRQS; irq < NR_IXDP2X01_IRQS; irq++) {
if (irq & valid_irq_mask) {
- set_irq_chip(irq, &ixdp2x01_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &ixdp2x01_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
} else {
set_irq_flags(irq, 0);
}
/* Hook into PCI interrupts */
- set_irq_chained_handler(IRQ_IXP2000_PCIB, ixdp2x01_irq_handler);
+ irq_set_chained_handler(IRQ_IXP2000_PCIB, ixdp2x01_irq_handler);
}
{
switch (type) {
case IXP23XX_IRQ_LEVEL:
- set_irq_chip(irq, &ixp23xx_irq_level_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &ixp23xx_irq_level_chip,
+ handle_level_irq);
break;
case IXP23XX_IRQ_EDGE:
- set_irq_chip(irq, &ixp23xx_irq_edge_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &ixp23xx_irq_edge_chip,
+ handle_edge_irq);
break;
}
set_irq_flags(irq, IRQF_VALID);
}
for (irq = IRQ_IXP23XX_INTA; irq <= IRQ_IXP23XX_INTB; irq++) {
- set_irq_chip(irq, &ixp23xx_pci_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &ixp23xx_pci_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
- set_irq_chained_handler(IRQ_IXP23XX_PCI_INT_RPH, pci_handler);
+ irq_set_chained_handler(IRQ_IXP23XX_PCI_INT_RPH, pci_handler);
}
irq++) {
if (IXDP2351_INTA_IRQ_MASK(irq) & IXDP2351_INTA_IRQ_VALID) {
set_irq_flags(irq, IRQF_VALID);
- set_irq_handler(irq, handle_level_irq);
- set_irq_chip(irq, &ixdp2351_inta_chip);
+ irq_set_chip_and_handler(irq, &ixdp2351_inta_chip,
+ handle_level_irq);
}
}
irq++) {
if (IXDP2351_INTB_IRQ_MASK(irq) & IXDP2351_INTB_IRQ_VALID) {
set_irq_flags(irq, IRQF_VALID);
- set_irq_handler(irq, handle_level_irq);
- set_irq_chip(irq, &ixdp2351_intb_chip);
+ irq_set_chip_and_handler(irq, &ixdp2351_intb_chip,
+ handle_level_irq);
}
}
- set_irq_chained_handler(IRQ_IXP23XX_INTA, ixdp2351_inta_handler);
- set_irq_chained_handler(IRQ_IXP23XX_INTB, ixdp2351_intb_handler);
+ irq_set_chained_handler(IRQ_IXP23XX_INTA, ixdp2351_inta_handler);
+ irq_set_chained_handler(IRQ_IXP23XX_INTB, ixdp2351_intb_handler);
}
/*
static void __init roadrunner_pci_preinit(void)
{
- set_irq_type(IRQ_ROADRUNNER_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_ROADRUNNER_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ_ROADRUNNER_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ_ROADRUNNER_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
ixp23xx_pci_preinit();
}
void __init avila_pci_preinit(void)
{
- set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
/* Default to all level triggered */
for(i = 0; i < NR_IRQS; i++) {
- set_irq_chip(i, &ixp4xx_irq_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &ixp4xx_irq_chip,
+ handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
}
void __init coyote_pci_preinit(void)
{
- set_irq_type(IXP4XX_GPIO_IRQ(SLOT0_INTA), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(SLOT1_INTA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT0_INTA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT1_INTA), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
void __init dsmg600_pci_preinit(void)
{
- set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTF), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTF), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
void __init fsg_pci_preinit(void)
{
- set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
void __init gateway7001_pci_preinit(void)
{
- set_irq_type(IRQ_IXP4XX_GPIO10, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_IXP4XX_GPIO11, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ_IXP4XX_GPIO10, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ_IXP4XX_GPIO11, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
gpio_line_config(GPIO_HSS1_RTS_N, IXP4XX_GPIO_OUT);
gpio_line_config(GPIO_HSS0_DCD_N, IXP4XX_GPIO_IN);
gpio_line_config(GPIO_HSS1_DCD_N, IXP4XX_GPIO_IN);
- set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH);
- set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH);
set_control(CONTROL_HSS0_DTR_N, 1);
set_control(CONTROL_HSS1_DTR_N, 1);
#ifdef CONFIG_PCI
static void __init gmlr_pci_preinit(void)
{
- set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
*/
void __init gtwx5715_pci_preinit(void)
{
- set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
void __init ixdp425_pci_preinit(void)
{
- set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
void __init ixdpg425_pci_preinit(void)
{
- set_irq_type(IRQ_IXP4XX_GPIO6, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_IXP4XX_GPIO7, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ_IXP4XX_GPIO6, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ_IXP4XX_GPIO7, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
void __init nas100d_pci_preinit(void)
{
- set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
void __init nslu2_pci_preinit(void)
{
- set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
pr_info("Vulcan PCI: limiting CardBus memory size to %dMB\n",
(int)(pci_cardbus_mem_size >> 20));
#endif
- set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
void __init wg302v2_pci_preinit(void)
{
- set_irq_type(IRQ_IXP4XX_GPIO8, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_IXP4XX_GPIO9, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ_IXP4XX_GPIO8, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ_IXP4XX_GPIO9, IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
*/
orion_gpio_init(0, 32, GPIO_LOW_VIRT_BASE, 0,
IRQ_KIRKWOOD_GPIO_START);
- set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_0_7, gpio_irq_handler);
- set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_8_15, gpio_irq_handler);
- set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_16_23, gpio_irq_handler);
- set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_24_31, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_0_7, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_8_15, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_16_23, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_24_31, gpio_irq_handler);
orion_gpio_init(32, 18, GPIO_HIGH_VIRT_BASE, 0,
IRQ_KIRKWOOD_GPIO_START + 32);
- set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_0_7, gpio_irq_handler);
- set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_8_15, gpio_irq_handler);
- set_irq_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_16_23, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_0_7, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_8_15, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_16_23,
+ gpio_irq_handler);
}
};
static struct gpio_led sheevaplug_led_pins[] = {
+ {
+ .name = "plug:red:misc",
+ .default_trigger = "none",
+ .gpio = 46,
+ .active_low = 1,
+ },
{
.name = "plug:green:health",
.default_trigger = "default-on",
static unsigned int sheevaplug_mpp_config[] __initdata = {
MPP29_GPIO, /* USB Power Enable */
+ MPP46_GPIO, /* LED Red */
MPP49_GPIO, /* LED */
0
};
local_irq_restore(flags);
/* Set IRQ triggering type */
- set_irq_type(gpio_irq[pin], type);
+ irq_set_irq_type(gpio_irq[pin], type);
/* enable interrupt mode */
ks8695_gpio_mode(pin, 0);
}
if (level_triggered) {
- set_irq_chip(d->irq, &ks8695_irq_level_chip);
- set_irq_handler(d->irq, handle_level_irq);
+ irq_set_chip_and_handler(d->irq, &ks8695_irq_level_chip,
+ handle_level_irq);
}
else {
- set_irq_chip(d->irq, &ks8695_irq_edge_chip);
- set_irq_handler(d->irq, handle_edge_irq);
+ irq_set_chip_and_handler(d->irq, &ks8695_irq_edge_chip,
+ handle_edge_irq);
}
__raw_writel(ctrl, KS8695_GPIO_VA + KS8695_IOPC);
case KS8695_IRQ_UART_RX:
case KS8695_IRQ_COMM_TX:
case KS8695_IRQ_COMM_RX:
- set_irq_chip(irq, &ks8695_irq_level_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq,
+ &ks8695_irq_level_chip,
+ handle_level_irq);
break;
/* Edge-triggered interrupts */
default:
/* clear pending bit */
ks8695_irq_ack(irq_get_irq_data(irq));
- set_irq_chip(irq, &ks8695_irq_edge_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq,
+ &ks8695_irq_edge_chip,
+ handle_edge_irq);
}
set_irq_flags(irq, IRQF_VALID);
}
/* Ok to use the level handler for all types */
- set_irq_handler(d->irq, handle_level_irq);
+ irq_set_handler(d->irq, handle_level_irq);
return 0;
}
/* Configure supported IRQ's */
for (i = 0; i < NR_IRQS; i++) {
- set_irq_chip(i, &lpc32xx_irq_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &lpc32xx_irq_chip,
+ handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
__raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE));
/* MIC SUBIRQx interrupts will route handling to the chain handlers */
- set_irq_chained_handler(IRQ_LPC32XX_SUB1IRQ, lpc32xx_sic1_handler);
- set_irq_chained_handler(IRQ_LPC32XX_SUB2IRQ, lpc32xx_sic2_handler);
+ irq_set_chained_handler(IRQ_LPC32XX_SUB1IRQ, lpc32xx_sic1_handler);
+ irq_set_chained_handler(IRQ_LPC32XX_SUB2IRQ, lpc32xx_sic2_handler);
/* Initially disable all wake events */
__raw_writel(0, LPC32XX_CLKPWR_P01_ER);
if (chip->irq_ack)
chip->irq_ack(d);
- set_irq_chip(irq, chip);
+ irq_set_chip(irq, chip);
set_irq_flags(irq, IRQF_VALID);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_handler(irq, handle_level_irq);
}
}
for (irq = 0; irq < IRQ_MMP2_MUX_BASE; irq++) {
icu_mask_irq(irq_get_irq_data(irq));
- set_irq_chip(irq, &icu_irq_chip);
+ irq_set_chip(irq, &icu_irq_chip);
set_irq_flags(irq, IRQF_VALID);
switch (irq) {
case IRQ_MMP2_SSP_MUX:
break;
default:
- set_irq_handler(irq, handle_level_irq);
+ irq_set_handler(irq, handle_level_irq);
break;
}
}
init_mux_irq(&misc_irq_chip, IRQ_MMP2_MISC_BASE, 15);
init_mux_irq(&ssp_irq_chip, IRQ_MMP2_SSP_BASE, 2);
- set_irq_chained_handler(IRQ_MMP2_PMIC_MUX, pmic_irq_demux);
- set_irq_chained_handler(IRQ_MMP2_RTC_MUX, rtc_irq_demux);
- set_irq_chained_handler(IRQ_MMP2_TWSI_MUX, twsi_irq_demux);
- set_irq_chained_handler(IRQ_MMP2_MISC_MUX, misc_irq_demux);
- set_irq_chained_handler(IRQ_MMP2_SSP_MUX, ssp_irq_demux);
+ irq_set_chained_handler(IRQ_MMP2_PMIC_MUX, pmic_irq_demux);
+ irq_set_chained_handler(IRQ_MMP2_RTC_MUX, rtc_irq_demux);
+ irq_set_chained_handler(IRQ_MMP2_TWSI_MUX, twsi_irq_demux);
+ irq_set_chained_handler(IRQ_MMP2_MISC_MUX, misc_irq_demux);
+ irq_set_chained_handler(IRQ_MMP2_SSP_MUX, ssp_irq_demux);
}
for (irq = 0; irq < 64; irq++) {
icu_mask_irq(irq_get_irq_data(irq));
- set_irq_chip(irq, &icu_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
}
*/
for (i = GIC_PPI_START; i < GIC_SPI_START; i++) {
if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE)
- set_irq_handler(i, handle_percpu_irq);
+ irq_set_handler(i, handle_percpu_irq);
}
}
*/
for (i = GIC_PPI_START; i < GIC_SPI_START; i++) {
if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE)
- set_irq_handler(i, handle_percpu_irq);
+ irq_set_handler(i, handle_percpu_irq);
}
}
{
int i;
for(i = TROUT_INT_START; i <= TROUT_INT_END; i++) {
- set_irq_chip(i, &trout_gpio_irq_chip);
- set_irq_handler(i, handle_edge_irq);
+ irq_set_chip_and_handler(i, &trout_gpio_irq_chip,
+ handle_edge_irq);
set_irq_flags(i, IRQF_VALID);
}
for (i = 0; i < ARRAY_SIZE(msm_gpio_banks); i++)
gpiochip_add(&msm_gpio_banks[i].chip);
- set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH);
- set_irq_chained_handler(MSM_GPIO_TO_INT(17), trout_gpio_irq_handler);
- set_irq_wake(MSM_GPIO_TO_INT(17), 1);
+ irq_set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH);
+ irq_set_chained_handler(MSM_GPIO_TO_INT(17), trout_gpio_irq_handler);
+ irq_set_irq_wake(MSM_GPIO_TO_INT(17), 1);
return 0;
}
if (IS_ERR(vreg_sdslot))
return PTR_ERR(vreg_sdslot);
- set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 1);
+ irq_set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 1);
if (!opt_disable_sdcard)
msm_add_sdcc(2, &trout_sdslot_data,
val, val2);
}
-static void msm_gpio_irq_ack(unsigned int irq)
+static void msm_gpio_irq_ack(struct irq_data *d)
{
- int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq);
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
if (test_bit(gpio, msm_gpio.dual_edge_irqs))
msm_gpio_update_dual_edge_pos(gpio);
}
-static void msm_gpio_irq_mask(unsigned int irq)
+static void msm_gpio_irq_mask(struct irq_data *d)
{
- int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq);
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
unsigned long irq_flags;
spin_lock_irqsave(&tlmm_lock, irq_flags);
spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
-static void msm_gpio_irq_unmask(unsigned int irq)
+static void msm_gpio_irq_unmask(struct irq_data *d)
{
- int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq);
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
unsigned long irq_flags;
spin_lock_irqsave(&tlmm_lock, irq_flags);
spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
-static int msm_gpio_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
- int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq);
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
unsigned long irq_flags;
uint32_t bits;
if (flow_type & IRQ_TYPE_EDGE_BOTH) {
bits |= BIT(INTR_DECT_CTL);
- irq_desc[irq].handle_irq = handle_edge_irq;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
__set_bit(gpio, msm_gpio.dual_edge_irqs);
else
__clear_bit(gpio, msm_gpio.dual_edge_irqs);
} else {
bits &= ~BIT(INTR_DECT_CTL);
- irq_desc[irq].handle_irq = handle_level_irq;
+ __irq_set_handler_locked(d->irq, handle_level_irq);
__clear_bit(gpio, msm_gpio.dual_edge_irqs);
}
*/
static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
unsigned long i;
for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip,
i));
}
- desc->chip->ack(irq);
+ data->chip->irq_ack(data);
}
-static int msm_gpio_irq_set_wake(unsigned int irq, unsigned int on)
+static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
- int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, irq);
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
if (on) {
if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS))
- set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1);
+ irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1);
set_bit(gpio, msm_gpio.wake_irqs);
} else {
clear_bit(gpio, msm_gpio.wake_irqs);
if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS))
- set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0);
+ irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0);
}
return 0;
static struct irq_chip msm_gpio_irq_chip = {
.name = "msmgpio",
- .mask = msm_gpio_irq_mask,
- .unmask = msm_gpio_irq_unmask,
- .ack = msm_gpio_irq_ack,
- .set_type = msm_gpio_irq_set_type,
- .set_wake = msm_gpio_irq_set_wake,
+ .irq_mask = msm_gpio_irq_mask,
+ .irq_unmask = msm_gpio_irq_unmask,
+ .irq_ack = msm_gpio_irq_ack,
+ .irq_set_type = msm_gpio_irq_set_type,
+ .irq_set_wake = msm_gpio_irq_set_wake,
};
static int __devinit msm_gpio_probe(struct platform_device *dev)
for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) {
irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i);
- set_irq_chip(irq, &msm_gpio_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
- set_irq_chained_handler(TLMM_SCSS_SUMMARY_IRQ,
+ irq_set_chained_handler(TLMM_SCSS_SUMMARY_IRQ,
msm_summary_irq_handler);
return 0;
}
if (ret < 0)
return ret;
- set_irq_handler(TLMM_SCSS_SUMMARY_IRQ, NULL);
+ irq_set_handler(TLMM_SCSS_SUMMARY_IRQ, NULL);
return 0;
}
val = readl(msm_chip->regs.int_edge);
if (flow_type & IRQ_TYPE_EDGE_BOTH) {
writel(val | mask, msm_chip->regs.int_edge);
- irq_desc[d->irq].handle_irq = handle_edge_irq;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
} else {
writel(val & ~mask, msm_chip->regs.int_edge);
- irq_desc[d->irq].handle_irq = handle_level_irq;
+ __irq_set_handler_locked(d->irq, handle_level_irq);
}
if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
msm_chip->both_edge_detect |= mask;
msm_gpio_chips[j].chip.base +
msm_gpio_chips[j].chip.ngpio)
j++;
- set_irq_chip_data(i, &msm_gpio_chips[j]);
- set_irq_chip(i, &msm_gpio_irq_chip);
- set_irq_handler(i, handle_edge_irq);
+ irq_set_chip_data(i, &msm_gpio_chips[j]);
+ irq_set_chip_and_handler(i, &msm_gpio_irq_chip,
+ handle_edge_irq);
set_irq_flags(i, IRQF_VALID);
}
gpiochip_add(&msm_gpio_chips[i].chip);
}
- set_irq_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
- set_irq_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
- set_irq_wake(INT_GPIO_GROUP1, 1);
- set_irq_wake(INT_GPIO_GROUP2, 2);
+ irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
+ irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
+ irq_set_irq_wake(INT_GPIO_GROUP1, 1);
+ irq_set_irq_wake(INT_GPIO_GROUP2, 2);
return 0;
}
type = msm_irq_shadow_reg[index].int_type;
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
type |= b;
- irq_desc[d->irq].handle_irq = handle_edge_irq;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
}
if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
type &= ~b;
- irq_desc[d->irq].handle_irq = handle_level_irq;
+ __irq_set_handler_locked(d->irq, handle_level_irq);
}
writel(type, treg);
msm_irq_shadow_reg[index].int_type = type;
writel(3, VIC_INT_MASTEREN);
for (n = 0; n < NR_MSM_IRQS; n++) {
- set_irq_chip(n, &msm_irq_chip);
- set_irq_handler(n, handle_level_irq);
+ irq_set_chip_and_handler(n, &msm_irq_chip, handle_level_irq);
set_irq_flags(n, IRQF_VALID);
}
}
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
writel(readl(treg) | b, treg);
- irq_desc[d->irq].handle_irq = handle_edge_irq;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
}
if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
writel(readl(treg) & (~b), treg);
- irq_desc[d->irq].handle_irq = handle_level_irq;
+ __irq_set_handler_locked(d->irq, handle_level_irq);
}
return 0;
}
writel(1, VIC_INT_MASTEREN);
for (n = 0; n < NR_MSM_IRQS; n++) {
- set_irq_chip(n, &msm_irq_chip);
- set_irq_handler(n, handle_level_irq);
+ irq_set_chip_and_handler(n, &msm_irq_chip, handle_level_irq);
set_irq_flags(n, IRQF_VALID);
}
}
val = readl(sirc_regs.int_type);
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
val |= mask;
- irq_desc[d->irq].handle_irq = handle_edge_irq;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
} else {
val &= ~mask;
- irq_desc[d->irq].handle_irq = handle_level_irq;
+ __irq_set_handler_locked(d->irq, handle_level_irq);
}
writel(val, sirc_regs.int_type);
wake_enable = 0;
for (i = FIRST_SIRC_IRQ; i < LAST_SIRC_IRQ; i++) {
- set_irq_chip(i, &sirc_irq_chip);
- set_irq_handler(i, handle_edge_irq);
+ irq_set_chip_and_handler(i, &sirc_irq_chip, handle_edge_irq);
set_irq_flags(i, IRQF_VALID);
}
for (i = 0; i < ARRAY_SIZE(sirc_reg_table); i++) {
- set_irq_chained_handler(sirc_reg_table[i].cascade_irq,
+ irq_set_chained_handler(sirc_reg_table[i].cascade_irq,
sirc_irq_handler);
- set_irq_wake(sirc_reg_table[i].cascade_irq, 1);
+ irq_set_irq_wake(sirc_reg_table[i].cascade_irq, 1);
}
return;
}
orion_gpio_init(0, 32, GPIO_VIRT_BASE,
mv78xx0_core_index() ? 0x18 : 0,
IRQ_MV78XX0_GPIO_START);
- set_irq_chained_handler(IRQ_MV78XX0_GPIO_0_7, gpio_irq_handler);
- set_irq_chained_handler(IRQ_MV78XX0_GPIO_8_15, gpio_irq_handler);
- set_irq_chained_handler(IRQ_MV78XX0_GPIO_16_23, gpio_irq_handler);
- set_irq_chained_handler(IRQ_MV78XX0_GPIO_24_31, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_MV78XX0_GPIO_0_7, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_MV78XX0_GPIO_8_15, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_MV78XX0_GPIO_16_23, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_MV78XX0_GPIO_24_31, gpio_irq_handler);
}
#include <mach/ipu.h>
#include <mach/mx3fb.h>
#include <mach/audmux.h>
+#include <mach/esdhc.h>
#include "devices-imx35.h"
#include "devices.h"
MX35_PAD_SD1_DATA1__ESDHC1_DAT1,
MX35_PAD_SD1_DATA2__ESDHC1_DAT2,
MX35_PAD_SD1_DATA3__ESDHC1_DAT3,
+ /* SD1 CD */
+ MX35_PAD_LD18__GPIO3_24,
};
#define GPIO_LED1 IMX_GPIO_NR(3, 29)
#define GPIO_SWITCH1 IMX_GPIO_NR(3, 25)
-#define GPIO_LCDPWR (4)
+#define GPIO_LCDPWR IMX_GPIO_NR(1, 4)
+#define GPIO_SD1CD IMX_GPIO_NR(3, 24)
static void eukrea_mbimxsd_lcd_power_set(struct plat_lcd_data *pd,
unsigned int power)
.flags = IMX_SSI_SYN | IMX_SSI_NET | IMX_SSI_USE_I2S_SLAVE,
};
+static struct esdhc_platform_data sd1_pdata = {
+ .cd_gpio = GPIO_SD1CD,
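+	/* no write-protect GPIO is used on this board */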
+ .wp_gpio = -EINVAL,
+};
+
/*
* system init for baseboard usage. Will be called by cpuimx35 init.
*
imx35_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata);
imx35_add_flexcan1(NULL);
- imx35_add_sdhci_esdhc_imx(0, NULL);
+ imx35_add_sdhci_esdhc_imx(0, &sd1_pdata);
gpio_request(GPIO_LED1, "LED1");
gpio_direction_output(GPIO_LED1, 1);
gpio_request(GPIO_LCDPWR, "LCDPWR");
gpio_direction_output(GPIO_LCDPWR, 1);
- gpio_free(GPIO_LCDPWR);
i2c_register_board_info(0, eukrea_mbimxsd_i2c_devices,
ARRAY_SIZE(eukrea_mbimxsd_i2c_devices));
__raw_writew(0xFFFF, PBC_INTSTATUS_REG);
for (i = MXC_EXP_IO_BASE; i < (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES);
i++) {
- set_irq_chip(i, &expio_irq_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
- set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_HIGH);
- set_irq_chained_handler(EXPIO_PARENT_INT, mx31ads_expio_irq_handler);
+ irq_set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_chained_handler(EXPIO_PARENT_INT, mx31ads_expio_irq_handler);
}
#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
#include <mach/mx3fb.h>
#include <mach/ulpi.h>
#include <mach/audmux.h>
+#include <mach/esdhc.h>
#include "devices-imx35.h"
#include "devices.h"
MX35_PAD_SD1_DATA1__ESDHC1_DAT1,
MX35_PAD_SD1_DATA2__ESDHC1_DAT2,
MX35_PAD_SD1_DATA3__ESDHC1_DAT3,
+ MX35_PAD_ATA_DATA10__GPIO2_23, /* WriteProtect */
+ MX35_PAD_ATA_DATA11__GPIO2_24, /* CardDetect */
};
#define AC97_GPIO_TXFS IMX_GPIO_NR(2, 31)
#define AC97_GPIO_TXD IMX_GPIO_NR(2, 28)
#define AC97_GPIO_RESET IMX_GPIO_NR(2, 0)
+#define SD1_GPIO_WP IMX_GPIO_NR(2, 23)
+#define SD1_GPIO_CD IMX_GPIO_NR(2, 24)
static void pcm043_ac97_warm_reset(struct snd_ac97 *ac97)
{
}
__setup("otg_mode=", pcm043_otg_mode);
+static struct esdhc_platform_data sd1_pdata = {
+ .wp_gpio = SD1_GPIO_WP,
+ .cd_gpio = SD1_GPIO_CD,
+};
+
/*
* Board specific initialization.
*/
imx35_add_fsl_usb2_udc(&otg_device_pdata);
imx35_add_flexcan1(NULL);
- imx35_add_sdhci_esdhc_imx(0, NULL);
+ imx35_add_sdhci_esdhc_imx(0, &sd1_pdata);
}
static void __init pcm043_timer_init(void)
select IMX_HAVE_PLATFORM_IMX_I2C
select IMX_HAVE_PLATFORM_IMX_UART
select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+ select IMX_HAVE_PLATFORM_GPIO_KEYS
help
Include support for MX53 LOCO platform. This includes specific
configurations for the board and its peripherals.
#
# Object file lists.
-obj-y := cpu.o mm.o clock-mx51-mx53.o devices.o ehci.o
+obj-y := cpu.o mm.o clock-mx51-mx53.o devices.o ehci.o system.o
obj-$(CONFIG_SOC_IMX50) += mm-mx50.o
obj-$(CONFIG_CPU_FREQ_IMX) += cpu_op-mx51.o
int ret;
/* reset FEC PHY */
- ret = gpio_request(BABBAGE_FEC_PHY_RESET, "fec-phy-reset");
+ ret = gpio_request_one(BABBAGE_FEC_PHY_RESET,
+ GPIOF_OUT_INIT_LOW, "fec-phy-reset");
if (ret) {
printk(KERN_ERR"failed to get GPIO_FEC_PHY_RESET: %d\n", ret);
return;
}
- gpio_direction_output(BABBAGE_FEC_PHY_RESET, 0);
- gpio_set_value(BABBAGE_FEC_PHY_RESET, 0);
msleep(1);
gpio_set_value(BABBAGE_FEC_PHY_RESET, 1);
}
#include <mach/imx-uart.h>
#include <mach/iomux-mx53.h>
-#define SMD_FEC_PHY_RST IMX_GPIO_NR(7, 6)
+#define MX53_EVK_FEC_PHY_RST IMX_GPIO_NR(7, 6)
#define EVK_ECSPI1_CS0 IMX_GPIO_NR(2, 30)
#define EVK_ECSPI1_CS1 IMX_GPIO_NR(3, 19)
int ret;
/* reset FEC PHY */
- ret = gpio_request(SMD_FEC_PHY_RST, "fec-phy-reset");
+ ret = gpio_request_one(MX53_EVK_FEC_PHY_RST, GPIOF_OUT_INIT_LOW,
+ "fec-phy-reset");
if (ret) {
printk(KERN_ERR"failed to get GPIO_FEC_PHY_RESET: %d\n", ret);
return;
}
- gpio_direction_output(SMD_FEC_PHY_RST, 0);
- gpio_set_value(SMD_FEC_PHY_RST, 0);
msleep(1);
- gpio_set_value(SMD_FEC_PHY_RST, 1);
+ gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
}
static struct fec_platform_data mx53_evk_fec_pdata = {
#include "crm_regs.h"
#include "devices-imx53.h"
+#define MX53_LOCO_POWER IMX_GPIO_NR(1, 8)
+#define MX53_LOCO_UI1 IMX_GPIO_NR(2, 14)
+#define MX53_LOCO_UI2 IMX_GPIO_NR(2, 15)
#define LOCO_FEC_PHY_RST IMX_GPIO_NR(7, 6)
static iomux_v3_cfg_t mx53_loco_pads[] = {
MX53_PAD_GPIO_8__GPIO1_8,
};
+#define GPIO_BUTTON(gpio_num, ev_code, act_low, descr, wake) \
+{ \
+ .gpio = gpio_num, \
+ .type = EV_KEY, \
+ .code = ev_code, \
+ .active_low = act_low, \
+ .desc = "btn " descr, \
+ .wakeup = wake, \
+}
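+/* e.g. GPIO_BUTTON(MX53_LOCO_POWER, KEY_POWER, 1, "power", 0) yields an
+ * active-low, non-wakeup EV_KEY entry described as "btn power". */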
+
+static const struct gpio_keys_button loco_buttons[] __initconst = {
+ GPIO_BUTTON(MX53_LOCO_POWER, KEY_POWER, 1, "power", 0),
+ GPIO_BUTTON(MX53_LOCO_UI1, KEY_VOLUMEUP, 1, "volume-up", 0),
+ GPIO_BUTTON(MX53_LOCO_UI2, KEY_VOLUMEDOWN, 1, "volume-down", 0),
+};
+
+static const struct gpio_keys_platform_data loco_button_data __initconst = {
+ .buttons = loco_buttons,
+ .nbuttons = ARRAY_SIZE(loco_buttons),
+};
+
static inline void mx53_loco_fec_reset(void)
{
int ret;
imx53_add_imx_i2c(1, &mx53_loco_i2c_data);
imx53_add_sdhci_esdhc_imx(0, NULL);
imx53_add_sdhci_esdhc_imx(2, NULL);
+ imx_add_gpio_keys(&loco_button_data);
}
static void __init mx53_loco_timer_init(void)
.disable = _clk_ccgr_disable_inwait,
};
+static struct clk gpc_dvfs_clk = {
+ .enable_reg = MXC_CCM_CCGR5,
+ .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
+ .enable = _clk_ccgr_enable,
+ .disable = _clk_ccgr_disable,
+};
+
static struct clk gpt_32k_clk = {
.id = 0,
.parent = &ckil_clk,
_REGISTER_CLOCK("imx-ipuv3", NULL, ipu_clk)
_REGISTER_CLOCK("imx-ipuv3", "di0", ipu_di0_clk)
_REGISTER_CLOCK("imx-ipuv3", "di1", ipu_di1_clk)
+ _REGISTER_CLOCK(NULL, "gpc_dvfs", gpc_dvfs_clk)
};
static struct clk_lookup mx53_lookups[] = {
clk_enable(&iim_clk);
mx51_revision();
clk_disable(&iim_clk);
+ mx51_display_revision();
/* move usb_phy_clk to 24MHz */
clk_set_parent(&usb_phy1_clk, &osc_clk);
static int cpu_silicon_rev = -1;
#define IIM_SREV 0x24
+#define MX50_HW_ADADIG_DIGPROG 0xB0
static int get_mx51_srev(void)
{
}
EXPORT_SYMBOL(mx51_revision);
+void mx51_display_revision(void)
+{
+ int rev;
+	char *srev;
+
+	rev = mx51_revision();
+ switch (rev) {
+ case IMX_CHIP_REVISION_2_0:
+ srev = IMX_CHIP_REVISION_2_0_STRING;
+ break;
+ case IMX_CHIP_REVISION_3_0:
+ srev = IMX_CHIP_REVISION_3_0_STRING;
+ break;
+ default:
+ srev = IMX_CHIP_REVISION_UNKNOWN_STRING;
+ }
+ printk(KERN_INFO "CPU identified as i.MX51, silicon rev %s\n", srev);
+}
+EXPORT_SYMBOL(mx51_display_revision);
+
#ifdef CONFIG_NEON
/*
}
EXPORT_SYMBOL(mx53_revision);
+static int get_mx50_srev(void)
+{
+ void __iomem *anatop = ioremap(MX50_ANATOP_BASE_ADDR, SZ_8K);
+ u32 rev;
+
+ if (!anatop) {
+ cpu_silicon_rev = -EINVAL;
+ return 0;
+ }
+
+ rev = readl(anatop + MX50_HW_ADADIG_DIGPROG);
+ rev &= 0xff;
+
+ iounmap(anatop);
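+	/* DIGPROG low byte: 0x0 is TO1.0, 0x1 is TO1.1 */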
+ if (rev == 0x0)
+ return IMX_CHIP_REVISION_1_0;
+ else if (rev == 0x1)
+ return IMX_CHIP_REVISION_1_1;
+ return 0;
+}
+
+/*
+ * Returns:
+ * the silicon revision of the cpu
+ * -EINVAL - not a mx50
+ */
+int mx50_revision(void)
+{
+ if (!cpu_is_mx50())
+ return -EINVAL;
+
+ if (cpu_silicon_rev == -1)
+ cpu_silicon_rev = get_mx50_srev();
+
+ return cpu_silicon_rev;
+}
+EXPORT_SYMBOL(mx50_revision);
+
static int __init post_cpu_init(void)
{
unsigned int reg;
gpio_request(MBIMX51_TSC2007_GPIO, "tsc2007_irq");
gpio_direction_input(MBIMX51_TSC2007_GPIO);
- set_irq_type(MBIMX51_TSC2007_IRQ, IRQF_TRIGGER_FALLING);
+ irq_set_irq_type(MBIMX51_TSC2007_IRQ, IRQF_TRIGGER_FALLING);
i2c_register_board_info(1, mbimx51_i2c_devices,
ARRAY_SIZE(mbimx51_i2c_devices));
MX51_PAD_SD1_DATA1__SD1_DATA1,
MX51_PAD_SD1_DATA2__SD1_DATA2,
MX51_PAD_SD1_DATA3__SD1_DATA3,
+ /* SD1 CD */
+ _MX51_PAD_GPIO1_0__SD1_CD | MUX_PAD_CTRL(PAD_CTL_PUS_22K_UP |
+ PAD_CTL_PKE | PAD_CTL_SRE_FAST |
+ PAD_CTL_DSE_HIGH | PAD_CTL_PUE | PAD_CTL_HYS),
};
#define GPIO_LED1 IMX_GPIO_NR(3, 30)
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
-#include <asm/mach-types.h>
#include "devices-imx51.h"
#include "devices.h"
--- /dev/null
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+#include "crm_regs.h"
+
+/* Set the cpu low power mode before the WFI instruction. This function is
+ * named mx5 because it can be used for mx50, mx51, and mx53. */
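+/*
+ * The new PLAT_LPC, CLPCR and SRPG control register values are assembled
+ * in local variables first and only written back to the hardware once the
+ * requested mode has been fully decoded below.
+ */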
+void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
+{
+ u32 plat_lpc, arm_srpgcr, ccm_clpcr;
+ u32 empgc0, empgc1;
+ int stop_mode = 0;
+
+ /* always allow platform to issue a deep sleep mode request */
+ plat_lpc = __raw_readl(MXC_CORTEXA8_PLAT_LPC) &
+ ~(MXC_CORTEXA8_PLAT_LPC_DSM);
+ ccm_clpcr = __raw_readl(MXC_CCM_CLPCR) & ~(MXC_CCM_CLPCR_LPM_MASK);
+ arm_srpgcr = __raw_readl(MXC_SRPG_ARM_SRPGCR) & ~(MXC_SRPGCR_PCR);
+ empgc0 = __raw_readl(MXC_SRPG_EMPGC0_SRPGCR) & ~(MXC_SRPGCR_PCR);
+ empgc1 = __raw_readl(MXC_SRPG_EMPGC1_SRPGCR) & ~(MXC_SRPGCR_PCR);
+
+ switch (mode) {
+ case WAIT_CLOCKED:
+ break;
+ case WAIT_UNCLOCKED:
+ ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET;
+ break;
+ case WAIT_UNCLOCKED_POWER_OFF:
+ case STOP_POWER_OFF:
+ plat_lpc |= MXC_CORTEXA8_PLAT_LPC_DSM
+ | MXC_CORTEXA8_PLAT_LPC_DBG_DSM;
+ if (mode == WAIT_UNCLOCKED_POWER_OFF) {
+ ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET;
+ ccm_clpcr &= ~MXC_CCM_CLPCR_VSTBY;
+ ccm_clpcr &= ~MXC_CCM_CLPCR_SBYOS;
+ stop_mode = 0;
+ } else {
+ ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
+ ccm_clpcr |= 0x3 << MXC_CCM_CLPCR_STBY_COUNT_OFFSET;
+ ccm_clpcr |= MXC_CCM_CLPCR_VSTBY;
+ ccm_clpcr |= MXC_CCM_CLPCR_SBYOS;
+ stop_mode = 1;
+ }
+ arm_srpgcr |= MXC_SRPGCR_PCR;
+
+ if (tzic_enable_wake(1) != 0)
+ return;
+ break;
+ case STOP_POWER_ON:
+ ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
+ break;
+ default:
+ printk(KERN_WARNING "UNKNOWN cpu power mode: %d\n", mode);
+ return;
+ }
+
+ __raw_writel(plat_lpc, MXC_CORTEXA8_PLAT_LPC);
+ __raw_writel(ccm_clpcr, MXC_CCM_CLPCR);
+ __raw_writel(arm_srpgcr, MXC_SRPG_ARM_SRPGCR);
+
+ /* Enable NEON SRPG for all but MX50TO1.0. */
+ if (mx50_revision() != IMX_CHIP_REVISION_1_0)
+ __raw_writel(arm_srpgcr, MXC_SRPG_NEON_SRPGCR);
+
+ if (stop_mode) {
+ empgc0 |= MXC_SRPGCR_PCR;
+ empgc1 |= MXC_SRPGCR_PCR;
+
+ __raw_writel(empgc0, MXC_SRPG_EMPGC0_SRPGCR);
+ __raw_writel(empgc1, MXC_SRPG_EMPGC1_SRPGCR);
+ }
+}
select SOC_IMX23
select MXS_HAVE_AMBA_DUART
select MXS_HAVE_PLATFORM_AUART
+ select MXS_HAVE_PLATFORM_MXS_MMC
select MXS_HAVE_PLATFORM_MXSFB
default y
help
select MXS_HAVE_PLATFORM_AUART
select MXS_HAVE_PLATFORM_FEC
select MXS_HAVE_PLATFORM_FLEXCAN
+ select MXS_HAVE_PLATFORM_MXS_MMC
select MXS_HAVE_PLATFORM_MXSFB
select MXS_OCOTP
default y
__raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
+ /*
+ * 480 MHz seems too high to be the ssp clock source directly,
+ * so set frac to get a 288 MHz ref_io.
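+ * (ref_io = 480 MHz * 18 / IOFRAC, so IOFRAC = 30 gives 288 MHz.)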
+ */
+ reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
+ reg &= ~BM_CLKCTRL_FRAC_IOFRAC;
+ reg |= 30 << BP_CLKCTRL_FRAC_IOFRAC;
+ __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
+
return 0;
}
{
clk_misc_init();
+ /*
+ * Source the ssp clock from ref_io rather than ref_xtal,
+ * as ref_xtal provides only 24 MHz at maximum.
+ */
+ clk_set_parent(&ssp_clk, &ref_io_clk);
+
clk_enable(&cpu_clk);
clk_enable(&hbus_clk);
clk_enable(&xbus_clk);
_REGISTER_CLOCK("pll2", NULL, pll2_clk)
_REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
_REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
+ _REGISTER_CLOCK("mxs-mmc.0", NULL, ssp0_clk)
+ _REGISTER_CLOCK("mxs-mmc.1", NULL, ssp1_clk)
_REGISTER_CLOCK("flexcan.0", NULL, can0_clk)
_REGISTER_CLOCK("flexcan.1", NULL, can1_clk)
_REGISTER_CLOCK(NULL, "usb0", usb0_clk)
reg |= BM_CLKCTRL_ENET_CLK_OUT_EN;
__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
+ /*
+ * 480 MHz seems too high to be the ssp clock source directly,
+ * so set frac0 to get a 288 MHz ref_io0.
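+ * (ref_io0 = 480 MHz * 18 / IO0FRAC, so IO0FRAC = 30 gives 288 MHz.)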
+ */
+ reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
+ reg &= ~BM_CLKCTRL_FRAC0_IO0FRAC;
+ reg |= 30 << BP_CLKCTRL_FRAC0_IO0FRAC;
+ __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
+
return 0;
}
{
clk_misc_init();
+ /*
+ * Source the ssp clock from ref_io0 rather than ref_xtal,
+ * as ref_xtal provides only 24 MHz at maximum.
+ */
+ clk_set_parent(&ssp0_clk, &ref_io0_clk);
+ clk_set_parent(&ssp1_clk, &ref_io0_clk);
+
clk_enable(&cpu_clk);
clk_enable(&hbus_clk);
clk_enable(&xbus_clk);
#define mx23_add_auart0() mx23_add_auart(0)
#define mx23_add_auart1() mx23_add_auart(1)
+extern const struct mxs_mxs_mmc_data mx23_mxs_mmc_data[] __initconst;
+#define mx23_add_mxs_mmc(id, pdata) \
+ mxs_add_mxs_mmc(&mx23_mxs_mmc_data[id], pdata)
+
#define mx23_add_mxs_pwm(id) mxs_add_mxs_pwm(MX23_PWM_BASE_ADDR, id)
struct platform_device *__init mx23_add_mxsfb(
extern const struct mxs_i2c_data mx28_mxs_i2c_data[] __initconst;
#define mx28_add_mxs_i2c(id) mxs_add_mxs_i2c(&mx28_mxs_i2c_data[id])
+extern const struct mxs_mxs_mmc_data mx28_mxs_mmc_data[] __initconst;
+#define mx28_add_mxs_mmc(id, pdata) \
+ mxs_add_mxs_mmc(&mx28_mxs_mmc_data[id], pdata)
+
#define mx28_add_mxs_pwm(id) mxs_add_mxs_pwm(MX28_PWM_BASE_ADDR, id)
struct platform_device *__init mx28_add_mxsfb(
config MXS_HAVE_PLATFORM_MXS_I2C
bool
+config MXS_HAVE_PLATFORM_MXS_MMC
+ bool
+
config MXS_HAVE_PLATFORM_MXS_PWM
bool
obj-$(CONFIG_MXS_HAVE_PLATFORM_FEC) += platform-fec.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_I2C) += platform-mxs-i2c.o
+obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_MMC) += platform-mxs-mmc.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_PWM) += platform-mxs-pwm.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXSFB) += platform-mxsfb.o
--- /dev/null
+/*
+ * Copyright (C) 2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/init.h>
+
+#include <mach/mx23.h>
+#include <mach/mx28.h>
+#include <mach/devices-common.h>
+
+#define mxs_mxs_mmc_data_entry_single(soc, _id, hwid) \
+ { \
+ .id = _id, \
+ .iobase = soc ## _SSP ## hwid ## _BASE_ADDR, \
+ .dma = soc ## _DMA_SSP ## hwid, \
+ .irq_err = soc ## _INT_SSP ## hwid ## _ERROR, \
+ .irq_dma = soc ## _INT_SSP ## hwid ## _DMA, \
+ }
+
+#define mxs_mxs_mmc_data_entry(soc, _id, hwid) \
+ [_id] = mxs_mxs_mmc_data_entry_single(soc, _id, hwid)
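+/*
+ * e.g. mxs_mxs_mmc_data_entry(MX23, 0, 1) fills slot 0 from
+ * MX23_SSP1_BASE_ADDR, MX23_DMA_SSP1, MX23_INT_SSP1_ERROR and
+ * MX23_INT_SSP1_DMA.
+ */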
+
+
+#ifdef CONFIG_SOC_IMX23
+const struct mxs_mxs_mmc_data mx23_mxs_mmc_data[] __initconst = {
+ mxs_mxs_mmc_data_entry(MX23, 0, 1),
+ mxs_mxs_mmc_data_entry(MX23, 1, 2),
+};
+#endif
+
+#ifdef CONFIG_SOC_IMX28
+const struct mxs_mxs_mmc_data mx28_mxs_mmc_data[] __initconst = {
+ mxs_mxs_mmc_data_entry(MX28, 0, 0),
+ mxs_mxs_mmc_data_entry(MX28, 1, 1),
+};
+#endif
+
+struct platform_device *__init mxs_add_mxs_mmc(
+ const struct mxs_mxs_mmc_data *data,
+ const struct mxs_mmc_platform_data *pdata)
+{
+ struct resource res[] = {
+ {
+ .start = data->iobase,
+ .end = data->iobase + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = data->dma,
+ .end = data->dma,
+ .flags = IORESOURCE_DMA,
+ }, {
+ .start = data->irq_err,
+ .end = data->irq_err,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .start = data->irq_dma,
+ .end = data->irq_dma,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ return mxs_add_platform_device("mxs-mmc", data->id,
+ res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
+}
static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
u32 irq_stat;
- struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq);
+ struct mxs_gpio_port *port = (struct mxs_gpio_port *)irq_get_handler_data(irq);
u32 gpio_irq_no_base = port->virtual_irq_start;
desc->irq_data.chip->irq_ack(&desc->irq_data);
for (j = port[i].virtual_irq_start;
j < port[i].virtual_irq_start + 32; j++) {
- set_irq_chip(j, &gpio_irq_chip);
- set_irq_handler(j, handle_level_irq);
+ irq_set_chip_and_handler(j, &gpio_irq_chip,
+ handle_level_irq);
set_irq_flags(j, IRQF_VALID);
}
/* setup one handler for each entry */
- set_irq_chained_handler(port[i].irq, mxs_gpio_irq_handler);
- set_irq_data(port[i].irq, &port[i]);
+ irq_set_chained_handler(port[i].irq, mxs_gpio_irq_handler);
+ irq_set_handler_data(port[i].irq, &port[i]);
/* register gpio chip */
port[i].chip.direction_input = mxs_gpio_direction_input;
mxs_reset_block(icoll_base + HW_ICOLL_CTRL);
for (i = 0; i < MXS_INTERNAL_IRQS; i++) {
- set_irq_chip(i, &mxs_icoll_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &mxs_icoll_chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
}
};
struct platform_device * __init mxs_add_mxs_i2c(const struct mxs_i2c_data *data);
+/* mmc */
+#include <mach/mmc.h>
+struct mxs_mxs_mmc_data {
+ int id;
+ resource_size_t iobase;
+ resource_size_t dma;
+ resource_size_t irq_err;
+ resource_size_t irq_dma;
+};
+struct platform_device *__init mxs_add_mxs_mmc(
+ const struct mxs_mxs_mmc_data *data,
+ const struct mxs_mmc_platform_data *pdata);
+
/* pwm */
struct platform_device *__init mxs_add_mxs_pwm(
resource_size_t iobase, int id);
#define MX23EVK_LCD_ENABLE MXS_GPIO_NR(1, 18)
#define MX23EVK_BL_ENABLE MXS_GPIO_NR(1, 28)
+#define MX23EVK_MMC0_WRITE_PROTECT MXS_GPIO_NR(1, 30)
+#define MX23EVK_MMC0_SLOT_POWER MXS_GPIO_NR(1, 29)
static const iomux_cfg_t mx23evk_pads[] __initconst = {
/* duart */
MX23_PAD_LCD_RESET__GPIO_1_18 | MXS_PAD_CTRL,
/* backlight control */
MX23_PAD_PWM2__GPIO_1_28 | MXS_PAD_CTRL,
+
+ /* mmc */
+ MX23_PAD_SSP1_DATA0__SSP1_DATA0 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX23_PAD_SSP1_DATA1__SSP1_DATA1 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX23_PAD_SSP1_DATA2__SSP1_DATA2 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX23_PAD_SSP1_DATA3__SSP1_DATA3 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX23_PAD_GPMI_D08__SSP1_DATA4 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX23_PAD_GPMI_D09__SSP1_DATA5 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX23_PAD_GPMI_D10__SSP1_DATA6 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX23_PAD_GPMI_D11__SSP1_DATA7 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX23_PAD_SSP1_CMD__SSP1_CMD |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX23_PAD_SSP1_DETECT__SSP1_DETECT |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX23_PAD_SSP1_SCK__SSP1_SCK |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ /* write protect */
+ MX23_PAD_PWM4__GPIO_1_30 |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ /* slot power enable */
+ MX23_PAD_PWM3__GPIO_1_29 |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
};
/* mxsfb (lcdif) */
.ld_intf_width = STMLCDIF_24BIT,
};
+static struct mxs_mmc_platform_data mx23evk_mmc_pdata __initdata = {
+ .wp_gpio = MX23EVK_MMC0_WRITE_PROTECT,
+ .flags = SLOTF_8_BIT_CAPABLE,
+};
+
static void __init mx23evk_init(void)
{
int ret;
mx23_add_duart();
mx23_add_auart0();
+ /* power on mmc slot by writing 0 to the gpio */
+ ret = gpio_request_one(MX23EVK_MMC0_SLOT_POWER, GPIOF_DIR_OUT,
+ "mmc0-slot-power");
+ if (ret)
+ pr_warn("failed to request gpio mmc0-slot-power: %d\n", ret);
+ mx23_add_mxs_mmc(0, &mx23evk_mmc_pdata);
+
ret = gpio_request_one(MX23EVK_LCD_ENABLE, GPIOF_DIR_OUT, "lcd-enable");
if (ret)
pr_warn("failed to request gpio lcd-enable: %d\n", ret);
#define MX28EVK_LCD_ENABLE MXS_GPIO_NR(3, 30)
#define MX28EVK_FEC_PHY_RESET MXS_GPIO_NR(4, 13)
+#define MX28EVK_MMC0_WRITE_PROTECT MXS_GPIO_NR(2, 12)
+#define MX28EVK_MMC1_WRITE_PROTECT MXS_GPIO_NR(0, 28)
+#define MX28EVK_MMC0_SLOT_POWER MXS_GPIO_NR(3, 28)
+#define MX28EVK_MMC1_SLOT_POWER MXS_GPIO_NR(3, 29)
+
static const iomux_cfg_t mx28evk_pads[] __initconst = {
/* duart */
MX28_PAD_PWM0__DUART_RX | MXS_PAD_CTRL,
MX28_PAD_LCD_RESET__GPIO_3_30 | MXS_PAD_CTRL,
/* backlight control */
MX28_PAD_PWM2__GPIO_3_18 | MXS_PAD_CTRL,
+ /* mmc0 */
+ MX28_PAD_SSP0_DATA0__SSP0_D0 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA1__SSP0_D1 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA2__SSP0_D2 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA3__SSP0_D3 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA4__SSP0_D4 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA5__SSP0_D5 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA6__SSP0_D6 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DATA7__SSP0_D7 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_CMD__SSP0_CMD |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX28_PAD_SSP0_SCK__SSP0_SCK |
+ (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ /* write protect */
+ MX28_PAD_SSP1_SCK__GPIO_2_12 |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ /* slot power enable */
+ MX28_PAD_PWM3__GPIO_3_28 |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+
+ /* mmc1 */
+ MX28_PAD_GPMI_D00__SSP1_D0 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_GPMI_D01__SSP1_D1 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_GPMI_D02__SSP1_D2 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_GPMI_D03__SSP1_D3 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_GPMI_D04__SSP1_D4 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_GPMI_D05__SSP1_D5 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_GPMI_D06__SSP1_D6 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_GPMI_D07__SSP1_D7 |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_GPMI_RDY1__SSP1_CMD |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP),
+ MX28_PAD_GPMI_RDY0__SSP1_CARD_DETECT |
+ (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ MX28_PAD_GPMI_WRN__SSP1_SCK |
+ (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ /* write protect */
+ MX28_PAD_GPMI_RESETN__GPIO_0_28 |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
+ /* slot power enable */
+ MX28_PAD_PWM4__GPIO_3_29 |
+ (MXS_PAD_4MA | MXS_PAD_3V3 | MXS_PAD_NOPULL),
};
/* fec */
.ld_intf_width = STMLCDIF_24BIT,
};
+static struct mxs_mmc_platform_data mx28evk_mmc_pdata[] __initdata = {
+ {
+ /* mmc0 */
+ .wp_gpio = MX28EVK_MMC0_WRITE_PROTECT,
+ .flags = SLOTF_8_BIT_CAPABLE,
+ }, {
+ /* mmc1 */
+ .wp_gpio = MX28EVK_MMC1_WRITE_PROTECT,
+ .flags = SLOTF_8_BIT_CAPABLE,
+ },
+};
+
static void __init mx28evk_init(void)
{
int ret;
gpio_set_value(MX28EVK_BL_ENABLE, 1);
mx28_add_mxsfb(&mx28evk_mxsfb_pdata);
+
+ /* power on mmc slot by writing 0 to the gpio */
+ ret = gpio_request_one(MX28EVK_MMC0_SLOT_POWER, GPIOF_DIR_OUT,
+ "mmc0-slot-power");
+ if (ret)
+ pr_warn("failed to request gpio mmc0-slot-power: %d\n", ret);
+ mx28_add_mxs_mmc(0, &mx28evk_mmc_pdata[0]);
+
+ ret = gpio_request_one(MX28EVK_MMC1_SLOT_POWER, GPIOF_DIR_OUT,
+ "mmc1-slot-power");
+ if (ret)
+ pr_warn("failed to request gpio mmc1-slot-power: %d\n", ret);
+ mx28_add_mxs_mmc(1, &mx28evk_mmc_pdata[1]);
}
static void __init mx28evk_timer_init(void)
};
#define FEC_MODE (MXS_PAD_8MA | MXS_PAD_PULLUP | MXS_PAD_3V3)
-static const iomux_cfg_t tx28_fec_pads[] __initconst = {
+static const iomux_cfg_t tx28_fec0_pads[] __initconst = {
MX28_PAD_ENET0_MDC__ENET0_MDC | FEC_MODE,
MX28_PAD_ENET0_MDIO__ENET0_MDIO | FEC_MODE,
MX28_PAD_ENET0_RX_EN__ENET0_RX_EN | FEC_MODE,
MX28_PAD_ENET_CLK__CLKCTRL_ENET | FEC_MODE,
};
-static const struct fec_platform_data tx28_fec_data __initconst = {
+static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
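+	/* FEC1 signals are muxed onto pads of the ENET0 group */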
+ MX28_PAD_ENET0_RXD2__ENET1_RXD0,
+ MX28_PAD_ENET0_RXD3__ENET1_RXD1,
+ MX28_PAD_ENET0_TXD2__ENET1_TXD0,
+ MX28_PAD_ENET0_TXD3__ENET1_TXD1,
+ MX28_PAD_ENET0_COL__ENET1_TX_EN,
+ MX28_PAD_ENET0_CRS__ENET1_RX_EN,
+};
+
+static struct fec_platform_data tx28_fec0_data = {
+ .phy = PHY_INTERFACE_MODE_RMII,
+};
+
+static struct fec_platform_data tx28_fec1_data = {
.phy = PHY_INTERFACE_MODE_RMII,
};
pr_debug("%s: Deasserting FEC PHY RESET\n", __func__);
gpio_set_value(TX28_FEC_PHY_RESET, 1);
- ret = mxs_iomux_setup_multiple_pads(tx28_fec_pads,
- ARRAY_SIZE(tx28_fec_pads));
+ ret = mxs_iomux_setup_multiple_pads(tx28_fec0_pads,
+ ARRAY_SIZE(tx28_fec0_pads));
if (ret) {
pr_debug("%s: mxs_iomux_setup_multiple_pads() failed with rc: %d\n",
__func__, ret);
goto free_gpios;
}
- pr_debug("%s: Registering FEC device\n", __func__);
- mx28_add_fec(0, &tx28_fec_data);
+ pr_debug("%s: Registering FEC0 device\n", __func__);
+ mx28_add_fec(0, &tx28_fec0_data);
return 0;
free_gpios:
return ret;
}
+
+int __init tx28_add_fec1(void)
+{
+ int ret;
+
+ ret = mxs_iomux_setup_multiple_pads(tx28_fec1_pads,
+ ARRAY_SIZE(tx28_fec1_pads));
+ if (ret) {
+ pr_debug("%s: mxs_iomux_setup_multiple_pads() failed with rc: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ pr_debug("%s: Registering FEC1 device\n", __func__);
+ mx28_add_fec(1, &tx28_fec1_data);
+ return 0;
+}
* Free Software Foundation.
*/
int __init tx28_add_fec0(void);
+int __init tx28_add_fec1(void);
vic_init(__io(io_p2v(NETX_PA_VIC)), 0, ~0, 0);
for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
- set_irq_chip(irq, &netx_hif_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &netx_hif_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
writel(NETX_DPMAS_INT_EN_GLB_EN, NETX_DPMAS_INT_EN);
- set_irq_chained_handler(NETX_IRQ_HIF, netx_hif_demux_handler);
+ irq_set_chained_handler(NETX_IRQ_HIF, netx_hif_demux_handler);
}
static int __init netx_init(void)
__func__);
for (i = FPGA_IRQ(0); i <= FPGA_IRQ(7); ++i) {
- set_irq_chip(i, &a9m9750dev_fpga_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &a9m9750dev_fpga_chip,
+ handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
REGSET(eic, SYS_EIC, LVEDG, LEVEL);
__raw_writel(eic, SYS_EIC(2));
- set_irq_chained_handler(IRQ_NS9XXX_EXT2,
- a9m9750dev_fpga_demux_handler);
+ irq_set_chained_handler(IRQ_NS9XXX_EXT2,
+ a9m9750dev_fpga_demux_handler);
}
void __init board_a9m9750dev_init_machine(void)
#include <asm/mach-types.h>
#define board_is_a9m9750dev() (0 \
- || machine_is_cc9p9360dev() \
|| machine_is_cc9p9750dev() \
)
#define board_is_a9mvali() (0 \
- || machine_is_cc9p9360val() \
|| machine_is_cc9p9750val() \
)
)
#define module_is_cc9c() (0 \
- || machine_is_cc9c() \
)
#define module_is_cc9p9210() (0 \
)
#define module_is_cc9p9360() (0 \
- || machine_is_a9m9360() \
|| machine_is_cc9p9360dev() \
|| machine_is_cc9p9360js() \
- || machine_is_cc9p9360val() \
)
#define module_is_cc9p9750() (0 \
|| machine_is_a9m9750() \
- || machine_is_cc9p9750dev() \
|| machine_is_cc9p9750js() \
|| machine_is_cc9p9750val() \
)
#define module_is_ccw9c() (0 \
- || machine_is_ccw9c() \
)
#define module_is_inc20otter() (0 \
__raw_writel(prio2irq(i), SYS_IVA(i));
for (i = 0; i <= 31; ++i) {
- set_irq_chip(i, &ns9xxx_chip);
- set_irq_handler(i, handle_fasteoi_irq);
+ irq_set_chip_and_handler(i, &ns9xxx_chip, handle_fasteoi_irq);
set_irq_flags(i, IRQF_VALID);
irq_set_status_flags(i, IRQ_LEVEL);
}
__raw_writel(0xFFFFFFFE, REG_AIC_MDCR);
for (irqno = IRQ_WDT; irqno <= NR_IRQS; irqno++) {
- set_irq_chip(irqno, &nuc93x_irq_chip);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, &nuc93x_irq_chip,
+ handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
}
return;
}
/* the CF I/O IRQ is really active-low */
- set_irq_type(gpio_to_irq(62), IRQ_TYPE_EDGE_FALLING);
+ irq_set_irq_type(gpio_to_irq(62), IRQ_TYPE_EDGE_FALLING);
}
static void __init osk_init_irq(void)
omap_cfg_reg(P20_1610_GPIO4); /* PENIRQ */
gpio_request(4, "ts_int");
gpio_direction_input(4);
- set_irq_type(gpio_to_irq(4), IRQ_TYPE_EDGE_FALLING);
+ irq_set_irq_type(gpio_to_irq(4), IRQ_TYPE_EDGE_FALLING);
spi_register_board_info(mistral_boardinfo,
ARRAY_SIZE(mistral_boardinfo));
int irq = gpio_to_irq(OMAP_MPUIO(2));
gpio_direction_input(OMAP_MPUIO(2));
- set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
#ifdef CONFIG_PM
/* share the IRQ in case someone wants to use the
* button for more than wakeup from system sleep.
{
if (gpio_get_value(PALMZ71_USBDETECT_GPIO)) {
printk(KERN_INFO "PM: Power cable connected\n");
- set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO),
- IRQ_TYPE_EDGE_FALLING);
+ irq_set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO),
+ IRQ_TYPE_EDGE_FALLING);
} else {
printk(KERN_INFO "PM: Power cable disconnected\n");
- set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO),
- IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(gpio_to_irq(PALMZ71_USBDETECT_GPIO),
+ IRQ_TYPE_EDGE_RISING);
}
return IRQ_HANDLED;
}
gpio_request(13, "16C554 irq");
gpio_request(14, "16C554 irq");
gpio_request(15, "16C554 irq");
- set_irq_type(gpio_to_irq(12), IRQ_TYPE_EDGE_RISING);
- set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING);
- set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING);
- set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(gpio_to_irq(12), IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING);
platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices));
omap_board_config = voiceblue_config;
* The touchscreen interrupt is level-sensitive, so
* we'll use the regular mask_ack routine for it.
*/
- set_irq_chip(i, &omap_fpga_irq_ack);
+ irq_set_chip(i, &omap_fpga_irq_ack);
}
else {
/*
* All FPGA interrupts except the touchscreen are
* edge-sensitive, so we won't mask them.
*/
- set_irq_chip(i, &omap_fpga_irq);
+ irq_set_chip(i, &omap_fpga_irq);
}
- set_irq_handler(i, handle_edge_irq);
+ irq_set_handler(i, handle_edge_irq);
set_irq_flags(i, IRQF_VALID);
}
return;
}
gpio_direction_input(13);
- set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING);
- set_irq_chained_handler(OMAP1510_INT_FPGA, innovator_fpga_IRQ_demux);
+ irq_set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING);
+ irq_set_chained_handler(OMAP1510_INT_FPGA, innovator_fpga_IRQ_demux);
}
irq_trigger = irq_banks[i].trigger_map >> IRQ_BIT(j);
omap_irq_set_cfg(j, 0, 0, irq_trigger);
- set_irq_chip(j, &omap_irq_chip);
- set_irq_handler(j, handle_level_irq);
+ irq_set_chip_and_handler(j, &omap_irq_chip,
+ handle_level_irq);
set_irq_flags(j, IRQF_VALID);
}
}
/* initialize the irq_chained */
irq = OMAP_GPMC_IRQ_BASE;
for (cs = 0; cs < GPMC_CS_NUM; cs++) {
- set_irq_chip_and_handler(irq, &dummy_irq_chip,
+ irq_set_chip_and_handler(irq, &dummy_irq_chip,
handle_simple_irq);
set_irq_flags(irq, IRQF_VALID);
irq++;
nr_of_irqs, nr_banks, nr_banks > 1 ? "s" : "");
for (i = 0; i < nr_of_irqs; i++) {
- set_irq_chip(i, &omap_irq_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &omap_irq_chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
}
pin = DB88F5281_PCI_SLOT0_IRQ_PIN;
if (gpio_request(pin, "PCI Int1") == 0) {
if (gpio_direction_input(pin) == 0) {
- set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "db88f5281_pci_preinit faield to "
"set_irq_type pin %d\n", pin);
pin = DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN;
if (gpio_request(pin, "PCI Int2") == 0) {
if (gpio_direction_input(pin) == 0) {
- set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "db88f5281_pci_preinit faield "
"to set_irq_type pin %d\n", pin);
* Initialize gpiolib for GPIOs 0-31.
*/
orion_gpio_init(0, 32, GPIO_VIRT_BASE, 0, IRQ_ORION5X_GPIO_START);
- set_irq_chained_handler(IRQ_ORION5X_GPIO_0_7, gpio_irq_handler);
- set_irq_chained_handler(IRQ_ORION5X_GPIO_8_15, gpio_irq_handler);
- set_irq_chained_handler(IRQ_ORION5X_GPIO_16_23, gpio_irq_handler);
- set_irq_chained_handler(IRQ_ORION5X_GPIO_24_31, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_ORION5X_GPIO_0_7, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_ORION5X_GPIO_8_15, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_ORION5X_GPIO_16_23, gpio_irq_handler);
+ irq_set_chained_handler(IRQ_ORION5X_GPIO_24_31, gpio_irq_handler);
}
pin = RD88F5182_PCI_SLOT0_IRQ_A_PIN;
if (gpio_request(pin, "PCI IntA") == 0) {
if (gpio_direction_input(pin) == 0) {
- set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "rd88f5182_pci_preinit faield to "
"set_irq_type pin %d\n", pin);
pin = RD88F5182_PCI_SLOT0_IRQ_B_PIN;
if (gpio_request(pin, "PCI IntB") == 0) {
if (gpio_direction_input(pin) == 0) {
- set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "rd88f5182_pci_preinit faield to "
"set_irq_type pin %d\n", pin);
pin = TSP2_PCI_SLOT0_IRQ_PIN;
if (gpio_request(pin, "PCI Int1") == 0) {
if (gpio_direction_input(pin) == 0) {
- set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "tsp2_pci_preinit failed "
"to set_irq_type pin %d\n", pin);
pin = QNAP_TS209_PCI_SLOT0_IRQ_PIN;
if (gpio_request(pin, "PCI Int1") == 0) {
if (gpio_direction_input(pin) == 0) {
- set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "qnap_ts209_pci_preinit failed to "
"set_irq_type pin %d\n", pin);
pin = QNAP_TS209_PCI_SLOT1_IRQ_PIN;
if (gpio_request(pin, "PCI Int2") == 0) {
if (gpio_direction_input(pin) == 0) {
- set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
} else {
printk(KERN_ERR "qnap_ts209_pci_preinit failed "
"to set_irq_type pin %d\n", pin);
/* enable devices if magic matches */
switch ((ts78xx_fpga.id >> 8) & 0xffffff) {
case TS7800_FPGA_MAGIC:
- printk(KERN_WARNING "TS-7800 FPGA: unrecognized revision 0x%.2x\n",
+ pr_warning("TS-7800 FPGA: unrecognized revision 0x%.2x\n",
ts78xx_fpga.id & 0xff);
ts78xx_fpga.supports.ts_rtc.present = 1;
ts78xx_fpga.supports.ts_nand.present = 1;
if (ts78xx_fpga.supports.ts_rtc.present == 1) {
tmp = ts78xx_ts_rtc_load();
if (tmp) {
- printk(KERN_INFO "TS-78xx: RTC not registered\n");
+ pr_info("TS-78xx: RTC not registered\n");
ts78xx_fpga.supports.ts_rtc.present = 0;
}
ret |= tmp;
if (ts78xx_fpga.supports.ts_nand.present == 1) {
tmp = ts78xx_ts_nand_load();
if (tmp) {
- printk(KERN_INFO "TS-78xx: NAND not registered\n");
+ pr_info("TS-78xx: NAND not registered\n");
ts78xx_fpga.supports.ts_nand.present = 0;
}
ret |= tmp;
if (ts78xx_fpga.supports.ts_rng.present == 1) {
tmp = ts78xx_ts_rng_load();
if (tmp) {
- printk(KERN_INFO "TS-78xx: RNG not registered\n");
+ pr_info("TS-78xx: RNG not registered\n");
ts78xx_fpga.supports.ts_rng.present = 0;
}
ret |= tmp;
{
ts78xx_fpga.id = readl(TS78XX_FPGA_REGS_VIRT_BASE);
- printk(KERN_INFO "TS-78xx FPGA: magic=0x%.6x, rev=0x%.2x\n",
+ pr_info("TS-78xx FPGA: magic=0x%.6x, rev=0x%.2x\n",
(ts78xx_fpga.id >> 8) & 0xffffff,
ts78xx_fpga.id & 0xff);
* UrJTAG SVN since r1381 can be used to reprogram the FPGA
*/
if (ts78xx_fpga.id != fpga_id) {
- printk(KERN_ERR "TS-78xx FPGA: magic/rev mismatch\n"
+ pr_err("TS-78xx FPGA: magic/rev mismatch\n"
"TS-78xx FPGA: was 0x%.6x/%.2x but now 0x%.6x/%.2x\n",
(ts78xx_fpga.id >> 8) & 0xffffff, ts78xx_fpga.id & 0xff,
(fpga_id >> 8) & 0xffffff, fpga_id & 0xff);
int value, ret;
if (ts78xx_fpga.state < 0) {
- printk(KERN_ERR "TS-78xx FPGA: borked, you must powercycle asap\n");
+ pr_err("TS-78xx FPGA: borked, you must powercycle asap\n");
return -EBUSY;
}
else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
value = 0;
else {
- printk(KERN_ERR "ts78xx_fpga_store: Invalid value\n");
+ pr_err("ts78xx_fpga_store: Invalid value\n");
return -EINVAL;
}
ret = ts78xx_fpga_load();
ret = sysfs_create_file(power_kobj, &ts78xx_fpga_attr.attr);
if (ret)
- printk(KERN_ERR "sysfs_create_file failed: %d\n", ret);
+ pr_err("sysfs_create_file failed: %d\n", ret);
}
MACHINE_START(TS78XX, "Technologic Systems TS-78xx SBC")
case IRQ_TYPE_EDGE_RISING:
__raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */
__raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /*rising edge */
- set_irq_handler(d->irq, handle_edge_irq);
+ irq_set_handler(d->irq, handle_edge_irq);
break;
case IRQ_TYPE_EDGE_FALLING:
__raw_writel(__raw_readl(INTC_ATR(d->irq)) | INTC_BIT(d->irq), INTC_ATR(d->irq)); /*edge sensitive */
__raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*falling edge */
- set_irq_handler(d->irq, handle_edge_irq);
+ irq_set_handler(d->irq, handle_edge_irq);
break;
case IRQ_TYPE_LEVEL_LOW:
__raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */
__raw_writel(__raw_readl(INTC_APR(d->irq)) & ~INTC_BIT(d->irq), INTC_APR(d->irq)); /*low level */
- set_irq_handler(d->irq, handle_level_irq);
+ irq_set_handler(d->irq, handle_level_irq);
break;
case IRQ_TYPE_LEVEL_HIGH:
__raw_writel(__raw_readl(INTC_ATR(d->irq)) & ~INTC_BIT(d->irq), INTC_ATR(d->irq)); /*level sensitive */
__raw_writel(__raw_readl(INTC_APR(d->irq)) | INTC_BIT(d->irq), INTC_APR(d->irq)); /* high level */
- set_irq_handler(d->irq, handle_level_irq);
+ irq_set_handler(d->irq, handle_level_irq);
break;
/* IRQ_TYPE_EDGE_BOTH is not supported */
/* configure IRQ's */
for (i = 0; i < NR_IRQS; i++) {
set_irq_flags(i, IRQF_VALID);
- set_irq_chip(i, &pnx4008_irq_chip);
+ irq_set_chip(i, &pnx4008_irq_chip);
pnx4008_set_irq_type(irq_get_irq_data(i), pnx4008_irq_type[i]);
}
return 0;
err_req_gpio:
- while (i > 0)
- gpio_free(gpios[i--]);
+ while (--i >= 0)
+ gpio_free(gpios[i]);
return err;
}
};
/* this gets called as part of our init. these steps must be done now so
- * that we can use set_pxa_fb_info */
+ * that we can use pxa_set_fb_info */
static void __init am200_presetup_fb(void)
{
int fw;
/* we divide since we told the LCD controller we're 16bpp */
am200_fb_info.modes->xres /= 2;
- set_pxa_fb_info(&am200_fb_info);
+ pxa_set_fb_info(NULL, &am200_fb_info);
}
if (err) {
dev_err(&am300_device->dev, "failed requesting "
"gpio %d, err=%d\n", i, err);
- while (i >= DB0_GPIO_PIN)
- gpio_free(i--);
- i = ARRAY_SIZE(gpios) - 1;
- goto err_req_gpio;
+ goto err_req_gpio2;
}
}
return 0;
+err_req_gpio2:
+ while (--i >= DB0_GPIO_PIN)
+ gpio_free(i);
+ i = ARRAY_SIZE(gpios);
err_req_gpio:
- while (i > 0)
- gpio_free(gpios[i--]);
+ while (--i >= 0)
+ gpio_free(gpios[i]);
return err;
}
}
balloon3_lcd_screen.pxafb_backlight_power = balloon3_backlight_power;
- set_pxa_fb_info(&balloon3_lcd_screen);
+ pxa_set_fb_info(NULL, &balloon3_lcd_screen);
return;
err2:
pxa27x_init_irq();
/* setup extra Balloon3 irqs */
for (irq = BALLOON3_IRQ(0); irq <= BALLOON3_IRQ(7); irq++) {
- set_irq_chip(irq, &balloon3_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &balloon3_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- set_irq_chained_handler(BALLOON3_AUX_NIRQ, balloon3_irq_handler);
- set_irq_type(BALLOON3_AUX_NIRQ, IRQ_TYPE_EDGE_FALLING);
+ irq_set_chained_handler(BALLOON3_AUX_NIRQ, balloon3_irq_handler);
+ irq_set_irq_type(BALLOON3_AUX_NIRQ, IRQ_TYPE_EDGE_FALLING);
pr_debug("%s: chained handler installed - irq %d automatically "
"enabled\n", __func__, BALLOON3_AUX_NIRQ);
cmx2xx_it8152_irq_gpio = irq_gpio;
- set_irq_type(gpio_to_irq(irq_gpio), IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(gpio_to_irq(irq_gpio), IRQ_TYPE_EDGE_RISING);
- set_irq_chained_handler(gpio_to_irq(irq_gpio), cmx2xx_it8152_irq_demux);
+ irq_set_chained_handler(gpio_to_irq(irq_gpio),
+ cmx2xx_it8152_irq_demux);
}
#ifdef CONFIG_PM
static void __init cmx2xx_init_display(void)
{
- set_pxa_fb_info(cmx2xx_display);
+ pxa_set_fb_info(NULL, cmx2xx_display);
}
#else
static inline void cmx2xx_init_display(void) {}
static void __init cm_x300_init_lcd(void)
{
- set_pxa_fb_info(&cm_x300_lcd);
+ pxa_set_fb_info(NULL, &cm_x300_lcd);
}
#else
static inline void cm_x300_init_lcd(void) {}
{
pxa3xx_set_i2c_power_info(&cm_x300_pwr_i2c_info);
i2c_register_board_info(1, &cm_x300_pmic_info, 1);
- set_irq_wake(IRQ_WAKEUP0, 1);
+ irq_set_irq_wake(IRQ_WAKEUP0, 1);
}
static void __init cm_x300_init_wi2wi(void)
static void __init income_lcd_init(void)
{
- set_pxa_fb_info(&income_lcd_screen);
+ pxa_set_fb_info(NULL, &income_lcd_screen);
}
#else
static inline void income_lcd_init(void) {}
lcd_bl_pin = bl_pin;
gpio_request(bl_pin, "lcd backlight");
gpio_direction_output(bl_pin, 0);
- set_pxa_fb_info(&sharp_lq43_info);
+ pxa_set_fb_info(NULL, &sharp_lq43_info);
}
#endif
* USB Device Controller
*/
static struct pxa2xx_udc_mach_info udc_info __initdata = {
- .gpio_vbus = -1,
/* no connect GPIO; corgi can't tell connection status */
.gpio_pullup = CORGI_GPIO_USB_PULLUP,
};
static struct pxa2xx_udc_mach_info pxa_udc_info = {
.gpio_pullup = -1,
- .gpio_vbus = -1,
};
void __init pxa_set_udc_info(struct pxa2xx_udc_mach_info *info)
.resource = pxafb_resources,
};
-void __init set_pxa_fb_info(struct pxafb_mach_info *info)
+void __init pxa_set_fb_info(struct device *parent, struct pxafb_mach_info *info)
{
+ pxa_device_fb.dev.parent = parent;
pxa_register_device(&pxa_device_fb, info);
}
-void __init set_pxa_fb_parent(struct device *parent_dev)
-{
- pxa_device_fb.dev.parent = parent_dev;
-}
-
static struct resource pxa_resource_ffuart[] = {
{
.start = 0x40100000,
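The hunk above folds the old set_pxa_fb_parent() step into the framebuffer registration itself: pxa_set_fb_info() now takes the parent device as its first argument, and boards that relied on the default parent simply pass NULL. A minimal board-side sketch of the new call, using made-up mode and mach_info data that are not taken from any board in this patch:

	/* Hypothetical example data, for illustration only. */
	static struct pxafb_mode_info example_mode __initdata = {
		.pixclock	= 156250,
		.xres		= 240,
		.yres		= 320,
		.bpp		= 16,
	};

	static struct pxafb_mach_info example_fb_info __initdata = {
		.modes		= &example_mode,
		.num_modes	= 1,
		.lcd_conn	= LCD_COLOR_TFT_16BPP,
	};

	static void __init example_init_lcd(void)
	{
		/* A NULL parent keeps the old set_pxa_fb_info() behaviour;
		 * passing a device replaces the old set_pxa_fb_parent() call,
		 * as the poodle hunk further down does. */
		pxa_set_fb_info(NULL, &example_fb_info);
	}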
static void __init em_x270_init_lcd(void)
{
- set_pxa_fb_info(&em_x270_lcd);
+ pxa_set_fb_info(NULL, &em_x270_lcd);
}
#else
static inline void em_x270_init_lcd(void) {}
#include <linux/mfd/t7l66xb.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/usb/gpio_vbus.h>
#include <video/w100fb.h>
mi->bank[0].size = (64*1024*1024);
}
-struct pxa2xx_udc_mach_info e7xx_udc_mach_info = {
+struct gpio_vbus_mach_info e7xx_udc_info = {
.gpio_vbus = GPIO_E7XX_USB_DISC,
.gpio_pullup = GPIO_E7XX_USB_PULLUP,
.gpio_pullup_inverted = 1
};
+static struct platform_device e7xx_gpio_vbus = {
+ .name = "gpio-vbus",
+ .id = -1,
+ .dev = {
+ .platform_data = &e7xx_udc_info,
+ },
+};
+
struct pxaficp_platform_data e7xx_ficp_platform_data = {
.gpio_pwdown = GPIO_E7XX_IR_OFF,
.transceiver_cap = IR_SIRMODE | IR_OFF,
static struct platform_device *e330_devices[] __initdata = {
&e330_tc6387xb_device,
+ &e7xx_gpio_vbus,
};
static void __init e330_init(void)
eseries_register_clks();
eseries_get_tmio_gpios();
platform_add_devices(ARRAY_AND_SIZE(e330_devices));
- pxa_set_udc_info(&e7xx_udc_mach_info);
}
MACHINE_START(E330, "Toshiba e330")
static struct platform_device *e350_devices[] __initdata = {
&e350_t7l66xb_device,
+ &e7xx_gpio_vbus,
};
static void __init e350_init(void)
eseries_register_clks();
eseries_get_tmio_gpios();
platform_add_devices(ARRAY_AND_SIZE(e350_devices));
- pxa_set_udc_info(&e7xx_udc_mach_info);
}
MACHINE_START(E350, "Toshiba e350")
static struct platform_device *e400_devices[] __initdata = {
&e400_t7l66xb_device,
+ &e7xx_gpio_vbus,
};
static void __init e400_init(void)
/* Fixme - e400 may have a switched clock */
eseries_register_clks();
eseries_get_tmio_gpios();
- set_pxa_fb_info(&e400_pxafb_mach_info);
+ pxa_set_fb_info(NULL, &e400_pxafb_mach_info);
platform_add_devices(ARRAY_AND_SIZE(e400_devices));
- pxa_set_udc_info(&e7xx_udc_mach_info);
}
MACHINE_START(E400, "Toshiba e400")
static struct platform_device *e740_devices[] __initdata = {
&e740_fb_device,
&e740_t7l66xb_device,
+ &e7xx_gpio_vbus,
};
static void __init e740_init(void)
"UDCCLK", &pxa25x_device_udc.dev),
eseries_get_tmio_gpios();
platform_add_devices(ARRAY_AND_SIZE(e740_devices));
- pxa_set_udc_info(&e7xx_udc_mach_info);
pxa_set_ac97_info(NULL);
pxa_set_ficp_info(&e7xx_ficp_platform_data);
}
static struct platform_device *e750_devices[] __initdata = {
&e750_fb_device,
&e750_tc6393xb_device,
+ &e7xx_gpio_vbus,
};
static void __init e750_init(void)
"GPIO11_CLK", NULL),
eseries_get_tmio_gpios();
platform_add_devices(ARRAY_AND_SIZE(e750_devices));
- pxa_set_udc_info(&e7xx_udc_mach_info);
pxa_set_ac97_info(NULL);
pxa_set_ficp_info(&e7xx_ficp_platform_data);
}
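The e-series hunks above, and the e800, gumstix and tosa hunks that follow, all apply the same conversion: VBUS sensing moves out of pxa2xx_udc_mach_info into a gpio_vbus_mach_info attached to a "gpio-vbus" platform device, so the pxa_set_udc_info() call disappears from the board init. A hedged sketch of the resulting board glue, with invented GPIO numbers:

	#include <linux/platform_device.h>
	#include <linux/usb/gpio_vbus.h>

	/* Hypothetical pins, for illustration only. */
	static struct gpio_vbus_mach_info example_udc_info = {
		.gpio_vbus		= 40,	/* VBUS sense */
		.gpio_pullup		= 41,	/* D+ pull-up */
		.gpio_pullup_inverted	= 1,
	};

	static struct platform_device example_gpio_vbus = {
		.name	= "gpio-vbus",
		.id	= -1,
		.dev	= {
			.platform_data = &example_udc_info,
		},
	};

	/* Registered from the board init (or an __initdata device table)
	 * in place of the old pxa_set_udc_info() call:
	 *	platform_device_register(&example_gpio_vbus);
	 */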
/* --------------------------- UDC definitions --------------------------- */
-static struct pxa2xx_udc_mach_info e800_udc_mach_info = {
+static struct gpio_vbus_mach_info e800_udc_info = {
.gpio_vbus = GPIO_E800_USB_DISC,
.gpio_pullup = GPIO_E800_USB_PULLUP,
.gpio_pullup_inverted = 1
};
+static struct platform_device e800_gpio_vbus = {
+ .name = "gpio-vbus",
+ .id = -1,
+ .dev = {
+ .platform_data = &e800_udc_info,
+ },
+};
+
+
/* ----------------- e800 tc6393xb parameters ------------------ */
static struct tc6393xb_platform_data e800_tc6393xb_info = {
static struct platform_device *e800_devices[] __initdata = {
&e800_fb_device,
&e800_tc6393xb_device,
+ &e800_gpio_vbus,
};
static void __init e800_init(void)
"GPIO11_CLK", NULL),
eseries_get_tmio_gpios();
platform_add_devices(ARRAY_AND_SIZE(e800_devices));
- pxa_set_udc_info(&e800_udc_mach_info);
pxa_set_ac97_info(NULL);
}
pxa_set_i2c_info(NULL);
- set_pxa_fb_info(&ezx_fb_info_1);
+ pxa_set_fb_info(NULL, &ezx_fb_info_1);
pxa_set_keypad_info(&a780_keypad_platform_data);
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(e680_i2c_board_info));
- set_pxa_fb_info(&ezx_fb_info_1);
+ pxa_set_fb_info(NULL, &ezx_fb_info_1);
pxa_set_keypad_info(&e680_keypad_platform_data);
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(a1200_i2c_board_info));
- set_pxa_fb_info(&ezx_fb_info_2);
+ pxa_set_fb_info(NULL, &ezx_fb_info_2);
pxa_set_keypad_info(&a1200_keypad_platform_data);
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(a910_i2c_board_info));
- set_pxa_fb_info(&ezx_fb_info_2);
+ pxa_set_fb_info(NULL, &ezx_fb_info_2);
pxa_set_keypad_info(&a910_keypad_platform_data);
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(e6_i2c_board_info));
- set_pxa_fb_info(&ezx_fb_info_2);
+ pxa_set_fb_info(NULL, &ezx_fb_info_2);
pxa_set_keypad_info(&e6_keypad_platform_data);
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(e2_i2c_board_info));
- set_pxa_fb_info(&ezx_fb_info_2);
+ pxa_set_fb_info(NULL, &ezx_fb_info_2);
pxa_set_keypad_info(&e2_keypad_platform_data);
#include <linux/gpio.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/usb/gpio_vbus.h>
#include <asm/setup.h>
#include <asm/memory.h>
#endif
#ifdef CONFIG_USB_GADGET_PXA25X
-static struct pxa2xx_udc_mach_info gumstix_udc_info __initdata = {
+static struct gpio_vbus_mach_info gumstix_udc_info = {
.gpio_vbus = GPIO_GUMSTIX_USB_GPIOn,
.gpio_pullup = GPIO_GUMSTIX_USB_GPIOx,
};
+static struct platform_device gumstix_gpio_vbus = {
+ .name = "gpio-vbus",
+ .id = -1,
+ .dev = {
+ .platform_data = &gumstix_udc_info,
+ },
+};
+
static void __init gumstix_udc_init(void)
{
- pxa_set_udc_info(&gumstix_udc_info);
+ platform_device_register(&gumstix_gpio_vbus);
}
#else
static void gumstix_udc_init(void)
platform_device_register(&smc91x_device);
//platform_device_register(&mst_audio_device);
- set_pxa_fb_info(&sharp_lm8v31);
+ pxa_set_fb_info(NULL, &sharp_lm8v31);
pxa_set_mci_info(&idp_mci_platform_data);
}
#define GPIO_NR_PALMZ72_BT_POWER 17
#define GPIO_NR_PALMZ72_BT_RESET 83
+/* Camera */
+#define GPIO_NR_PALMZ72_CAM_PWDN 56
+#define GPIO_NR_PALMZ72_CAM_RESET 57
+#define GPIO_NR_PALMZ72_CAM_POWER 91
+
/** Initial values **/
/* Battery */
void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *);
void (*smart_update)(struct fb_info *);
};
-void set_pxa_fb_info(struct pxafb_mach_info *hard_pxa_fb_info);
-void set_pxa_fb_parent(struct device *parent_dev);
+
+void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
unsigned long pxafb_get_hsync_time(struct device *dev);
extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int);
#define GPIO98_ZIPITZ2_LID_BUTTON 98
/* Libertas GSPI8686 WiFi */
-#define GPIO14_ZIPITZ2_WIFI_RESET 14
-#define GPIO15_ZIPITZ2_WIFI_POWER 15
+#define GPIO14_ZIPITZ2_WIFI_POWER 14
#define GPIO24_ZIPITZ2_WIFI_CS 24
#define GPIO36_ZIPITZ2_WIFI_IRQ 36
GEDR0 = 0x3;
for (irq = IRQ_GPIO0; irq <= IRQ_GPIO1; irq++) {
- set_irq_chip(irq, &pxa_low_gpio_chip);
- set_irq_chip_data(irq, irq_base(0));
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &pxa_low_gpio_chip,
+ handle_edge_irq);
+ irq_set_chip_data(irq, irq_base(0));
set_irq_flags(irq, IRQF_VALID);
}
__raw_writel(i | IPR_VALID, IRQ_BASE + IPR(i));
irq = PXA_IRQ(i);
- set_irq_chip(irq, &pxa_internal_irq_chip);
- set_irq_chip_data(irq, base);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, base);
set_irq_flags(irq, IRQF_VALID);
}
}
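Across the interrupt-controller hunks in this patch, the two-step set_irq_chip()/set_irq_handler() sequence collapses into a single irq_set_chip_and_handler() call, while per-IRQ data continues to be attached with irq_set_chip_data(). A minimal sketch of a registration loop in the new style, with placeholder chip and IRQ names:

	#include <linux/irq.h>

	/* Placeholder chip; only the call pattern matters here. */
	static struct irq_chip example_irq_chip = {
		.name = "EXAMPLE",
	};

	static void __init example_init_irqs(int first_irq, int last_irq,
					     void __iomem *base)
	{
		int irq;

		for (irq = first_irq; irq <= last_irq; irq++) {
			/* One call now sets both the chip and the flow handler. */
			irq_set_chip_and_handler(irq, &example_irq_chip,
						 handle_level_irq);
			irq_set_chip_data(irq, base);
			set_irq_flags(irq, IRQF_VALID);
		}
	}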
static void littleton_init_lcd(void)
{
- set_pxa_fb_info(&littleton_lcd_info);
+ pxa_set_fb_info(NULL, &littleton_lcd_info);
}
#else
static inline void littleton_init_lcd(void) {};
/* setup extra LogicPD PXA270 irqs */
for (irq = LPD270_IRQ(2); irq <= LPD270_IRQ(4); irq++) {
- set_irq_chip(irq, &lpd270_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &lpd270_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- set_irq_chained_handler(IRQ_GPIO(0), lpd270_irq_handler);
- set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
+ irq_set_chained_handler(IRQ_GPIO(0), lpd270_irq_handler);
+ irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
}
pxa_set_ac97_info(NULL);
if (lpd270_lcd_to_use != NULL)
- set_pxa_fb_info(lpd270_lcd_to_use);
+ pxa_set_fb_info(NULL, lpd270_lcd_to_use);
pxa_set_ohci_info(&lpd270_ohci_platform_data);
}
/* setup extra lubbock irqs */
for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) {
- set_irq_chip(irq, &lubbock_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &lubbock_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- set_irq_chained_handler(IRQ_GPIO(0), lubbock_irq_handler);
- set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
+ irq_set_chained_handler(IRQ_GPIO(0), lubbock_irq_handler);
+ irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
}
#ifdef CONFIG_PM
clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
pxa_set_udc_info(&udc_info);
- set_pxa_fb_info(&sharp_lm8v31);
+ pxa_set_fb_info(NULL, &sharp_lm8v31);
pxa_set_mci_info(&lubbock_mci_platform_data);
pxa_set_ficp_info(&lubbock_ficp_platform_data);
pxa_set_ac97_info(NULL);
gpio_direction_output(GPIO104_MAGICIAN_LCD_POWER_1, 0);
gpio_direction_output(GPIO105_MAGICIAN_LCD_POWER_2, 0);
gpio_direction_output(GPIO106_MAGICIAN_LCD_POWER_3, 0);
- set_pxa_fb_info(lcd_select ? &samsung_info : &toppoly_info);
+ pxa_set_fb_info(NULL, lcd_select ? &samsung_info : &toppoly_info);
} else
pr_err("LCD detection: CPLD mapping failed\n");
}
/* setup extra Mainstone irqs */
for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
- set_irq_chip(irq, &mainstone_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &mainstone_irq_chip,
+ handle_level_irq);
if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
else
MST_INTMSKENA = 0;
MST_INTSETCLR = 0;
- set_irq_chained_handler(IRQ_GPIO(0), mainstone_irq_handler);
- set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
+ irq_set_chained_handler(IRQ_GPIO(0), mainstone_irq_handler);
+ irq_set_irq_type(IRQ_GPIO(0), IRQ_TYPE_EDGE_FALLING);
}
#ifdef CONFIG_PM
else
mainstone_pxafb_info.modes = &toshiba_ltm035a776c_mode;
- set_pxa_fb_info(&mainstone_pxafb_info);
+ pxa_set_fb_info(NULL, &mainstone_pxafb_info);
mainstone_backlight_register();
pxa_set_mci_info(&mainstone_mci_platform_data);
pxa_set_stuart_info(NULL);
mio_gpio_request(ARRAY_AND_SIZE(global_gpios));
bootstrap_init();
- set_pxa_fb_info(&mioa701_pxafb_info);
+ pxa_set_fb_info(NULL, &mioa701_pxafb_info);
pxa_set_mci_info(&mioa701_mci_info);
pxa_set_keypad_info(&mioa701_keypad_info);
pxa_set_udc_info(&mioa701_udc_info);
/*
* Common code for Palm LD, T5, TX, Z72
*
- * Copyright (C) 2010
- * Marek Vasut <marek.vasut@gmail.com>
+ * Copyright (C) 2010-2011 Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
palm27x_lcd_screen.pxafb_lcd_power = palm27x_lcd_ctl;
}
- set_pxa_fb_info(&palm27x_lcd_screen);
+ pxa_set_fb_info(NULL, &palm27x_lcd_screen);
}
#endif
static void __init palmtc_lcd_init(void)
{
- set_pxa_fb_info(&palmtc_lcd_screen);
+ pxa_set_fb_info(NULL, &palmtc_lcd_screen);
}
#else
static inline void palmtc_lcd_init(void) {}
/******************************************************************************
* Backlight
******************************************************************************/
+static struct gpio palmte_bl_gpios[] = {
+ { GPIO_NR_PALMTE2_BL_POWER, GPIOF_INIT_LOW, "Backlight power" },
+ { GPIO_NR_PALMTE2_LCD_POWER, GPIOF_INIT_LOW, "LCD power" },
+};
+
static int palmte2_backlight_init(struct device *dev)
{
- int ret;
-
- ret = gpio_request(GPIO_NR_PALMTE2_BL_POWER, "BL POWER");
- if (ret)
- goto err;
- ret = gpio_direction_output(GPIO_NR_PALMTE2_BL_POWER, 0);
- if (ret)
- goto err2;
- ret = gpio_request(GPIO_NR_PALMTE2_LCD_POWER, "LCD POWER");
- if (ret)
- goto err2;
- ret = gpio_direction_output(GPIO_NR_PALMTE2_LCD_POWER, 0);
- if (ret)
- goto err3;
-
- return 0;
-err3:
- gpio_free(GPIO_NR_PALMTE2_LCD_POWER);
-err2:
- gpio_free(GPIO_NR_PALMTE2_BL_POWER);
-err:
- return ret;
+ return gpio_request_array(ARRAY_AND_SIZE(palmte_bl_gpios));
}
static int palmte2_backlight_notify(struct device *dev, int brightness)
static void palmte2_backlight_exit(struct device *dev)
{
- gpio_free(GPIO_NR_PALMTE2_BL_POWER);
- gpio_free(GPIO_NR_PALMTE2_LCD_POWER);
+ gpio_free_array(ARRAY_AND_SIZE(palmte_bl_gpios));
}
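The palmte2 backlight hunk above trades a chain of gpio_request()/gpio_direction_output() calls and their unwind labels for the gpiolib array helpers: a struct gpio table plus gpio_request_array() and gpio_free_array(), which claim and release all pins in one go. A small sketch of the same pattern with made-up pins and labels:

	#include <linux/kernel.h>
	#include <linux/gpio.h>

	/* Hypothetical pins, for illustration only. */
	static struct gpio example_gpios[] = {
		{ 10, GPIOF_OUT_INIT_LOW,  "example power"  },
		{ 11, GPIOF_OUT_INIT_HIGH, "example enable" },
	};

	static int example_gpio_init(void)
	{
		/* Requests every pin and applies the direction/initial level
		 * encoded in the flags; if any request fails, the pins already
		 * claimed are freed before the error is returned. */
		return gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
	}

	static void example_gpio_exit(void)
	{
		gpio_free_array(example_gpios, ARRAY_SIZE(example_gpios));
	}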
static struct platform_pwm_backlight_data palmte2_backlight_data = {
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
- set_pxa_fb_info(&palmte2_lcd_screen);
+ pxa_set_fb_info(NULL, &palmte2_lcd_screen);
pxa_set_mci_info(&palmte2_mci_platform_data);
palmte2_udc_init();
pxa_set_ac97_info(&palmte2_ac97_pdata);
#include <linux/wm97xx.h>
#include <linux/power_supply.h>
#include <linux/usb/gpio_vbus.h>
+#include <linux/i2c-gpio.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/palm27x.h>
#include <mach/pm.h>
+#include <mach/camera.h>
+
+#include <media/soc_camera.h>
#include "generic.h"
#include "devices.h"
GPIO22_GPIO, /* LCD border color */
GPIO96_GPIO, /* lcd power */
+ /* PXA Camera */
+ GPIO81_CIF_DD_0,
+ GPIO48_CIF_DD_5,
+ GPIO50_CIF_DD_3,
+ GPIO51_CIF_DD_2,
+ GPIO52_CIF_DD_4,
+ GPIO53_CIF_MCLK,
+ GPIO54_CIF_PCLK,
+ GPIO55_CIF_DD_1,
+ GPIO84_CIF_FV,
+ GPIO85_CIF_LV,
+ GPIO93_CIF_DD_6,
+ GPIO108_CIF_DD_7,
+
+ GPIO56_GPIO, /* OV9640 Powerdown */
+ GPIO57_GPIO, /* OV9640 Reset */
+ GPIO91_GPIO, /* OV9640 Power */
+
+ /* I2C */
+ GPIO117_GPIO, /* I2C_SCL */
+ GPIO118_GPIO, /* I2C_SDA */
+
/* Misc. */
GPIO0_GPIO | WAKEUP_ON_LEVEL_HIGH, /* power detect */
GPIO88_GPIO, /* green led */
device_initcall(palmz72_pm_init);
#endif
+/******************************************************************************
+ * SoC Camera
+ ******************************************************************************/
+#if defined(CONFIG_SOC_CAMERA_OV9640) || \
+ defined(CONFIG_SOC_CAMERA_OV9640_MODULE)
+static struct pxacamera_platform_data palmz72_pxacamera_platform_data = {
+ .flags = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 |
+ PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN,
+ .mclk_10khz = 2600,
+};
+
+/* Board I2C devices. */
+static struct i2c_board_info palmz72_i2c_device[] = {
+ {
+ I2C_BOARD_INFO("ov9640", 0x30),
+ }
+};
+
+static int palmz72_camera_power(struct device *dev, int power)
+{
+ gpio_set_value(GPIO_NR_PALMZ72_CAM_PWDN, !power);
+ mdelay(50);
+ return 0;
+}
+
+static int palmz72_camera_reset(struct device *dev)
+{
+ gpio_set_value(GPIO_NR_PALMZ72_CAM_RESET, 1);
+ mdelay(50);
+ gpio_set_value(GPIO_NR_PALMZ72_CAM_RESET, 0);
+ mdelay(50);
+ return 0;
+}
+
+static struct soc_camera_link palmz72_iclink = {
+ .bus_id = 0, /* Match id in pxa27x_device_camera in device.c */
+ .board_info = &palmz72_i2c_device[0],
+ .i2c_adapter_id = 0,
+ .module_name = "ov96xx",
+ .power = &palmz72_camera_power,
+ .reset = &palmz72_camera_reset,
+ .flags = SOCAM_DATAWIDTH_8,
+};
+
+static struct i2c_gpio_platform_data palmz72_i2c_bus_data = {
+ .sda_pin = 118,
+ .scl_pin = 117,
+ .udelay = 10,
+ .timeout = 100,
+};
+
+static struct platform_device palmz72_i2c_bus_device = {
+ .name = "i2c-gpio",
+ .id = 0, /* we use this as a replacement for i2c-pxa */
+ .dev = {
+ .platform_data = &palmz72_i2c_bus_data,
+ }
+};
+
+static struct platform_device palmz72_camera = {
+ .name = "soc-camera-pdrv",
+ .id = -1,
+ .dev = {
+ .platform_data = &palmz72_iclink,
+ },
+};
+
+/* Request the camera GPIOs and configure them: power up the camera module,
+ * deassert its reset pin, and leave it in powerdown (minimal power draw) mode
+ * so that it can be brought back up quickly later. */
+static struct gpio palmz72_camera_gpios[] = {
+	{ GPIO_NR_PALMZ72_CAM_POWER, GPIOF_INIT_HIGH, "Camera DVDD" },
+ { GPIO_NR_PALMZ72_CAM_RESET, GPIOF_INIT_LOW, "Camera RESET" },
+ { GPIO_NR_PALMZ72_CAM_PWDN, GPIOF_INIT_LOW, "Camera PWDN" },
+};
+
+static inline void __init palmz72_cam_gpio_init(void)
+{
+ int ret;
+
+ ret = gpio_request_array(ARRAY_AND_SIZE(palmz72_camera_gpios));
+ if (!ret)
+ gpio_free_array(ARRAY_AND_SIZE(palmz72_camera_gpios));
+ else
+ printk(KERN_ERR "Camera GPIO init failed!\n");
+
+ return;
+}
+
+static void __init palmz72_camera_init(void)
+{
+ palmz72_cam_gpio_init();
+ pxa_set_camera_info(&palmz72_pxacamera_platform_data);
+ platform_device_register(&palmz72_i2c_bus_device);
+ platform_device_register(&palmz72_camera);
+}
+#else
+static inline void palmz72_camera_init(void) {}
+#endif
+
/******************************************************************************
* Machine init
******************************************************************************/
palm27x_pmic_init();
palmz72_kpc_init();
palmz72_leds_init();
+ palmz72_camera_init();
}
MACHINE_START(PALMZ72, "Palm Zire72")
/* setup extra PCM990 irqs */
for (irq = PCM027_IRQ(0); irq <= PCM027_IRQ(3); irq++) {
- set_irq_chip(irq, &pcm990_irq_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &pcm990_irq_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
PCM990_INTMSKENA = 0x00; /* disable all Interrupts */
PCM990_INTSETCLR = 0xFF;
- set_irq_chained_handler(PCM990_CTRL_INT_IRQ, pcm990_irq_handler);
- set_irq_type(PCM990_CTRL_INT_IRQ, PCM990_CTRL_INT_IRQ_EDGE);
+ irq_set_chained_handler(PCM990_CTRL_INT_IRQ, pcm990_irq_handler);
+ irq_set_irq_type(PCM990_CTRL_INT_IRQ, PCM990_CTRL_INT_IRQ_EDGE);
}
static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int,
pcm990_init_irq();
#ifndef CONFIG_PCM990_DISPLAY_NONE
- set_pxa_fb_info(&pcm990_fbinfo);
+ pxa_set_fb_info(NULL, &pcm990_fbinfo);
#endif
platform_device_register(&pcm990_backlight_device);
if (ret)
pr_warning("poodle: Unable to register LoCoMo device\n");
- set_pxa_fb_parent(&poodle_locomo_device.dev);
- set_pxa_fb_info(&poodle_fb_info);
+ pxa_set_fb_info(&poodle_locomo_device.dev, &poodle_fb_info);
pxa_set_udc_info(&udc_info);
pxa_set_mci_info(&poodle_mci_platform_data);
pxa_set_ficp_info(&poodle_ficp_platform_data);
int irq;
for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
- set_irq_chip(irq, &pxa_ext_wakeup_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
+ handle_edge_irq);
set_irq_flags(irq, IRQF_VALID);
}
{
int ret;
- set_pxa_fb_info(&raumfeld_sharp_lcd_info);
+ pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
/* Earlier devices had the backlight regulator controlled
* via PWM, later versions use another controller for that */
static void __init saar_init_lcd(void)
{
- set_pxa_fb_info(&saar_lcd_info);
+ pxa_set_fb_info(NULL, &saar_lcd_info);
}
#else
static inline void saar_init_lcd(void) {}
static void __init spitz_lcd_init(void)
{
- set_pxa_fb_info(&spitz_pxafb_info);
+ pxa_set_fb_info(NULL, &spitz_pxafb_info);
}
#else
static inline void spitz_lcd_init(void) {}
{
platform_device_register(&tavorevb_backlight_devices[0]);
platform_device_register(&tavorevb_backlight_devices[1]);
- set_pxa_fb_info(&tavorevb_lcd_info);
+ pxa_set_fb_info(NULL, &tavorevb_lcd_info);
}
#else
static inline void tavorevb_init_lcd(void) {}
static struct clock_event_device ckevt_pxa_osmr0 = {
.name = "osmr0",
.features = CLOCK_EVT_FEAT_ONESHOT,
- .shift = 32,
.rating = 200,
.set_next_event = pxa_osmr0_set_next_event,
.set_mode = pxa_osmr0_set_mode,
init_sched_clock(&cd, pxa_update_sched_clock, 32, clock_tick_rate);
- ckevt_pxa_osmr0.mult =
- div_sc(clock_tick_rate, NSEC_PER_SEC, ckevt_pxa_osmr0.shift);
+ clocksource_calc_mult_shift(&cksrc_pxa_oscr0, clock_tick_rate, 4);
+ clockevents_calc_mult_shift(&ckevt_pxa_osmr0, clock_tick_rate, 4);
ckevt_pxa_osmr0.max_delta_ns =
clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0);
ckevt_pxa_osmr0.min_delta_ns =
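The PXA timer hunk above stops hard-coding .shift = 32 and deriving .mult by hand with div_sc(): the generic helpers now pick a (mult, shift) pair from the OS timer rate, with the final argument (4 here) giving the number of seconds over which the conversion must stay accurate. A brief sketch of what the pair is then used for, stated in terms of the generic clocksource/clockevents conversion rather than anything PXA-specific:

	/* With mult/shift chosen by the helpers, the conversions reduce to:
	 *   clocksource:  nanoseconds = (cycles * mult) >> shift
	 *   clockevents:  cycles      = (nanoseconds * mult) >> shift
	 */
	static inline u64 example_cyc2ns(u64 cycles, u32 mult, u32 shift)
	{
		return (cycles * mult) >> shift;
	}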
#include <linux/spi/pxa2xx_spi.h>
#include <linux/input/matrix_keypad.h>
#include <linux/i2c/pxa-i2c.h>
+#include <linux/usb/gpio_vbus.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
/*
* USB Device Controller
*/
-static struct pxa2xx_udc_mach_info udc_info __initdata = {
+static struct gpio_vbus_mach_info tosa_udc_info = {
.gpio_pullup = TOSA_GPIO_USB_PULLUP,
.gpio_vbus = TOSA_GPIO_USB_IN,
.gpio_vbus_inverted = 1,
};
+static struct platform_device tosa_gpio_vbus = {
+ .name = "gpio-vbus",
+ .id = -1,
+ .dev = {
+ .platform_data = &tosa_udc_info,
+ },
+};
+
/*
* MMC/SD Device
*/
&tosa_bt_device,
&sharpsl_rom_device,
&wm9712_device,
+ &tosa_gpio_vbus,
};
static void tosa_poweroff(void)
dummy = gpiochip_reserve(TOSA_TC6393XB_GPIO_BASE, 16);
pxa_set_mci_info(&tosa_mci_platform_data);
- pxa_set_udc_info(&udc_info);
pxa_set_ficp_info(&tosa_ficp_platform_data);
pxa_set_i2c_info(NULL);
pxa_set_ac97_info(NULL);
pxa_set_stuart_info(NULL);
if (0) /* dont know how to determine LCD */
- set_pxa_fb_info(&sharp_lcd);
+ pxa_set_fb_info(NULL, &sharp_lcd);
else
- set_pxa_fb_info(&toshiba_lcd);
+ pxa_set_fb_info(NULL, &toshiba_lcd);
pxa_set_mci_info(&trizeps4_mci_platform_data);
#ifndef STATUS_LEDS_ON_STUART_PINS
/* setup ISA IRQs */
for (level = 0; level < ARRAY_SIZE(viper_isa_irqs); level++) {
isa_irq = viper_bit_to_irq(level);
- set_irq_chip(isa_irq, &viper_irq_chip);
- set_irq_handler(isa_irq, handle_edge_irq);
+ irq_set_chip_and_handler(isa_irq, &viper_irq_chip,
+ handle_edge_irq);
set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE);
}
- set_irq_chained_handler(gpio_to_irq(VIPER_CPLD_GPIO),
+ irq_set_chained_handler(gpio_to_irq(VIPER_CPLD_GPIO),
viper_irq_handler);
- set_irq_type(gpio_to_irq(VIPER_CPLD_GPIO), IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(gpio_to_irq(VIPER_CPLD_GPIO), IRQ_TYPE_EDGE_BOTH);
}
/* Flat Panel */
/* Wake-up serial console */
viper_init_serial_gpio();
- set_pxa_fb_info(&fb_info);
+ pxa_set_fb_info(NULL, &fb_info);
/* v1 hardware cannot use the datacs line */
version = viper_hw_version();
}
vpac270_lcd_screen.pxafb_lcd_power = vpac270_lcd_power;
- set_pxa_fb_info(&vpac270_lcd_screen);
+ pxa_set_fb_info(NULL, &vpac270_lcd_screen);
return;
err2:
GPIO47_STUART_TXD,
/* Keypad */
- GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
- GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH,
- GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH,
- GPIO34_KP_MKIN_3 | WAKEUP_ON_LEVEL_HIGH,
- GPIO38_KP_MKIN_4 | WAKEUP_ON_LEVEL_HIGH,
- GPIO16_KP_MKIN_5 | WAKEUP_ON_LEVEL_HIGH,
- GPIO17_KP_MKIN_6 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO100_KP_MKIN_0,
+ GPIO101_KP_MKIN_1,
+ GPIO102_KP_MKIN_2,
+ GPIO34_KP_MKIN_3,
+ GPIO38_KP_MKIN_4,
+ GPIO16_KP_MKIN_5,
+ GPIO17_KP_MKIN_6,
GPIO103_KP_MKOUT_0,
GPIO104_KP_MKOUT_1,
GPIO105_KP_MKOUT_2,
GPIO1_GPIO, /* Power button */
GPIO37_GPIO, /* Headphone detect */
GPIO98_GPIO, /* Lid switch */
- GPIO14_GPIO, /* WiFi Reset */
- GPIO15_GPIO, /* WiFi Power */
+ GPIO14_GPIO, /* WiFi Power */
GPIO24_GPIO, /* WiFi CS */
GPIO36_GPIO, /* WiFi IRQ */
GPIO88_GPIO, /* LCD CS */
/* Keypad Backlight */
.pwm_id = 1,
.max_brightness = 1023,
- .dft_brightness = 512,
+ .dft_brightness = 0,
.pwm_period_ns = 1260320,
},
[1] = {
static void __init z2_lcd_init(void)
{
- set_pxa_fb_info(&z2_lcd_screen);
+ pxa_set_fb_info(NULL, &z2_lcd_screen);
}
#else
static inline void z2_lcd_init(void) {}
.active_low = 1,
}, {
.name = "z2:green:charged",
- .default_trigger = "none",
+ .default_trigger = "mmc0",
.gpio = GPIO85_ZIPITZ2_LED_CHARGED,
.active_low = 1,
}, {
.name = "z2:amber:charging",
- .default_trigger = "none",
+ .default_trigger = "Z2-charging-or-full",
.gpio = GPIO83_ZIPITZ2_LED_CHARGING,
.active_low = 1,
},
******************************************************************************/
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
static struct gpio_keys_button z2_pxa_buttons[] = {
- {KEY_POWER, GPIO1_ZIPITZ2_POWER_BUTTON, 0, "Power Button" },
- {KEY_CLOSE, GPIO98_ZIPITZ2_LID_BUTTON, 0, "Lid Button" },
+ {
+ .code = KEY_POWER,
+ .gpio = GPIO1_ZIPITZ2_POWER_BUTTON,
+ .active_low = 0,
+ .desc = "Power Button",
+ .wakeup = 1,
+ .type = EV_KEY,
+ },
+ {
+ .code = SW_LID,
+ .gpio = GPIO98_ZIPITZ2_LID_BUTTON,
+ .active_low = 1,
+ .desc = "Lid Switch",
+ .wakeup = 0,
+ .type = EV_SW,
+ },
};
static struct gpio_keys_platform_data z2_pxa_keys_data = {
.batt_I2C_addr = 0x55,
.batt_I2C_reg = 2,
.charge_gpio = GPIO0_ZIPITZ2_AC_DETECT,
- .min_voltage = 2400000,
- .max_voltage = 3700000,
- .batt_div = 69,
+ .min_voltage = 3475000,
+ .max_voltage = 4190000,
+ .batt_div = 59,
.batt_mult = 1000000,
.batt_tech = POWER_SUPPLY_TECHNOLOGY_LION,
.batt_name = "Z2",
{
int ret = 0;
- ret = gpio_request(GPIO15_ZIPITZ2_WIFI_POWER, "WiFi Power");
+ ret = gpio_request(GPIO14_ZIPITZ2_WIFI_POWER, "WiFi Power");
if (ret)
goto err;
- ret = gpio_direction_output(GPIO15_ZIPITZ2_WIFI_POWER, 1);
+ ret = gpio_direction_output(GPIO14_ZIPITZ2_WIFI_POWER, 1);
if (ret)
goto err2;
- ret = gpio_request(GPIO14_ZIPITZ2_WIFI_RESET, "WiFi Reset");
- if (ret)
- goto err2;
-
- ret = gpio_direction_output(GPIO14_ZIPITZ2_WIFI_RESET, 0);
- if (ret)
- goto err3;
-
- /* Reset the card */
+ /* Wait until card is powered on */
mdelay(180);
- gpio_set_value(GPIO14_ZIPITZ2_WIFI_RESET, 1);
- mdelay(20);
spi->bits_per_word = 16;
spi->mode = SPI_MODE_2,
return 0;
-err3:
- gpio_free(GPIO14_ZIPITZ2_WIFI_RESET);
err2:
- gpio_free(GPIO15_ZIPITZ2_WIFI_POWER);
+ gpio_free(GPIO14_ZIPITZ2_WIFI_POWER);
err:
return ret;
};
static int z2_lbs_spi_teardown(struct spi_device *spi)
{
- gpio_set_value(GPIO14_ZIPITZ2_WIFI_RESET, 0);
- gpio_set_value(GPIO15_ZIPITZ2_WIFI_POWER, 0);
- gpio_free(GPIO14_ZIPITZ2_WIFI_RESET);
- gpio_free(GPIO15_ZIPITZ2_WIFI_POWER);
- return 0;
+ gpio_set_value(GPIO14_ZIPITZ2_WIFI_POWER, 0);
+ gpio_free(GPIO14_ZIPITZ2_WIFI_POWER);
+ return 0;
};
static struct pxa2xx_spi_chip z2_lbs_chip_info = {
/* Peripheral IRQs. It would be nice to move those inside driver
configuration, but it is not supported at the moment. */
- set_irq_type(gpio_to_irq(ZEUS_AC97_GPIO), IRQ_TYPE_EDGE_RISING);
- set_irq_type(gpio_to_irq(ZEUS_WAKEUP_GPIO), IRQ_TYPE_EDGE_RISING);
- set_irq_type(gpio_to_irq(ZEUS_PTT_GPIO), IRQ_TYPE_EDGE_RISING);
- set_irq_type(gpio_to_irq(ZEUS_EXTGPIO_GPIO), IRQ_TYPE_EDGE_FALLING);
- set_irq_type(gpio_to_irq(ZEUS_CAN_GPIO), IRQ_TYPE_EDGE_FALLING);
+ irq_set_irq_type(gpio_to_irq(ZEUS_AC97_GPIO), IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(gpio_to_irq(ZEUS_WAKEUP_GPIO), IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(gpio_to_irq(ZEUS_PTT_GPIO), IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(gpio_to_irq(ZEUS_EXTGPIO_GPIO),
+ IRQ_TYPE_EDGE_FALLING);
+ irq_set_irq_type(gpio_to_irq(ZEUS_CAN_GPIO), IRQ_TYPE_EDGE_FALLING);
/* Setup ISA IRQs */
for (level = 0; level < ARRAY_SIZE(zeus_isa_irqs); level++) {
isa_irq = zeus_bit_to_irq(level);
- set_irq_chip(isa_irq, &zeus_irq_chip);
- set_irq_handler(isa_irq, handle_edge_irq);
+ irq_set_chip_and_handler(isa_irq, &zeus_irq_chip,
+ handle_edge_irq);
set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE);
}
- set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING);
- set_irq_chained_handler(gpio_to_irq(ZEUS_ISA_GPIO), zeus_irq_handler);
+ irq_set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING);
+ irq_set_chained_handler(gpio_to_irq(ZEUS_ISA_GPIO), zeus_irq_handler);
}
if (zeus_setup_fb_gpios())
pr_err("Failed to setup fb gpios\n");
else
- set_pxa_fb_info(&zeus_fb_info);
+ pxa_set_fb_info(NULL, &zeus_fb_info);
pxa_set_mci_info(&zeus_mci_platform_data);
pxa_set_udc_info(&zeus_udc_info);
platform_device_register(&zylonite_backlight_device);
if (lcd_id & 0x20) {
- set_pxa_fb_info(&zylonite_sharp_lcd_info);
+ pxa_set_fb_info(NULL, &zylonite_sharp_lcd_info);
return;
}
else
zylonite_toshiba_lcd_info.modes = &toshiba_ltm04c380k_mode;
- set_pxa_fb_info(&zylonite_toshiba_lcd_info);
+ pxa_set_fb_info(NULL, &zylonite_toshiba_lcd_info);
}
#else
static inline void zylonite_init_lcd(void) {}
#ifndef CONFIG_REALVIEW_EB_ARM11MP_REVB
/* board GIC, secondary */
- gic_init(1, 64, __io_address(REALVIEW_EB_GIC_DIST_BASE),
+ gic_init(1, 96, __io_address(REALVIEW_EB_GIC_DIST_BASE),
__io_address(REALVIEW_EB_GIC_CPU_BASE));
gic_cascade_irq(1, IRQ_EB11MP_EB_IRQ1);
#endif
switch (irq) {
case 0 ... 7:
- set_irq_chip(irq, &iomd_a_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &iomd_a_chip,
+ handle_level_irq);
set_irq_flags(irq, flags);
break;
case 8 ... 15:
- set_irq_chip(irq, &iomd_b_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &iomd_b_chip,
+ handle_level_irq);
set_irq_flags(irq, flags);
break;
case 16 ... 21:
- set_irq_chip(irq, &iomd_dma_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &iomd_dma_chip,
+ handle_level_irq);
set_irq_flags(irq, flags);
break;
case 64 ... 71:
- set_irq_chip(irq, &iomd_fiq_chip);
+ irq_set_chip(irq, &iomd_fiq_chip);
set_irq_flags(irq, IRQF_VALID);
break;
}
__raw_writeb(0x0, BAST_VA_PC104_IRQMASK);
- set_irq_chained_handler(IRQ_ISA, bast_irq_pc104_demux);
+ irq_set_chained_handler(IRQ_ISA, bast_irq_pc104_demux);
/* register our IRQs */
for (i = 0; i < 4; i++) {
unsigned int irqno = bast_pc104_irqs[i];
- set_irq_chip(irqno, &bast_pc104_chip);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, &bast_pc104_chip,
+ handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
}
unsigned int irqno;
for (irqno = IRQ_EINT0; irqno <= IRQ_EINT3; irqno++) {
- set_irq_chip(irqno, &s3c2412_irq_eint0t4);
- set_irq_handler(irqno, handle_edge_irq);
+ irq_set_chip_and_handler(irqno, &s3c2412_irq_eint0t4,
+ handle_edge_irq);
set_irq_flags(irqno, IRQF_VALID);
}
/* add demux support for CF/SDI */
- set_irq_chained_handler(IRQ_S3C2412_CFSDI, s3c2412_irq_demux_cfsdi);
+ irq_set_chained_handler(IRQ_S3C2412_CFSDI, s3c2412_irq_demux_cfsdi);
for (irqno = IRQ_S3C2412_SDI; irqno <= IRQ_S3C2412_CF; irqno++) {
- set_irq_chip(irqno, &s3c2412_irq_cfsdi);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, &s3c2412_irq_cfsdi,
+ handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
s3c2412_irq_rtc_chip = s3c_irq_chip;
s3c2412_irq_rtc_chip.irq_set_wake = s3c2412_irq_rtc_wake;
- set_irq_chip(IRQ_RTC, &s3c2412_irq_rtc_chip);
+ irq_set_chip(IRQ_RTC, &s3c2412_irq_rtc_chip);
return 0;
}
{
unsigned int irqno;
- set_irq_chip(base, &s3c_irq_level_chip);
- set_irq_handler(base, handle_level_irq);
- set_irq_chained_handler(base, demux);
+ irq_set_chip_and_handler(base, &s3c_irq_level_chip, handle_level_irq);
+ irq_set_chained_handler(base, demux);
for (irqno = start; irqno <= end; irqno++) {
- set_irq_chip(irqno, chip);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, chip, handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
/* add new chained handler for wdt, ac7 */
- set_irq_chip(IRQ_WDT, &s3c_irq_level_chip);
- set_irq_handler(IRQ_WDT, handle_level_irq);
- set_irq_chained_handler(IRQ_WDT, s3c_irq_demux_wdtac97);
+ irq_set_chip_and_handler(IRQ_WDT, &s3c_irq_level_chip,
+ handle_level_irq);
+ irq_set_chained_handler(IRQ_WDT, s3c_irq_demux_wdtac97);
for (irqno = IRQ_S3C2440_WDT; irqno <= IRQ_S3C2440_AC97; irqno++) {
- set_irq_chip(irqno, &s3c_irq_wdtac97);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, &s3c_irq_wdtac97,
+ handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
{
unsigned int irqno;
- set_irq_chip(IRQ_NFCON, &s3c_irq_level_chip);
- set_irq_handler(IRQ_NFCON, handle_level_irq);
+ irq_set_chip_and_handler(IRQ_NFCON, &s3c_irq_level_chip,
+ handle_level_irq);
set_irq_flags(IRQ_NFCON, IRQF_VALID);
/* add chained handler for camera */
- set_irq_chip(IRQ_CAM, &s3c_irq_level_chip);
- set_irq_handler(IRQ_CAM, handle_level_irq);
- set_irq_chained_handler(IRQ_CAM, s3c_irq_demux_cam);
+ irq_set_chip_and_handler(IRQ_CAM, &s3c_irq_level_chip,
+ handle_level_irq);
+ irq_set_chained_handler(IRQ_CAM, s3c_irq_demux_cam);
for (irqno = IRQ_S3C2440_CAM_C; irqno <= IRQ_S3C2440_CAM_P; irqno++) {
- set_irq_chip(irqno, &s3c_irq_cam);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, &s3c_irq_cam,
+ handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
{
unsigned int irqno;
- set_irq_chip(base, &s3c_irq_level_chip);
- set_irq_handler(base, handle_level_irq);
- set_irq_chained_handler(base, demux);
+ irq_set_chip_and_handler(base, &s3c_irq_level_chip, handle_level_irq);
+ irq_set_chained_handler(base, demux);
for (irqno = start; irqno <= end; irqno++) {
- set_irq_chip(irqno, chip);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, chip, handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
int irq;
for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) {
- set_irq_chip(irq, &s3c_irq_eint);
- set_irq_chip_data(irq, (void *)eint_irq_to_bit(irq));
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq);
+ irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq));
set_irq_flags(irq, IRQF_VALID);
}
- set_irq_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3);
- set_irq_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11);
- set_irq_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19);
- set_irq_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27);
+ irq_set_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3);
+ irq_set_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11);
+ irq_set_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19);
+ irq_set_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27);
return 0;
}
static void __init cerf_init_irq(void)
{
sa1100_init_irq();
- set_irq_type(CERF_ETH_IRQ, IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(CERF_ETH_IRQ, IRQ_TYPE_EDGE_RISING);
}
static struct map_desc cerf_io_desc[] __initdata = {
ICCR = 1;
for (irq = 0; irq <= 10; irq++) {
- set_irq_chip(irq, &sa1100_low_gpio_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip,
+ handle_edge_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
for (irq = 12; irq <= 31; irq++) {
- set_irq_chip(irq, &sa1100_normal_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &sa1100_normal_chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
for (irq = 32; irq <= 48; irq++) {
- set_irq_chip(irq, &sa1100_high_gpio_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip,
+ handle_edge_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
/*
* Install handler for GPIO 11-27 edge detect interrupts
*/
- set_irq_chip(IRQ_GPIO11_27, &sa1100_normal_chip);
- set_irq_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler);
+ irq_set_chip(IRQ_GPIO11_27, &sa1100_normal_chip);
+ irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler);
sa1100_init_gpio();
}
/*
* Install handler for GPIO25.
*/
- set_irq_type(IRQ_GPIO25, IRQ_TYPE_EDGE_RISING);
- set_irq_chained_handler(IRQ_GPIO25, neponset_irq_handler);
+ irq_set_irq_type(IRQ_GPIO25, IRQ_TYPE_EDGE_RISING);
+ irq_set_chained_handler(IRQ_GPIO25, neponset_irq_handler);
/*
* We would set IRQ_GPIO25 to be a wake-up IRQ, but
* Setup other Neponset IRQs. SA1111 will be done by the
* generic SA1111 code.
*/
- set_irq_handler(IRQ_NEPONSET_SMC9196, handle_simple_irq);
+ irq_set_handler(IRQ_NEPONSET_SMC9196, handle_simple_irq);
set_irq_flags(IRQ_NEPONSET_SMC9196, IRQF_VALID | IRQF_PROBE);
- set_irq_handler(IRQ_NEPONSET_USAR, handle_simple_irq);
+ irq_set_handler(IRQ_NEPONSET_USAR, handle_simple_irq);
set_irq_flags(IRQ_NEPONSET_USAR, IRQF_VALID | IRQF_PROBE);
/*
GPDR &= ~GPIO_ETH0_IRQ;
- set_irq_type(GPIO_ETH0_IRQ, IRQ_TYPE_EDGE_FALLING);
+ irq_set_irq_type(GPIO_ETH0_IRQ, IRQ_TYPE_EDGE_FALLING);
}
MACHINE_START(PLEB, "PLEB")
int irq;
for (irq = 0; irq < NR_IRQS; irq++) {
- set_irq_chip(irq, &fb_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &fb_chip, handle_edge_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
[0] = {
.name = "SDHI0",
.start = 0xe6850000,
- .end = 0xe68501ff,
+ .end = 0xe68500ff,
.flags = IORESOURCE_MEM,
},
[1] = {
[0] = {
.name = "SDHI1",
.start = 0xe6860000,
- .end = 0xe68601ff,
+ .end = 0xe68600ff,
.flags = IORESOURCE_MEM,
},
[1] = {
gpio_request(GPIO_FN_KEYIN4, NULL);
/* enable TouchScreen */
- set_irq_type(IRQ28, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ28, IRQ_TYPE_LEVEL_LOW);
tsc_device.irq = IRQ28;
i2c_register_board_info(1, &tsc_device, 1);
lcdc_info.ch[0].lcd_size_cfg.height = 91;
/* enable TouchScreen */
- set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW);
tsc_device.irq = IRQ7;
i2c_register_board_info(0, &tsc_device, 1);
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
#include <linux/mmc/host.h>
-#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/gpio.h>
#include <mach/sh7377.h>
#include <mach/common.h>
[0] = {
.name = "SDHI0",
.start = 0xe6d50000,
- .end = 0xe6d501ff,
+	.end		= 0xe6d500ff,
.flags = IORESOURCE_MEM,
},
[1] = {
[0] = {
.name = "SDHI1",
.start = 0xe6d60000,
- .end = 0xe6d601ff,
+ .end = 0xe6d600ff,
.flags = IORESOURCE_MEM,
},
[1] = {
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/leds.h>
-#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
[0] = {
.name = "SDHI0",
.start = 0xe6850000,
- .end = 0xe68501ff,
+ .end = 0xe68500ff,
.flags = IORESOURCE_MEM,
},
[1] = {
[0] = {
.name = "SDHI1",
.start = 0xe6860000,
- .end = 0xe68601ff,
+ .end = 0xe68600ff,
.flags = IORESOURCE_MEM,
},
[1] = {
[0] = {
.name = "SDHI2",
.start = 0xe6870000,
- .end = 0xe68701ff,
+ .end = 0xe68700ff,
.flags = IORESOURCE_MEM,
},
[1] = {
/* enable Keypad */
gpio_request(GPIO_FN_IRQ9_42, NULL);
- set_irq_type(IRQ9, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_irq_type(IRQ9, IRQ_TYPE_LEVEL_HIGH);
/* enable Touchscreen */
gpio_request(GPIO_FN_IRQ7_40, NULL);
- set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ7, IRQ_TYPE_LEVEL_LOW);
/* enable Accelerometer */
gpio_request(GPIO_FN_IRQ21, NULL);
- set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH);
/* enable SDHI0 */
gpio_request(GPIO_FN_SDHICD0, NULL);
static void intcs_demux(unsigned int irq, struct irq_desc *desc)
{
- void __iomem *reg = (void *)get_irq_data(irq);
+ void __iomem *reg = (void *)irq_get_handler_data(irq);
unsigned int evtcodeas = ioread32(reg);
generic_handle_irq(intcs_evt2irq(evtcodeas));
register_intc_controller(&intcs_desc);
/* demux using INTEVTSA */
- set_irq_data(evt2irq(0xf80), (void *)intevtsa);
- set_irq_chained_handler(evt2irq(0xf80), intcs_demux);
+ irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa);
+ irq_set_chained_handler(evt2irq(0xf80), intcs_demux);
}
static void intcs_demux(unsigned int irq, struct irq_desc *desc)
{
- void __iomem *reg = (void *)get_irq_data(irq);
+ void __iomem *reg = (void *)irq_get_handler_data(irq);
unsigned int evtcodeas = ioread32(reg);
generic_handle_irq(intcs_evt2irq(evtcodeas));
register_intc_controller(&intcs_desc);
/* demux using INTEVTSA */
- set_irq_data(evt2irq(0xf80), (void *)intevtsa);
- set_irq_chained_handler(evt2irq(0xf80), intcs_demux);
+ irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa);
+ irq_set_chained_handler(evt2irq(0xf80), intcs_demux);
}
static void intcs_demux(unsigned int irq, struct irq_desc *desc)
{
- void __iomem *reg = (void *)get_irq_data(irq);
+ void __iomem *reg = (void *)irq_get_handler_data(irq);
unsigned int evtcodeas = ioread32(reg);
generic_handle_irq(intcs_evt2irq(evtcodeas));
register_intc_controller(&intcs_desc);
/* demux using INTEVTSA */
- set_irq_data(evt2irq(INTCS_INTVECT), (void *)intevtsa);
- set_irq_chained_handler(evt2irq(INTCS_INTVECT), intcs_demux);
+ irq_set_handler_data(evt2irq(INTCS_INTVECT), (void *)intevtsa);
+ irq_set_chained_handler(evt2irq(INTCS_INTVECT), intcs_demux);
}
for (irqno = 0; irqno < NR_IRQS; irqno++) {
if (irqno < 32)
- set_irq_chip(irqno, &tcc8000_irq_chip0);
+ irq_set_chip(irqno, &tcc8000_irq_chip0);
else
- set_irq_chip(irqno, &tcc8000_irq_chip1);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip(irqno, &tcc8000_irq_chip1);
+ irq_set_handler(irqno, handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
}
spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- __set_irq_handler_unlocked(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- __set_irq_handler_unlocked(d->irq, handle_edge_irq);
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
return 0;
}
desc->irq_data.chip->irq_ack(&desc->irq_data);
- bank = get_irq_data(irq);
+ bank = irq_get_handler_data(irq);
for (port = 0; port < 4; port++) {
int gpio = tegra_gpio_compose(bank->bank, port, 0);
}
local_irq_restore(flags);
-
- for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
- struct irq_desc *desc = irq_to_desc(i);
- if (!desc || (desc->status & IRQ_WAKEUP))
- continue;
- enable_irq(i);
- }
}
void tegra_gpio_suspend(void)
unsigned long flags;
int b, p, i;
- for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
- struct irq_desc *desc = irq_to_desc(i);
- if (!desc)
- continue;
- if (desc->status & IRQ_WAKEUP) {
- int gpio = i - INT_GPIO_BASE;
- pr_debug("gpio %d.%d is wakeup\n", gpio/8, gpio&7);
- continue;
- }
- disable_irq(i);
- }
-
local_irq_save(flags);
for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- return set_irq_wake(bank->irq, enable);
+ return irq_set_irq_wake(bank->irq, enable);
}
#endif
for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))];
- lockdep_set_class(&irq_desc[i].lock, &gpio_lock_class);
- set_irq_chip_data(i, bank);
- set_irq_chip(i, &tegra_gpio_irq_chip);
- set_irq_handler(i, handle_simple_irq);
+ irq_set_lockdep_class(i, &gpio_lock_class);
+ irq_set_chip_data(i, bank);
+ irq_set_chip_and_handler(i, &tegra_gpio_irq_chip,
+ handle_simple_irq);
set_irq_flags(i, IRQF_VALID);
}
for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
bank = &tegra_gpio_banks[i];
- set_irq_chained_handler(bank->irq, tegra_gpio_irq_handler);
- set_irq_data(bank->irq, bank);
+ irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler);
+ irq_set_handler_data(bank->irq, bank);
for (j = 0; j < 4; j++)
spin_lock_init(&bank->lvl_lock[j]);
gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE),
IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
- gic = get_irq_chip(29);
+ gic = irq_get_chip(29);
tegra_gic_unmask_irq = gic->irq_unmask;
tegra_gic_mask_irq = gic->irq_mask;
tegra_gic_ack_irq = gic->irq_ack;
for (i = 0; i < INT_MAIN_NR; i++) {
irq = INT_PRI_BASE + i;
- set_irq_chip(irq, &tegra_irq);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &tegra_irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
}
static void create_virtual_irq(int irq, struct irq_chip *modem_irq_chip)
{
- set_irq_chip(irq, modem_irq_chip);
- set_irq_handler(irq, handle_simple_irq);
+ irq_set_chip_and_handler(irq, modem_irq_chip, handle_simple_irq);
set_irq_flags(irq, IRQF_VALID);
pr_debug("modem_irq: Created virtual IRQ %d\n", irq);
return -EINVAL;
case IRQF_TRIGGER_HIGH:
dctr |= VT8500_TRIGGER_HIGH;
- irq_desc[orig_irq].handle_irq = handle_level_irq;
+ __irq_set_handler_locked(orig_irq, handle_level_irq);
break;
case IRQF_TRIGGER_FALLING:
dctr |= VT8500_TRIGGER_FALLING;
- irq_desc[orig_irq].handle_irq = handle_edge_irq;
+ __irq_set_handler_locked(orig_irq, handle_edge_irq);
break;
case IRQF_TRIGGER_RISING:
dctr |= VT8500_TRIGGER_RISING;
- irq_desc[orig_irq].handle_irq = handle_edge_irq;
+ __irq_set_handler_locked(orig_irq, handle_edge_irq);
break;
}
writeb(dctr, base + VT8500_IC_DCTR + irq);
/* Disable all interrupts and route them to IRQ */
writeb(0x00, ic_regbase + VT8500_IC_DCTR + i);
- set_irq_chip(i, &vt8500_irq_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &vt8500_irq_chip,
+ handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
} else {
writeb(0x00, sic_regbase + VT8500_IC_DCTR
+ i - 64);
- set_irq_chip(i, &vt8500_irq_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &vt8500_irq_chip,
+ handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
} else {
__raw_writel(0xFFFFFFFE, REG_AIC_MDCR);
for (irqno = IRQ_WDT; irqno <= IRQ_ADC; irqno++) {
- set_irq_chip(irqno, &nuc900_irq_chip);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, &nuc900_irq_chip,
+ handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
}
expio_irq = MXC_BOARD_IRQ_START;
for (; int_valid != 0; int_valid >>= 1, expio_irq++) {
- struct irq_desc *d;
if ((int_valid & 1) == 0)
continue;
- d = irq_desc + expio_irq;
- if (unlikely(!(d->handle_irq)))
- pr_err("\nEXPIO irq: %d unhandled\n", expio_irq);
- else
- d->handle_irq(expio_irq, d);
+ generic_handle_irq(expio_irq);
}
desc->irq_data.chip->irq_ack(&desc->irq_data);
__raw_writew(0x1F, brd_io + INTR_MASK_REG);
for (i = MXC_EXP_IO_BASE;
i < (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES); i++) {
- set_irq_chip(i, &expio_irq_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
- set_irq_type(p_irq, IRQF_TRIGGER_LOW);
- set_irq_chained_handler(p_irq, mxc_expio_irq_handler);
+ irq_set_irq_type(p_irq, IRQF_TRIGGER_LOW);
+ irq_set_chained_handler(p_irq, mxc_expio_irq_handler);
/* Register Lan device on the debugboard */
smsc911x_resources[0].start = LAN9217_BASE_ADDR(base);
__raw_writel(0, avic_base + AVIC_INTTYPEH);
__raw_writel(0, avic_base + AVIC_INTTYPEL);
for (i = 0; i < MXC_INTERNAL_IRQS; i++) {
- set_irq_chip(i, &mxc_avic_chip.base);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &mxc_avic_chip.base,
+ handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
struct resource res[] = {
{
.start = data->iobase,
- .end = data->iobase + SZ_4K,
+ .end = data->iobase + SZ_4K - 1,
.flags = IORESOURCE_MEM,
}, {
.start = data->irq,
struct resource res[] = {
{
.start = data->iobase,
- .end = data->iobase + SZ_16K,
+ .end = data->iobase + SZ_16K - 1,
.flags = IORESOURCE_MEM,
}, {
.start = data->irq,
static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
u32 irq_stat;
- struct mxc_gpio_port *port = get_irq_data(irq);
+ struct mxc_gpio_port *port = irq_get_handler_data(irq);
irq_stat = __raw_readl(port->base + GPIO_ISR) &
__raw_readl(port->base + GPIO_IMR);
{
int i;
u32 irq_msk, irq_stat;
- struct mxc_gpio_port *port = get_irq_data(irq);
+ struct mxc_gpio_port *port = irq_get_handler_data(irq);
/* walk through all interrupt status registers */
for (i = 0; i < gpio_table_size; i++) {
__raw_writel(~0, port[i].base + GPIO_ISR);
for (j = port[i].virtual_irq_start;
j < port[i].virtual_irq_start + 32; j++) {
- set_irq_chip(j, &gpio_irq_chip);
- set_irq_handler(j, handle_level_irq);
+ irq_set_chip_and_handler(j, &gpio_irq_chip,
+ handle_level_irq);
set_irq_flags(j, IRQF_VALID);
}
if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) {
/* setup one handler for each entry */
- set_irq_chained_handler(port[i].irq, mx3_gpio_irq_handler);
- set_irq_data(port[i].irq, &port[i]);
+ irq_set_chained_handler(port[i].irq,
+ mx3_gpio_irq_handler);
+ irq_set_handler_data(port[i].irq, &port[i]);
if (port[i].irq_high) {
/* setup handler for GPIO 16 to 31 */
- set_irq_chained_handler(port[i].irq_high,
- mx3_gpio_irq_handler);
- set_irq_data(port[i].irq_high, &port[i]);
+ irq_set_chained_handler(port[i].irq_high,
+ mx3_gpio_irq_handler);
+ irq_set_handler_data(port[i].irq_high,
+ &port[i]);
}
}
}
if (cpu_is_mx2()) {
/* setup one handler for all GPIO interrupts */
- set_irq_chained_handler(port[0].irq, mx2_gpio_irq_handler);
- set_irq_data(port[0].irq, port);
+ irq_set_chained_handler(port[0].irq, mx2_gpio_irq_handler);
+ irq_set_handler_data(port[0].irq, port);
}
return 0;
#define MX31_AUDMUX_PORT5_SSI_PINS_5 4
#define MX31_AUDMUX_PORT6_SSI_PINS_6 5
+#define MX51_AUDMUX_PORT1_SSI0 0
+#define MX51_AUDMUX_PORT2_SSI1 1
+#define MX51_AUDMUX_PORT3 2
+#define MX51_AUDMUX_PORT4 3
+#define MX51_AUDMUX_PORT5 4
+#define MX51_AUDMUX_PORT6 5
+#define MX51_AUDMUX_PORT7 6
+
/* Register definitions for the i.MX21/27 Digital Audio Multiplexer */
#define MXC_AUDMUX_V1_PCR_INMMASK(x) ((x) & 0xff)
#define MXC_AUDMUX_V1_PCR_INMEN (1 << 8)
#define MXC_AUDMUX_V1_PCR_TCLKDIR (1 << 30)
#define MXC_AUDMUX_V1_PCR_TFSDIR (1 << 31)
-/* Register definitions for the i.MX25/31/35 Digital Audio Multiplexer */
+/* Register definitions for the i.MX25/31/35/51 Digital Audio Multiplexer */
#define MXC_AUDMUX_V2_PTCR_TFSDIR (1 << 31)
#define MXC_AUDMUX_V2_PTCR_TFSEL(x) (((x) & 0xf) << 27)
#define MXC_AUDMUX_V2_PTCR_TCLKDIR (1 << 26)
#define PC31_PF_SSI3_CLK (GPIO_PORTC | GPIO_PF | GPIO_IN | 31)
#define PD17_PF_I2C_DATA (GPIO_PORTD | GPIO_PF | GPIO_OUT | 17)
#define PD18_PF_I2C_CLK (GPIO_PORTD | GPIO_PF | GPIO_OUT | 18)
-#define PD19_PF_CSPI2_SS2 (GPIO_PORTD | GPIO_PF | 19)
-#define PD20_PF_CSPI2_SS1 (GPIO_PORTD | GPIO_PF | 20)
-#define PD21_PF_CSPI2_SS0 (GPIO_PORTD | GPIO_PF | 21)
-#define PD22_PF_CSPI2_SCLK (GPIO_PORTD | GPIO_PF | 22)
-#define PD23_PF_CSPI2_MISO (GPIO_PORTD | GPIO_PF | 23)
-#define PD24_PF_CSPI2_MOSI (GPIO_PORTD | GPIO_PF | 24)
+#define PD19_PF_CSPI2_SS2 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 19)
+#define PD20_PF_CSPI2_SS1 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 20)
+#define PD21_PF_CSPI2_SS0 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 21)
+#define PD22_PF_CSPI2_SCLK (GPIO_PORTD | GPIO_PF | GPIO_OUT | 22)
+#define PD23_PF_CSPI2_MISO (GPIO_PORTD | GPIO_PF | GPIO_IN | 23)
+#define PD24_PF_CSPI2_MOSI (GPIO_PORTD | GPIO_PF | GPIO_OUT | 24)
#define PD25_PF_CSPI1_RDY (GPIO_PORTD | GPIO_PF | GPIO_OUT | 25)
#define PD26_PF_CSPI1_SS2 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 26)
#define PD27_PF_CSPI1_SS1 (GPIO_PORTD | GPIO_PF | GPIO_OUT | 27)
#define MX50_INT_APBHDMA_CHAN6 116
#define MX50_INT_APBHDMA_CHAN7 117
+#if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS)
+extern int mx50_revision(void);
+#endif
+
#endif /* ifndef __MACH_MX50_H__ */
#if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS)
extern int mx51_revision(void);
+extern void mx51_display_revision(void);
#endif
/* tape-out 1 defines */
#define IMX_CHIP_REVISION_3_3 0x33
#define IMX_CHIP_REVISION_UNKNOWN 0xff
+#define IMX_CHIP_REVISION_1_0_STRING "1.0"
+#define IMX_CHIP_REVISION_1_1_STRING "1.1"
+#define IMX_CHIP_REVISION_1_2_STRING "1.2"
+#define IMX_CHIP_REVISION_1_3_STRING "1.3"
+#define IMX_CHIP_REVISION_2_0_STRING "2.0"
+#define IMX_CHIP_REVISION_2_1_STRING "2.1"
+#define IMX_CHIP_REVISION_2_2_STRING "2.2"
+#define IMX_CHIP_REVISION_2_3_STRING "2.3"
+#define IMX_CHIP_REVISION_3_0_STRING "3.0"
+#define IMX_CHIP_REVISION_3_1_STRING "3.1"
+#define IMX_CHIP_REVISION_3_2_STRING "3.2"
+#define IMX_CHIP_REVISION_3_3_STRING "3.3"
+#define IMX_CHIP_REVISION_UNKNOWN_STRING "unknown"
+
#ifndef __ASSEMBLY__
extern unsigned int __mxc_cpu_type;
#endif
u32 cpu_rate;
};
+int tzic_enable_wake(int is_idle);
+enum mxc_cpu_pwr_mode {
+ WAIT_CLOCKED, /* wfi only */
+ WAIT_UNCLOCKED, /* WAIT */
+ WAIT_UNCLOCKED_POWER_OFF, /* WAIT + SRPG */
+ STOP_POWER_ON, /* just STOP */
+ STOP_POWER_OFF, /* STOP + SRPG */
+};
+
extern struct cpu_op *(*get_cpu_op)(int *op);
#endif
#include <mach/hardware.h>
#include <mach/common.h>
+extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode);
+
static inline void arch_idle(void)
{
#ifdef CONFIG_ARCH_MXC91231
"orr %0, %0, #0x00000004\n"
"mcr p15, 0, %0, c1, c0, 0\n"
: "=r" (reg));
- } else
+ } else if (cpu_is_mx51())
+ mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+ else
cpu_do_idle();
}
ret = -ENOSYS;
- base = get_irq_chip(irq);
+ base = irq_get_chip(irq);
if (base) {
chip = container_of(base, struct mxc_irq_chip, base);
if (chip->set_priority)
ret = -ENOSYS;
- base = get_irq_chip(irq);
+ base = irq_get_chip(irq);
if (base) {
chip = container_of(base, struct mxc_irq_chip, base);
if (chip->set_irq_fiq)
#include <linux/clk.h>
#include <mach/hardware.h>
+#include <asm/sched_clock.h>
#include <asm/mach/time.h>
#include <mach/common.h>
__raw_writel(V2_TSTAT_OF1, timer_base + V2_TSTAT);
}
+static cycle_t dummy_get_cycles(struct clocksource *cs)
+{
+ return 0;
+}
+
static cycle_t mx1_2_get_cycles(struct clocksource *cs)
{
return __raw_readl(timer_base + MX1_2_TCN);
static struct clocksource clocksource_mxc = {
.name = "mxc_timer1",
.rating = 200,
- .read = mx1_2_get_cycles,
+ .read = dummy_get_cycles,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static DEFINE_CLOCK_DATA(cd);
+unsigned long long notrace sched_clock(void)
+{
+ cycle_t cyc = clocksource_mxc.read(&clocksource_mxc);
+
+ return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+static void notrace mxc_update_sched_clock(void)
+{
+ cycle_t cyc = clocksource_mxc.read(&clocksource_mxc);
+ update_sched_clock(&cd, cyc, (u32)~0);
+}
+
static int __init mxc_clocksource_init(struct clk *timer_clk)
{
unsigned int c = clk_get_rate(timer_clk);
if (timer_is_v2())
clocksource_mxc.read = v2_get_cycles;
+ else
+ clocksource_mxc.read = mx1_2_get_cycles;
+ init_sched_clock(&cd, mxc_update_sched_clock, 32, c);
clocksource_register_hz(&clocksource_mxc, c);
return 0;
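
The clocksource/sched_clock hunk above follows the ARM helpers of this era: the clocksource's read hook starts out as dummy_get_cycles so an early sched_clock() call returns 0 instead of touching an unmapped timer, and the real read function plus init_sched_clock() are only installed once the timer clock rate is known. Below is a minimal sketch of that pattern for a hypothetical free-running 32-bit up-counter; my_timer_base, MY_TIMER_COUNT and my_sched_clock_init() are illustrative names, not part of this patch.

	#include <linux/init.h>
	#include <linux/io.h>
	#include <asm/sched_clock.h>

	#define MY_TIMER_COUNT	0x10		/* hypothetical counter register */

	static void __iomem *my_timer_base;
	static DEFINE_CLOCK_DATA(cd);

	unsigned long long notrace sched_clock(void)
	{
		/* may run before the timer is mapped; report 0 until then */
		u32 cyc = my_timer_base ? readl(my_timer_base + MY_TIMER_COUNT) : 0;

		return cyc_to_sched_clock(&cd, cyc, (u32)~0);
	}

	static void notrace my_update_sched_clock(void)
	{
		u32 cyc = readl(my_timer_base + MY_TIMER_COUNT);

		update_sched_clock(&cd, cyc, (u32)~0);
	}

	static void __init my_sched_clock_init(void __iomem *base, unsigned long rate)
	{
		my_timer_base = base;
		init_sched_clock(&cd, my_update_sched_clock, 32, rate);
	}
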
/* all IRQ no FIQ Warning :: No selection */
for (i = 0; i < MXC_INTERNAL_IRQS; i++) {
- set_irq_chip(i, &mxc_tzic_chip.base);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, &mxc_tzic_chip.base,
+ handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
u32 rwimsc;
u32 fwimsc;
u32 slpm;
+ u32 enabled;
};
static struct nmk_gpio_chip *
struct nmk_gpio_chip *nmk_chip;
int pin = PIN_NUM(cfgs[i]);
- nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(pin));
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(pin));
if (!nmk_chip) {
ret = -EINVAL;
break;
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
if (!nmk_chip)
return -EINVAL;
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
if (!nmk_chip)
return -EINVAL;
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
if (!nmk_chip)
return -EINVAL;
struct nmk_gpio_chip *nmk_chip;
u32 afunc, bfunc, bit;
- nmk_chip = get_irq_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
+ nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));
if (!nmk_chip)
return -EINVAL;
static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
int gpio, bool on)
{
-#ifdef CONFIG_ARCH_U8500
- if (cpu_is_u8500v2()) {
- __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base,
- on ? NMK_GPIO_SLPM_WAKEUP_ENABLE
- : NMK_GPIO_SLPM_WAKEUP_DISABLE);
- }
-#endif
__nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
}
if (!nmk_chip)
return -EINVAL;
+ if (enable)
+ nmk_chip->enabled |= bitmask;
+ else
+ nmk_chip->enabled &= ~bitmask;
+
spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
spin_lock(&nmk_chip->lock);
static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
- bool enabled = !(desc->status & IRQ_DISABLED);
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
u32 bitmask;
spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
spin_lock(&nmk_chip->lock);
- if (!enabled)
+ if (!(nmk_chip->enabled & bitmask))
__nmk_gpio_set_wake(nmk_chip, gpio, on);
if (on)
static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
- bool enabled = !(desc->status & IRQ_DISABLED);
- bool wake = desc->wake_depth;
+ bool enabled, wake = irqd_is_wakeup_set(d);
int gpio;
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
if (type & IRQ_TYPE_LEVEL_LOW)
return -EINVAL;
+ enabled = nmk_chip->enabled & bitmask;
+
spin_lock_irqsave(&nmk_chip->lock, flags);
if (enabled)
u32 status)
{
struct nmk_gpio_chip *nmk_chip;
- struct irq_chip *host_chip = get_irq_chip(irq);
+ struct irq_chip *host_chip = irq_get_chip(irq);
unsigned int first_irq;
if (host_chip->irq_mask_ack)
host_chip->irq_ack(&desc->irq_data);
}
- nmk_chip = get_irq_data(irq);
+ nmk_chip = irq_get_handler_data(irq);
first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
while (status) {
int bit = __ffs(status);
static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
- struct nmk_gpio_chip *nmk_chip = get_irq_data(irq);
+ struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
u32 status = readl(nmk_chip->addr + NMK_GPIO_IS);
__nmk_gpio_irq_handler(irq, desc, status);
static void nmk_gpio_secondary_irq_handler(unsigned int irq,
struct irq_desc *desc)
{
- struct nmk_gpio_chip *nmk_chip = get_irq_data(irq);
+ struct nmk_gpio_chip *nmk_chip = irq_get_handler_data(irq);
u32 status = nmk_chip->get_secondary_status(nmk_chip->bank);
__nmk_gpio_irq_handler(irq, desc, status);
first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
for (i = first_irq; i < first_irq + nmk_chip->chip.ngpio; i++) {
- set_irq_chip(i, &nmk_gpio_irq_chip);
- set_irq_handler(i, handle_edge_irq);
+ irq_set_chip_and_handler(i, &nmk_gpio_irq_chip,
+ handle_edge_irq);
set_irq_flags(i, IRQF_VALID);
- set_irq_chip_data(i, nmk_chip);
- set_irq_type(i, IRQ_TYPE_EDGE_FALLING);
+ irq_set_chip_data(i, nmk_chip);
+ irq_set_irq_type(i, IRQ_TYPE_EDGE_FALLING);
}
- set_irq_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);
- set_irq_data(nmk_chip->parent_irq, nmk_chip);
+ irq_set_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);
+ irq_set_handler_data(nmk_chip->parent_irq, nmk_chip);
if (nmk_chip->secondary_parent_irq >= 0) {
- set_irq_chained_handler(nmk_chip->secondary_parent_irq,
+ irq_set_chained_handler(nmk_chip->secondary_parent_irq,
nmk_gpio_secondary_irq_handler);
- set_irq_data(nmk_chip->secondary_parent_irq, nmk_chip);
+ irq_set_handler_data(nmk_chip->secondary_parent_irq, nmk_chip);
}
return 0;
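
The conversion above is the recurring shape for banked GPIO interrupt controllers throughout this series: each per-GPIO descriptor gets its chip, flow handler and chip_data through the renamed irq_set_* helpers, while the upstream line gets a chained handler plus handler_data pointing back at the bank. A stripped-down sketch of that registration follows; every my_* name is an illustrative placeholder, and on ARM the real code additionally marks each descriptor usable with set_irq_flags(i, IRQF_VALID) as seen above.

	#include <linux/init.h>
	#include <linux/irq.h>
	#include <linux/bitops.h>

	struct my_bank {
		int first_irq;		/* first linux IRQ of the bank */
		int ngpio;		/* number of GPIO lines */
		int parent_irq;		/* upstream (chained) interrupt */
	};

	extern struct irq_chip my_gpio_irq_chip;
	extern u32 my_read_pending(struct my_bank *bank);	/* hypothetical */

	static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
	{
		struct my_bank *bank = irq_get_handler_data(irq);
		u32 pending = my_read_pending(bank);

		while (pending) {
			int bit = __ffs(pending);

			pending &= ~(1 << bit);
			generic_handle_irq(bank->first_irq + bit);
		}
	}

	static void __init my_gpio_irq_setup(struct my_bank *bank)
	{
		int i;

		for (i = bank->first_irq; i < bank->first_irq + bank->ngpio; i++) {
			irq_set_chip_and_handler(i, &my_gpio_irq_chip, handle_edge_irq);
			irq_set_chip_data(i, bank);	/* seen by the chip callbacks */
		}

		/* the demux handler fetches the bank via irq_get_handler_data() */
		irq_set_chained_handler(bank->parent_irq, my_demux_handler);
		irq_set_handler_data(bank->parent_irq, bank);
	}
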
bank = irq_data_get_irq_chip_data(d);
spin_lock_irqsave(&bank->lock, flags);
retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
- if (retval == 0) {
- struct irq_desc *desc = irq_to_desc(d->irq);
-
- desc->status &= ~IRQ_TYPE_SENSE_MASK;
- desc->status |= type;
- }
spin_unlock_irqrestore(&bank->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- __set_irq_handler_unlocked(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- __set_irq_handler_unlocked(d->irq, handle_edge_irq);
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
return retval;
}
desc->irq_data.chip->irq_ack(&desc->irq_data);
- bank = get_irq_data(irq);
+ bank = irq_get_handler_data(irq);
#ifdef CONFIG_ARCH_OMAP1
if (bank->method == METHOD_MPUIO)
isr_reg = bank->base +
unsigned int gpio = d->irq - IH_GPIO_BASE;
struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
unsigned int irq_mask = 1 << get_gpio_index(gpio);
- struct irq_desc *desc = irq_to_desc(d->irq);
- u32 trigger = desc->status & IRQ_TYPE_SENSE_MASK;
+ u32 trigger = irqd_get_trigger_type(d);
if (trigger)
_set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
for (j = bank->virtual_irq_start;
j < bank->virtual_irq_start + bank_width; j++) {
- struct irq_desc *d = irq_to_desc(j);
-
- lockdep_set_class(&d->lock, &gpio_lock_class);
- set_irq_chip_data(j, bank);
+ irq_set_lockdep_class(j, &gpio_lock_class);
+ irq_set_chip_data(j, bank);
if (bank_is_mpuio(bank))
- set_irq_chip(j, &mpuio_irq_chip);
+ irq_set_chip(j, &mpuio_irq_chip);
else
- set_irq_chip(j, &gpio_irq_chip);
- set_irq_handler(j, handle_simple_irq);
+ irq_set_chip(j, &gpio_irq_chip);
+ irq_set_handler(j, handle_simple_irq);
set_irq_flags(j, IRQF_VALID);
}
- set_irq_chained_handler(bank->irq, gpio_irq_handler);
- set_irq_data(bank->irq, bank);
+ irq_set_chained_handler(bank->irq, gpio_irq_handler);
+ irq_set_handler_data(bank->irq, bank);
}
static int __devinit omap_gpio_probe(struct platform_device *pdev)
static void gpio_irq_ack(struct irq_data *d)
{
struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d);
- int type;
+ int type = irqd_get_trigger_type(d);
- type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK;
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
int pin = d->irq - ochip->secondary_irq_base;
static void gpio_irq_mask(struct irq_data *d)
{
struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d);
- int type;
+ int type = irqd_get_trigger_type(d);
void __iomem *reg;
int pin;
- type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK;
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
reg = GPIO_EDGE_MASK(ochip);
else
static void gpio_irq_unmask(struct irq_data *d)
{
struct orion_gpio_chip *ochip = irq_data_get_irq_chip_data(d);
- int type;
+ int type = irqd_get_trigger_type(d);
void __iomem *reg;
int pin;
- type = irq_desc[d->irq].status & IRQ_TYPE_SENSE_MASK;
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
reg = GPIO_EDGE_MASK(ochip);
else
* Set edge/level type.
*/
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
- set_irq_handler(d->irq, handle_edge_irq);
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
} else if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
- set_irq_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
} else {
printk(KERN_ERR "failed to set irq=%d (type=%d)\n",
d->irq, type);
for (i = 0; i < ngpio; i++) {
unsigned int irq = secondary_irq_base + i;
- set_irq_chip(irq, &orion_gpio_irq_chip);
- set_irq_handler(irq, handle_level_irq);
- set_irq_chip_data(irq, ochip);
- irq_desc[irq].status |= IRQ_LEVEL;
+ irq_set_chip_and_handler(irq, &orion_gpio_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, ochip);
+ irq_set_status_flags(irq, IRQ_LEVEL);
set_irq_flags(irq, IRQF_VALID);
}
}
void orion_gpio_irq_handler(int pinoff)
{
struct orion_gpio_chip *ochip;
- u32 cause;
+ u32 cause, type;
int i;
ochip = orion_gpio_chip_find(pinoff);
for (i = 0; i < ochip->chip.ngpio; i++) {
int irq;
- struct irq_desc *desc;
irq = ochip->secondary_irq_base + i;
if (!(cause & (1 << i)))
continue;
- desc = irq_desc + irq;
- if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ type = irqd_get_trigger_type(irq_get_irq_data(irq));
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
/* Swap polarity (race with GPIO line) */
u32 polarity;
polarity ^= 1 << i;
writel(polarity, GPIO_IN_POL(ochip));
}
-
- desc_handle_irq(irq, desc);
+ generic_handle_irq(irq);
}
}
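
The orion handler now asks the core for the trigger type through the irq_data accessors instead of reading irq_desc[irq].status directly, and hands the demuxed line to generic_handle_irq(). A tiny illustrative helper showing just that lookup in isolation; my_irq_is_edge_both() is not part of the patch.

	#include <linux/types.h>
	#include <linux/irq.h>

	static bool my_irq_is_edge_both(unsigned int irq)
	{
		u32 type = irqd_get_trigger_type(irq_get_irq_data(irq));

		return (type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH;
	}
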
for (i = 0; i < 32; i++) {
unsigned int irq = irq_start + i;
- set_irq_chip(irq, &orion_irq_chip);
- set_irq_chip_data(irq, maskaddr);
- set_irq_handler(irq, handle_level_irq);
- irq_desc[irq].status |= IRQ_LEVEL;
+ irq_set_chip_and_handler(irq, &orion_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, maskaddr);
+ irq_set_status_flags(irq, IRQ_LEVEL);
set_irq_flags(irq, IRQF_VALID);
}
}
}
for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) {
- set_irq_chip(irq, &pxa_muxed_gpio_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
+ handle_edge_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
/* Install handler for GPIO>=2 edge detect interrupts */
- set_irq_chained_handler(mux_irq, pxa_gpio_demux_handler);
+ irq_set_chained_handler(mux_irq, pxa_gpio_demux_handler);
pxa_muxed_gpio_chip.irq_set_wake = fn;
}
case IRQ_UART1:
case IRQ_UART2:
case IRQ_ADCPARENT:
- set_irq_chip(irqno, &s3c_irq_level_chip);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, &s3c_irq_level_chip,
+ handle_level_irq);
break;
case IRQ_RESERVED6:
default:
//irqdbf("registering irq %d (s3c irq)\n", irqno);
- set_irq_chip(irqno, &s3c_irq_chip);
- set_irq_handler(irqno, handle_edge_irq);
+ irq_set_chip_and_handler(irqno, &s3c_irq_chip,
+ handle_edge_irq);
set_irq_flags(irqno, IRQF_VALID);
}
}
/* setup the cascade irq handlers */
- set_irq_chained_handler(IRQ_EINT4t7, s3c_irq_demux_extint4t7);
- set_irq_chained_handler(IRQ_EINT8t23, s3c_irq_demux_extint8);
+ irq_set_chained_handler(IRQ_EINT4t7, s3c_irq_demux_extint4t7);
+ irq_set_chained_handler(IRQ_EINT8t23, s3c_irq_demux_extint8);
- set_irq_chained_handler(IRQ_UART0, s3c_irq_demux_uart0);
- set_irq_chained_handler(IRQ_UART1, s3c_irq_demux_uart1);
- set_irq_chained_handler(IRQ_UART2, s3c_irq_demux_uart2);
- set_irq_chained_handler(IRQ_ADCPARENT, s3c_irq_demux_adc);
+ irq_set_chained_handler(IRQ_UART0, s3c_irq_demux_uart0);
+ irq_set_chained_handler(IRQ_UART1, s3c_irq_demux_uart1);
+ irq_set_chained_handler(IRQ_UART2, s3c_irq_demux_uart2);
+ irq_set_chained_handler(IRQ_ADCPARENT, s3c_irq_demux_adc);
/* external interrupts */
for (irqno = IRQ_EINT0; irqno <= IRQ_EINT3; irqno++) {
irqdbf("registering irq %d (ext int)\n", irqno);
- set_irq_chip(irqno, &s3c_irq_eint0t4);
- set_irq_handler(irqno, handle_edge_irq);
+ irq_set_chip_and_handler(irqno, &s3c_irq_eint0t4,
+ handle_edge_irq);
set_irq_flags(irqno, IRQF_VALID);
}
for (irqno = IRQ_EINT4; irqno <= IRQ_EINT23; irqno++) {
irqdbf("registering irq %d (extended s3c irq)\n", irqno);
- set_irq_chip(irqno, &s3c_irqext_chip);
- set_irq_handler(irqno, handle_edge_irq);
+ irq_set_chip_and_handler(irqno, &s3c_irqext_chip,
+ handle_edge_irq);
set_irq_flags(irqno, IRQF_VALID);
}
for (irqno = IRQ_S3CUART_RX0; irqno <= IRQ_S3CUART_ERR0; irqno++) {
irqdbf("registering irq %d (s3c uart0 irq)\n", irqno);
- set_irq_chip(irqno, &s3c_irq_uart0);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, &s3c_irq_uart0,
+ handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
for (irqno = IRQ_S3CUART_RX1; irqno <= IRQ_S3CUART_ERR1; irqno++) {
irqdbf("registering irq %d (s3c uart1 irq)\n", irqno);
- set_irq_chip(irqno, &s3c_irq_uart1);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, &s3c_irq_uart1,
+ handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
for (irqno = IRQ_S3CUART_RX2; irqno <= IRQ_S3CUART_ERR2; irqno++) {
irqdbf("registering irq %d (s3c uart2 irq)\n", irqno);
- set_irq_chip(irqno, &s3c_irq_uart2);
- set_irq_handler(irqno, handle_level_irq);
+ irq_set_chip_and_handler(irqno, &s3c_irq_uart2,
+ handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
for (irqno = IRQ_TC; irqno <= IRQ_ADC; irqno++) {
irqdbf("registering irq %d (s3c adc irq)\n", irqno);
- set_irq_chip(irqno, &s3c_irq_adc);
- set_irq_handler(irqno, handle_edge_irq);
+ irq_set_chip_and_handler(irqno, &s3c_irq_adc, handle_edge_irq);
set_irq_flags(irqno, IRQF_VALID);
}
int irq;
for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
- set_irq_chip(irq, &s5p_irq_vic_eint);
+ irq_set_chip(irq, &s5p_irq_vic_eint);
for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
- set_irq_chip(irq, &s5p_irq_eint);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
- set_irq_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
+ irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
return 0;
}
static int s5p_gpioint_get_offset(struct irq_data *data)
{
- struct s3c_gpio_chip *chip = irq_data_get_irq_data(data);
+ struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data);
return data->irq - chip->irq_base;
}
static void s5p_gpioint_ack(struct irq_data *data)
{
- struct s3c_gpio_chip *chip = irq_data_get_irq_data(data);
+ struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data);
int group, offset, pend_offset;
unsigned int value;
static void s5p_gpioint_mask(struct irq_data *data)
{
- struct s3c_gpio_chip *chip = irq_data_get_irq_data(data);
+ struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data);
int group, offset, mask_offset;
unsigned int value;
static void s5p_gpioint_unmask(struct irq_data *data)
{
- struct s3c_gpio_chip *chip = irq_data_get_irq_data(data);
+ struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data);
int group, offset, mask_offset;
unsigned int value;
static int s5p_gpioint_set_type(struct irq_data *data, unsigned int type)
{
- struct s3c_gpio_chip *chip = irq_data_get_irq_data(data);
+ struct s3c_gpio_chip *chip = irq_data_get_irq_handler_data(data);
int group, offset, con_offset;
unsigned int value;
static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
{
- struct s5p_gpioint_bank *bank = get_irq_data(irq);
+ struct s5p_gpioint_bank *bank = irq_get_handler_data(irq);
int group, pend_offset, mask_offset;
unsigned int pend, mask;
if (!bank->chips)
return -ENOMEM;
- set_irq_chained_handler(bank->irq, s5p_gpioint_handler);
- set_irq_data(bank->irq, bank);
+ irq_set_chained_handler(bank->irq, s5p_gpioint_handler);
+ irq_set_handler_data(bank->irq, bank);
bank->handler = s5p_gpioint_handler;
printk(KERN_INFO "Registered chained gpio int handler for interrupt %d.\n",
bank->irq);
bank->chips[group - bank->start] = chip;
for (i = 0; i < chip->chip.ngpio; i++) {
irq = chip->irq_base + i;
- set_irq_chip(irq, &s5p_gpioint);
- set_irq_data(irq, chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip(irq, &s5p_gpioint);
+ irq_set_handler_data(irq, chip);
+ irq_set_handler(irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
return 0;
static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
{
- struct irq_desc *desc = irq_to_desc(uirq->parent_irq);
void __iomem *reg_base = uirq->regs;
unsigned int irq;
int offs;
for (offs = 0; offs < 3; offs++) {
irq = uirq->base_irq + offs;
- set_irq_chip(irq, &s3c_irq_uart);
- set_irq_chip_data(irq, uirq);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &s3c_irq_uart, handle_level_irq);
+ irq_set_chip_data(irq, uirq);
set_irq_flags(irq, IRQF_VALID);
}
- desc->irq_data.handler_data = uirq;
- set_irq_chained_handler(uirq->parent_irq, s3c_irq_demux_uart);
+ irq_set_handler_data(uirq->parent_irq, uirq);
+ irq_set_chained_handler(uirq->parent_irq, s3c_irq_demux_uart);
}
/**
void __init s3c_init_vic_timer_irq(unsigned int parent_irq,
unsigned int timer_irq)
{
- struct irq_desc *desc = irq_to_desc(parent_irq);
- set_irq_chained_handler(parent_irq, s3c_irq_demux_vic_timer);
+ irq_set_chained_handler(parent_irq, s3c_irq_demux_vic_timer);
+ irq_set_handler_data(parent_irq, (void *)timer_irq);
- set_irq_chip(timer_irq, &s3c_irq_timer);
- set_irq_chip_data(timer_irq, (void *)(1 << (timer_irq - IRQ_TIMER0)));
- set_irq_handler(timer_irq, handle_level_irq);
+ irq_set_chip_and_handler(timer_irq, &s3c_irq_timer, handle_level_irq);
+ irq_set_chip_data(timer_irq, (void *)(1 << (timer_irq - IRQ_TIMER0)));
set_irq_flags(timer_irq, IRQF_VALID);
-
- desc->irq_data.handler_data = (void *)timer_irq;
}
void samsung_sync_wakemask(void __iomem *reg,
struct samsung_wakeup_mask *mask, int nr_mask)
{
- struct irq_desc *desc;
+ struct irq_data *data;
u32 val;
val = __raw_readl(reg);
continue;
}
- desc = irq_to_desc(mask->irq);
+ data = irq_get_irq_data(mask->irq);
- /* bit of a liberty to read this directly from irq_desc. */
- if (desc->wake_depth > 0)
+ /* bit of a liberty to read this directly from irq_data. */
+ if (irqd_is_wakeup_set(data))
val &= ~mask->bit;
else
val |= mask->bit;
static void shirq_handler(unsigned irq, struct irq_desc *desc)
{
u32 i, val, mask;
- struct spear_shirq *shirq = get_irq_data(irq);
+ struct spear_shirq *shirq = irq_get_handler_data(irq);
desc->irq_data.chip->irq_ack(&desc->irq_data);
while ((val = readl(shirq->regs.base + shirq->regs.status_reg) &
if (!shirq->dev_count)
return -EINVAL;
- set_irq_chained_handler(shirq->irq, shirq_handler);
+ irq_set_chained_handler(shirq->irq, shirq_handler);
for (i = 0; i < shirq->dev_count; i++) {
- set_irq_chip(shirq->dev_config[i].virq, &shirq_chip);
- set_irq_handler(shirq->dev_config[i].virq, handle_simple_irq);
+ irq_set_chip_and_handler(shirq->dev_config[i].virq,
+ &shirq_chip, handle_simple_irq);
set_irq_flags(shirq->dev_config[i].virq, IRQF_VALID);
- set_irq_chip_data(shirq->dev_config[i].virq, shirq);
+ irq_set_chip_data(shirq->dev_config[i].virq, shirq);
}
- set_irq_data(shirq->irq, shirq);
+ irq_set_handler_data(shirq->irq, shirq);
return 0;
}
/* Disable all interrupts initially */
for (i = 0; i < NR_REAL_IRQS; i++) {
chip->irq_mask(irq_get_irq_data(i));
- set_irq_chip(i, chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip_and_handler(i, chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
static void stmp3xxx_gpio_irq(u32 irq, struct irq_desc *desc)
{
- struct stmp3xxx_pinmux_bank *pm = get_irq_data(irq);
+ struct stmp3xxx_pinmux_bank *pm = irq_get_handler_data(irq);
int gpio_irq = pm->virq;
u32 stat = __raw_readl(pm->irqstat);
while (stat) {
if (stat & 1)
- irq_desc[gpio_irq].handle_irq(gpio_irq,
- &irq_desc[gpio_irq]);
+ generic_handle_irq(gpio_irq);
gpio_irq++;
stat >>= 1;
}
for (virq = pm->virq; virq < pm->virq; virq++) {
gpio_irq_chip.irq_mask(irq_get_irq_data(virq));
- set_irq_chip(virq, &gpio_irq_chip);
- set_irq_handler(virq, handle_level_irq);
+ irq_set_chip_and_handler(virq, &gpio_irq_chip,
+ handle_level_irq);
set_irq_flags(virq, IRQF_VALID);
}
r = gpiochip_add(&pm->chip);
if (r < 0)
break;
- set_irq_chained_handler(pm->irq, stmp3xxx_gpio_irq);
- set_irq_data(pm->irq, pm);
+ irq_set_chained_handler(pm->irq, stmp3xxx_gpio_irq);
+ irq_set_handler_data(pm->irq, pm);
}
return r;
}
static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc)
{
- struct fpga_irq_data *f = get_irq_desc_data(desc);
+ struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
u32 status = readl(f->base + IRQ_STATUS);
if (status == 0) {
f->chip.irq_unmask = fpga_irq_unmask;
if (parent_irq != -1) {
- set_irq_data(parent_irq, f);
- set_irq_chained_handler(parent_irq, fpga_irq_handle);
+ irq_set_handler_data(parent_irq, f);
+ irq_set_chained_handler(parent_irq, fpga_irq_handle);
}
for (i = 0; i < 32; i++) {
if (valid & (1 << i)) {
unsigned int irq = f->irq_start + i;
- set_irq_chip_data(irq, f);
- set_irq_chip(irq, &f->chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip_data(irq, f);
+ irq_set_chip_and_handler(irq, &f->chip,
+ handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
select GENERIC_IRQ_PROBE
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_SHOW
- select GENERIC_HARDIRQS_NO_DEPRECATED
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct pio_device *pio = get_irq_desc_chip_data(desc);
+ struct pio_device *pio = irq_desc_get_chip_data(desc);
unsigned gpio_irq;
gpio_irq = (unsigned) irq_get_handler_data(irq);
select GENERIC_ATOMIC64
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU if SMP
- select GENERIC_HARDIRQS_NO_DEPRECATED
config GENERIC_CSUM
def_bool y
# CONFIG_WIRELESS is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
CONFIG_MTD_PHYSMAP=m
CONFIG_MTD_NAND=m
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
CONFIG_PHYLIB=y
CONFIG_SMSC_PHY=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=16384
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT25=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_GPIO_ADDR=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_RAM=y
CONFIG_MTD_PHYSMAP=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_MTD_NAND_PLATFORM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
CONFIG_DAVICOM_PHY=y
CONFIG_NET_ETHERNET=y
CONFIG_MTD_M25P80=y
# CONFIG_M25PXX_USE_FAST_READ is not set
CONFIG_BLK_DEV_RAM=y
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT25=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_MTD_UCLINUX=y
CONFIG_MTD_NAND=m
CONFIG_BLK_DEV_RAM=y
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT25=m
CONFIG_NETDEVICES=y
# CONFIG_NETDEV_1000 is not set
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
-#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#ifndef CONFIG_SMP
#endif /* CONFIG_SMP */
+/* Needs to be after test_bit and friends */
+#include <asm-generic/bitops/le.h>
+
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of an N-bit word
seq_printf(p, "%3d: ", i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
- seq_printf(p, " %8s", get_irq_desc_chip(desc)->name);
+ seq_printf(p, " %8s", irq_desc_get_chip(desc)->name);
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, " %s", action->name);
* Licensed under the GPL-2 or later
*/
-#define pr_fmt(fmt) "module %s: " fmt
+#define pr_fmt(fmt) "module %s: " fmt, mod->name
#include <linux/moduleloader.h>
#include <linux/elf.h>
dest = l1_inst_sram_alloc(s->sh_size);
mod->arch.text_l1 = dest;
if (dest == NULL) {
- pr_err("L1 inst memory allocation failed\n",
- mod->name);
+ pr_err("L1 inst memory allocation failed\n");
return -1;
}
dma_memcpy(dest, (void *)s->sh_addr, s->sh_size);
dest = l1_data_sram_alloc(s->sh_size);
mod->arch.data_a_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n",
- mod->name);
+ pr_err("L1 data memory allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
dest = l1_data_sram_zalloc(s->sh_size);
mod->arch.bss_a_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n",
- mod->name);
+ pr_err("L1 data memory allocation failed\n");
return -1;
}
dest = l1_data_B_sram_alloc(s->sh_size);
mod->arch.data_b_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n",
- mod->name);
+ pr_err("L1 data memory allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
dest = l1_data_B_sram_alloc(s->sh_size);
mod->arch.bss_b_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n",
- mod->name);
+ pr_err("L1 data memory allocation failed\n");
return -1;
}
memset(dest, 0, s->sh_size);
dest = l2_sram_alloc(s->sh_size);
mod->arch.text_l2 = dest;
if (dest == NULL) {
- pr_err("L2 SRAM allocation failed\n",
- mod->name);
+ pr_err("L2 SRAM allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
dest = l2_sram_alloc(s->sh_size);
mod->arch.data_l2 = dest;
if (dest == NULL) {
- pr_err("L2 SRAM allocation failed\n",
- mod->name);
+ pr_err("L2 SRAM allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
dest = l2_sram_zalloc(s->sh_size);
mod->arch.bss_l2 = dest;
if (dest == NULL) {
- pr_err("L2 SRAM allocation failed\n",
- mod->name);
+ pr_err("L2 SRAM allocation failed\n");
return -1;
}
int
apply_relocate(Elf_Shdr * sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec, struct module *me)
+ unsigned int symindex, unsigned int relsec, struct module *mod)
{
- pr_err(".rel unsupported\n", me->name);
+ pr_err(".rel unsupported\n");
return -ENOEXEC;
}
Elf32_Sym *sym;
unsigned long location, value, size;
- pr_debug("applying relocate section %u to %u\n", mod->name,
+ pr_debug("applying relocate section %u to %u\n",
relsec, sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
#ifdef CONFIG_SMP
if (location >= COREB_L1_DATA_A_START) {
- pr_err("cannot relocate in L1: %u (SMP kernel)",
- mod->name, ELF32_R_TYPE(rel[i].r_info));
+ pr_err("cannot relocate in L1: %u (SMP kernel)\n",
+ ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
#endif
pr_debug("location is %lx, value is %lx type is %d\n",
- mod->name, location, value, ELF32_R_TYPE(rel[i].r_info));
+ location, value, ELF32_R_TYPE(rel[i].r_info));
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_BFIN_PCREL12_JUMP_S:
case R_BFIN_PCREL10:
pr_err("unsupported relocation: %u (no -mlong-calls?)\n",
- mod->name, ELF32_R_TYPE(rel[i].r_info));
+ ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
default:
- pr_err("unknown relocation: %u\n", mod->name,
+ pr_err("unknown relocation: %u\n",
ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
isram_memcpy((void *)location, &value, size);
break;
default:
- pr_err("invalid relocation for %#lx\n",
- mod->name, location);
+ pr_err("invalid relocation for %#lx\n", location);
return -ENOEXEC;
}
}
/* if no interrupts are going off, don't print this out */
if (fp->ipend & ~0x3F) {
for (i = 0; i < (NR_IRQS - 1); i++) {
+ struct irq_desc *desc = irq_to_desc(i);
if (!in_atomic)
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
- action = irq_desc[i].action;
+ action = desc->action;
if (!action)
goto unlock;
pr_cont("\n");
unlock:
if (!in_atomic)
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
void __cpuinit bfin_local_timer_setup(void)
{
#if defined(CONFIG_TICKSOURCE_CORETMR)
- struct irq_chip *chip = get_irq_chip(IRQ_CORETMR);
- struct irq_desc *desc = irq_to_desc(IRQ_CORETMR);
+ struct irq_data *data = irq_get_irq_data(IRQ_CORETMR);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
bfin_coretmr_init();
bfin_coretmr_clockevent_init();
- chip->irq_unmask(&desc->irq_data);
+ chip->irq_unmask(data);
#else
/* Power down the core timer, just to play safe. */
bfin_write_TCNTL(0);
#ifdef CONFIG_IPIPE
handle = handle_level_irq;
#endif
- __set_irq_handler_unlocked(irq, handle);
+ __irq_set_handler_locked(irq, handle);
}
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
- struct irq_desc *desc = irq_to_desc(irq);
u32 gpionr = irq_to_gpio(irq);
- if (desc->handle_irq == handle_edge_irq)
+ if (!irqd_is_level_type(d))
set_gpio_data(gpionr, 0);
set_gpio_maska(gpionr, 0);
static void bfin_gpio_ack_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
u32 pintbit = PINT_BIT(pint_val);
u32 bank = PINT_2_BANK(pint_val);
- if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
if (pint[bank]->invert_set & pintbit)
pint[bank]->invert_clear = pintbit;
else
static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
u32 pintbit = PINT_BIT(pint_val);
u32 bank = PINT_2_BANK(pint_val);
- if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
if (pint[bank]->invert_set & pintbit)
pint[bank]->invert_clear = pintbit;
else
for (irq = 0; irq <= SYS_IRQS; irq++) {
if (irq <= IRQ_CORETMR)
- set_irq_chip(irq, &bfin_core_irqchip);
+ irq_set_chip(irq, &bfin_core_irqchip);
else
- set_irq_chip(irq, &bfin_internal_irqchip);
+ irq_set_chip(irq, &bfin_internal_irqchip);
switch (irq) {
#if defined(CONFIG_BF53x)
#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
case IRQ_PORTF_INTA:
#endif
- set_irq_chained_handler(irq,
- bfin_demux_gpio_irq);
+ irq_set_chained_handler(irq, bfin_demux_gpio_irq);
break;
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
case IRQ_GENERIC_ERROR:
- set_irq_chained_handler(irq, bfin_demux_error_irq);
+ irq_set_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
case IRQ_MAC_ERROR:
- set_irq_chained_handler(irq, bfin_demux_mac_status_irq);
+ irq_set_chained_handler(irq,
+ bfin_demux_mac_status_irq);
break;
#endif
#ifdef CONFIG_SMP
case IRQ_SUPPLE_0:
case IRQ_SUPPLE_1:
- set_irq_handler(irq, handle_percpu_irq);
+ irq_set_handler(irq, handle_percpu_irq);
break;
#endif
#ifdef CONFIG_TICKSOURCE_CORETMR
case IRQ_CORETMR:
# ifdef CONFIG_SMP
- set_irq_handler(irq, handle_percpu_irq);
+ irq_set_handler(irq, handle_percpu_irq);
break;
# else
- set_irq_handler(irq, handle_simple_irq);
+ irq_set_handler(irq, handle_simple_irq);
break;
# endif
#endif
#ifdef CONFIG_TICKSOURCE_GPTMR0
case IRQ_TIMER0:
- set_irq_handler(irq, handle_simple_irq);
+ irq_set_handler(irq, handle_simple_irq);
break;
#endif
#ifdef CONFIG_IPIPE
default:
- set_irq_handler(irq, handle_level_irq);
+ irq_set_handler(irq, handle_level_irq);
break;
#else /* !CONFIG_IPIPE */
default:
- set_irq_handler(irq, handle_simple_irq);
+ irq_set_handler(irq, handle_simple_irq);
break;
#endif /* !CONFIG_IPIPE */
}
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
- set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
+ irq_set_chip_and_handler(irq, &bfin_generic_error_irqchip,
handle_level_irq);
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
- set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
+ irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
#endif
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
- set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip,
+ irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
handle_level_irq);
#endif
/* if configured as edge, then will be changed to do_edge_IRQ */
for (irq = GPIO_IRQ_BASE;
irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
- set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
+ irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
handle_level_irq);
bfin_write_IMASK(0);
default y
select HAVE_IDE
select HAVE_GENERIC_HARDIRQS
- select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IRQ_SHOW
config HZ
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_SHOW
config ZONE_DMA
bool
config ARCH_SUSPEND_POSSIBLE
def_bool y
- depends on !SMP
source kernel/power/Kconfig
endmenu
#define wmb() asm volatile ("membar" : : :"memory")
#define read_barrier_depends() do { } while (0)
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) \
- do { xchg(&var, (value)); } while (0)
-#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do {} while(0)
#define set_mb(var, value) \
do { var = (value); barrier(); } while (0)
-#endif
extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2)));
extern void free_initmem(void);
#define THREAD_SIZE 8192
+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
+
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
#define alloc_thread_info_node(tsk, node) \
kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#else
-#define alloc_thread_info_node(tsk) \
+#define alloc_thread_info_node(tsk, node) \
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
/*
* on-motherboard FPGA PIC operations
*/
-static void frv_fpga_mask(unsigned int irq)
+static void frv_fpga_mask(struct irq_data *d)
{
uint16_t imr = __get_IMR();
- imr |= 1 << (irq - IRQ_BASE_FPGA);
+ imr |= 1 << (d->irq - IRQ_BASE_FPGA);
__set_IMR(imr);
}
-static void frv_fpga_ack(unsigned int irq)
+static void frv_fpga_ack(struct irq_data *d)
{
- __clr_IFR(1 << (irq - IRQ_BASE_FPGA));
+ __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA));
}
-static void frv_fpga_mask_ack(unsigned int irq)
+static void frv_fpga_mask_ack(struct irq_data *d)
{
uint16_t imr = __get_IMR();
- imr |= 1 << (irq - IRQ_BASE_FPGA);
+ imr |= 1 << (d->irq - IRQ_BASE_FPGA);
__set_IMR(imr);
- __clr_IFR(1 << (irq - IRQ_BASE_FPGA));
+ __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA));
}
-static void frv_fpga_unmask(unsigned int irq)
+static void frv_fpga_unmask(struct irq_data *d)
{
uint16_t imr = __get_IMR();
- imr &= ~(1 << (irq - IRQ_BASE_FPGA));
+ imr &= ~(1 << (d->irq - IRQ_BASE_FPGA));
__set_IMR(imr);
}
static struct irq_chip frv_fpga_pic = {
.name = "mb93091",
- .ack = frv_fpga_ack,
- .mask = frv_fpga_mask,
- .mask_ack = frv_fpga_mask_ack,
- .unmask = frv_fpga_unmask,
+ .irq_ack = frv_fpga_ack,
+ .irq_mask = frv_fpga_mask,
+ .irq_mask_ack = frv_fpga_mask_ack,
+ .irq_unmask = frv_fpga_unmask,
};
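
This is the standard irq_chip conversion of the series: the callbacks now receive a struct irq_data, derive the hardware line from d->irq, and the struct members carry the irq_ prefix. The same shape in isolation, with my_pic, MY_IRQ_BASE and the my_hw_* accessors as illustrative stand-ins for the controller registers.

	#include <linux/irq.h>

	#define MY_IRQ_BASE	64		/* hypothetical first linux IRQ */

	static inline void my_hw_mask(int hwirq)
	{
		/* would set the controller's mask bit for hwirq */
	}

	static inline void my_hw_unmask(int hwirq)
	{
		/* would clear the controller's mask bit for hwirq */
	}

	static void my_pic_mask(struct irq_data *d)
	{
		my_hw_mask(d->irq - MY_IRQ_BASE);	/* d->irq is the linux IRQ */
	}

	static void my_pic_unmask(struct irq_data *d)
	{
		my_hw_unmask(d->irq - MY_IRQ_BASE);
	}

	static struct irq_chip my_pic = {
		.name		= "my-pic",
		.irq_mask	= my_pic_mask,
		.irq_unmask	= my_pic_unmask,
	};
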
/*
__clr_IFR(0x0000);
for (irq = IRQ_BASE_FPGA + 1; irq <= IRQ_BASE_FPGA + 14; irq++)
- set_irq_chip_and_handler(irq, &frv_fpga_pic, handle_level_irq);
+ irq_set_chip_and_handler(irq, &frv_fpga_pic, handle_level_irq);
- set_irq_chip_and_handler(IRQ_FPGA_NMI, &frv_fpga_pic, handle_edge_irq);
+ irq_set_chip_and_handler(IRQ_FPGA_NMI, &frv_fpga_pic, handle_edge_irq);
/* the FPGA drives the first four external IRQ inputs on the CPU PIC */
setup_irq(IRQ_CPU_EXTERNAL0, &fpga_irq[0]);
/*
* off-CPU FPGA PIC operations
*/
-static void frv_fpga_mask(unsigned int irq)
+static void frv_fpga_mask(struct irq_data *d)
{
uint16_t imr = __get_IMR();
- imr |= 1 << (irq - IRQ_BASE_FPGA);
+ imr |= 1 << (d->irq - IRQ_BASE_FPGA);
__set_IMR(imr);
}
-static void frv_fpga_ack(unsigned int irq)
+static void frv_fpga_ack(struct irq_data *d)
{
- __clr_IFR(1 << (irq - IRQ_BASE_FPGA));
+ __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA));
}
-static void frv_fpga_mask_ack(unsigned int irq)
+static void frv_fpga_mask_ack(struct irq_data *d)
{
uint16_t imr = __get_IMR();
- imr |= 1 << (irq - IRQ_BASE_FPGA);
+ imr |= 1 << (d->irq - IRQ_BASE_FPGA);
__set_IMR(imr);
- __clr_IFR(1 << (irq - IRQ_BASE_FPGA));
+ __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA));
}
-static void frv_fpga_unmask(unsigned int irq)
+static void frv_fpga_unmask(struct irq_data *d)
{
uint16_t imr = __get_IMR();
- imr &= ~(1 << (irq - IRQ_BASE_FPGA));
+ imr &= ~(1 << (d->irq - IRQ_BASE_FPGA));
__set_IMR(imr);
}
static struct irq_chip frv_fpga_pic = {
.name = "mb93093",
- .ack = frv_fpga_ack,
- .mask = frv_fpga_mask,
- .mask_ack = frv_fpga_mask_ack,
- .unmask = frv_fpga_unmask,
- .end = frv_fpga_end,
+ .irq_ack = frv_fpga_ack,
+ .irq_mask = frv_fpga_mask,
+ .irq_mask_ack = frv_fpga_mask_ack,
+ .irq_unmask = frv_fpga_unmask,
};
/*
irq = 31 - irq;
mask &= ~(1 << irq);
- generic_irq_handle(IRQ_BASE_FPGA + irq);
+ generic_handle_irq(IRQ_BASE_FPGA + irq);
}
return IRQ_HANDLED;
__clr_IFR(0x0000);
for (irq = IRQ_BASE_FPGA + 8; irq <= IRQ_BASE_FPGA + 10; irq++)
- set_irq_chip_and_handler(irq, &frv_fpga_pic, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &frv_fpga_pic, handle_edge_irq);
/* the FPGA drives external IRQ input #2 on the CPU PIC */
setup_irq(IRQ_CPU_EXTERNAL2, &fpga_irq[0]);
* daughter board PIC operations
* - there is no way to ACK interrupts in the MB93493 chip
*/
-static void frv_mb93493_mask(unsigned int irq)
+static void frv_mb93493_mask(struct irq_data *d)
{
uint32_t iqsr;
volatile void *piqsr;
- if (IRQ_ROUTING & (1 << (irq - IRQ_BASE_MB93493)))
+ if (IRQ_ROUTING & (1 << (d->irq - IRQ_BASE_MB93493)))
piqsr = __addr_MB93493_IQSR(1);
else
piqsr = __addr_MB93493_IQSR(0);
iqsr = readl(piqsr);
- iqsr &= ~(1 << (irq - IRQ_BASE_MB93493 + 16));
+ iqsr &= ~(1 << (d->irq - IRQ_BASE_MB93493 + 16));
writel(iqsr, piqsr);
}
-static void frv_mb93493_ack(unsigned int irq)
+static void frv_mb93493_ack(struct irq_data *d)
{
}
-static void frv_mb93493_unmask(unsigned int irq)
+static void frv_mb93493_unmask(struct irq_data *d)
{
uint32_t iqsr;
volatile void *piqsr;
- if (IRQ_ROUTING & (1 << (irq - IRQ_BASE_MB93493)))
+ if (IRQ_ROUTING & (1 << (d->irq - IRQ_BASE_MB93493)))
piqsr = __addr_MB93493_IQSR(1);
else
piqsr = __addr_MB93493_IQSR(0);
iqsr = readl(piqsr);
- iqsr |= 1 << (irq - IRQ_BASE_MB93493 + 16);
+ iqsr |= 1 << (d->irq - IRQ_BASE_MB93493 + 16);
writel(iqsr, piqsr);
}
static struct irq_chip frv_mb93493_pic = {
.name = "mb93093",
- .ack = frv_mb93493_ack,
- .mask = frv_mb93493_mask,
- .mask_ack = frv_mb93493_mask,
- .unmask = frv_mb93493_unmask,
+ .irq_ack = frv_mb93493_ack,
+ .irq_mask = frv_mb93493_mask,
+ .irq_mask_ack = frv_mb93493_mask,
+ .irq_unmask = frv_mb93493_unmask,
};
/*
int irq;
for (irq = IRQ_BASE_MB93493 + 0; irq <= IRQ_BASE_MB93493 + 10; irq++)
- set_irq_chip_and_handler(irq, &frv_mb93493_pic, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &frv_mb93493_pic,
+ handle_edge_irq);
/* the MB93493 drives external IRQ inputs on the CPU PIC */
setup_irq(IRQ_CPU_MB93493_0, &mb93493_irq[0]);
atomic_t irq_err_count;
-/*
- * Generic, controller-independent functions:
- */
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- int i = *(loff_t *) v, cpu;
- struct irqaction * action;
- unsigned long flags;
-
- if (i == 0) {
- char cpuname[12];
-
- seq_printf(p, " ");
- for_each_present_cpu(cpu) {
- sprintf(cpuname, "CPU%d", cpu);
- seq_printf(p, " %10s", cpuname);
- }
- seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if (action) {
- seq_printf(p, "%3d: ", i);
- for_each_present_cpu(cpu)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
- seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
- seq_printf(p, " %s", action->name);
- for (action = action->next;
- action;
- action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
- }
-
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS) {
- seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
- }
-
+ seq_printf(p, "%*s: ", prec, "ERR");
+ seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
return 0;
}
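
With GENERIC_IRQ_SHOW selected, the generic IRQ code prints the per-IRQ table itself and calls arch_show_interrupts() once to append architecture-specific rows, which is why the old show_interrupts() bodies collapse to a few seq_printf() calls here. A minimal sketch of such a hook; my_arch_irq_errors is a stand-in counter, not taken from the frv or ia64 code.

	#include <linux/kernel.h>
	#include <linux/interrupt.h>
	#include <linux/seq_file.h>

	static unsigned int my_arch_irq_errors;

	int arch_show_interrupts(struct seq_file *p, int prec)
	{
		/*
		 * prec is the width of the IRQ-number column, so %*s keeps
		 * the extra row aligned with the lines printed by the core.
		 */
		seq_printf(p, "%*s: %10u\n", prec, "ERR", my_arch_irq_errors);
		return 0;
	}
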
/*
* on-CPU PIC operations
*/
-static void frv_cpupic_ack(unsigned int irqlevel)
+static void frv_cpupic_ack(struct irq_data *d)
{
- __clr_RC(irqlevel);
+ __clr_RC(d->irq);
__clr_IRL();
}
-static void frv_cpupic_mask(unsigned int irqlevel)
+static void frv_cpupic_mask(struct irq_data *d)
{
- __set_MASK(irqlevel);
+ __set_MASK(d->irq);
}
-static void frv_cpupic_mask_ack(unsigned int irqlevel)
+static void frv_cpupic_mask_ack(struct irq_data *d)
{
- __set_MASK(irqlevel);
- __clr_RC(irqlevel);
+ __set_MASK(d->irq);
+ __clr_RC(d->irq);
__clr_IRL();
}
-static void frv_cpupic_unmask(unsigned int irqlevel)
-{
- __clr_MASK(irqlevel);
-}
-
-static void frv_cpupic_end(unsigned int irqlevel)
+static void frv_cpupic_unmask(struct irq_data *d)
{
- __clr_MASK(irqlevel);
+ __clr_MASK(d->irq);
}
static struct irq_chip frv_cpu_pic = {
.name = "cpu",
- .ack = frv_cpupic_ack,
- .mask = frv_cpupic_mask,
- .mask_ack = frv_cpupic_mask_ack,
- .unmask = frv_cpupic_unmask,
- .end = frv_cpupic_end,
+ .irq_ack = frv_cpupic_ack,
+ .irq_mask = frv_cpupic_mask,
+ .irq_mask_ack = frv_cpupic_mask_ack,
+ .irq_unmask = frv_cpupic_unmask,
};
/*
int level;
for (level = 1; level <= 14; level++)
- set_irq_chip_and_handler(level, &frv_cpu_pic,
+ irq_set_chip_and_handler(level, &frv_cpu_pic,
handle_level_irq);
- set_irq_handler(IRQ_CPU_TIMER0, handle_edge_irq);
+ irq_set_handler(IRQ_CPU_TIMER0, handle_edge_irq);
/* set the trigger levels for internal interrupt sources
* - timers all falling-edge
default y
select HAVE_IDE
select HAVE_GENERIC_HARDIRQS
- select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IRQ_SHOW
config SYMBOL_PREFIX
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select IRQ_PER_CPU
+ select GENERIC_IRQ_SHOW
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
#include <linux/irq.h>
static unsigned int
-hpsim_irq_startup (unsigned int irq)
+hpsim_irq_startup(struct irq_data *data)
{
return 0;
}
static void
-hpsim_irq_noop (unsigned int irq)
+hpsim_irq_noop(struct irq_data *data)
{
}
static int
-hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
+hpsim_set_affinity_noop(struct irq_data *d, const struct cpumask *b, bool f)
{
return 0;
}
static struct irq_chip irq_type_hp_sim = {
- .name = "hpsim",
- .startup = hpsim_irq_startup,
- .shutdown = hpsim_irq_noop,
- .enable = hpsim_irq_noop,
- .disable = hpsim_irq_noop,
- .ack = hpsim_irq_noop,
- .end = hpsim_irq_noop,
- .set_affinity = hpsim_set_affinity_noop,
+ .name = "hpsim",
+ .irq_startup = hpsim_irq_startup,
+ .irq_shutdown = hpsim_irq_noop,
+ .irq_enable = hpsim_irq_noop,
+ .irq_disable = hpsim_irq_noop,
+ .irq_ack = hpsim_irq_noop,
+ .irq_set_affinity = hpsim_set_affinity_noop,
};
void __init
hpsim_irq_init (void)
{
- struct irq_desc *idesc;
int i;
- for (i = 0; i < NR_IRQS; ++i) {
- idesc = irq_desc + i;
- if (idesc->chip == &no_irq_chip)
- idesc->chip = &irq_type_hp_sim;
+ for_each_active_irq(i) {
+ struct irq_chip *chip = irq_get_chip(i);
+
+ if (chip == &no_irq_chip)
+ irq_set_chip(i, &irq_type_hp_sim);
}
}
/*
* Default implementations for the irq-descriptor API:
*/
-
-extern struct irq_desc irq_desc[NR_IRQS];
-
#ifndef CONFIG_IA64_GENERIC
static inline ia64_vector __ia64_irq_to_vector(int irq)
{
}
static void
-nop (unsigned int irq)
+nop (struct irq_data *data)
{
/* do nothing... */
}
#endif
static void
-mask_irq (unsigned int irq)
+mask_irq (struct irq_data *data)
{
+ unsigned int irq = data->irq;
u32 low32;
int rte_index;
struct iosapic_rte_info *rte;
}
static void
-unmask_irq (unsigned int irq)
+unmask_irq (struct irq_data *data)
{
+ unsigned int irq = data->irq;
u32 low32;
int rte_index;
struct iosapic_rte_info *rte;
static int
-iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
+iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
{
#ifdef CONFIG_SMP
+ unsigned int irq = data->irq;
u32 high32, low32;
int cpu, dest, rte_index;
int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
*/
static unsigned int
-iosapic_startup_level_irq (unsigned int irq)
+iosapic_startup_level_irq (struct irq_data *data)
{
- unmask_irq(irq);
+ unmask_irq(data);
return 0;
}
static void
-iosapic_unmask_level_irq (unsigned int irq)
+iosapic_unmask_level_irq (struct irq_data *data)
{
+ unsigned int irq = data->irq;
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte;
int do_unmask_irq = 0;
irq_complete_move(irq);
- if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
do_unmask_irq = 1;
- mask_irq(irq);
+ mask_irq(data);
} else
- unmask_irq(irq);
+ unmask_irq(data);
list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
iosapic_eoi(rte->iosapic->addr, vec);
if (unlikely(do_unmask_irq)) {
- move_masked_irq(irq);
- unmask_irq(irq);
+ irq_move_masked_irq(data);
+ unmask_irq(data);
}
}
#define iosapic_ack_level_irq nop
static struct irq_chip irq_type_iosapic_level = {
- .name = "IO-SAPIC-level",
- .startup = iosapic_startup_level_irq,
- .shutdown = iosapic_shutdown_level_irq,
- .enable = iosapic_enable_level_irq,
- .disable = iosapic_disable_level_irq,
- .ack = iosapic_ack_level_irq,
- .mask = mask_irq,
- .unmask = iosapic_unmask_level_irq,
- .set_affinity = iosapic_set_affinity
+ .name = "IO-SAPIC-level",
+ .irq_startup = iosapic_startup_level_irq,
+ .irq_shutdown = iosapic_shutdown_level_irq,
+ .irq_enable = iosapic_enable_level_irq,
+ .irq_disable = iosapic_disable_level_irq,
+ .irq_ack = iosapic_ack_level_irq,
+ .irq_mask = mask_irq,
+ .irq_unmask = iosapic_unmask_level_irq,
+ .irq_set_affinity = iosapic_set_affinity
};
/*
*/
static unsigned int
-iosapic_startup_edge_irq (unsigned int irq)
+iosapic_startup_edge_irq (struct irq_data *data)
{
- unmask_irq(irq);
+ unmask_irq(data);
/*
* IOSAPIC simply drops interrupts pended while the
* corresponding pin was masked, so we can't know if an
}
static void
-iosapic_ack_edge_irq (unsigned int irq)
+iosapic_ack_edge_irq (struct irq_data *data)
{
- struct irq_desc *idesc = irq_desc + irq;
-
- irq_complete_move(irq);
- move_native_irq(irq);
- /*
- * Once we have recorded IRQ_PENDING already, we can mask the
- * interrupt for real. This prevents IRQ storms from unhandled
- * devices.
- */
- if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
- (IRQ_PENDING|IRQ_DISABLED))
- mask_irq(irq);
+ irq_complete_move(data->irq);
+ irq_move_irq(data);
}
#define iosapic_enable_edge_irq unmask_irq
#define iosapic_disable_edge_irq nop
-#define iosapic_end_edge_irq nop
static struct irq_chip irq_type_iosapic_edge = {
- .name = "IO-SAPIC-edge",
- .startup = iosapic_startup_edge_irq,
- .shutdown = iosapic_disable_edge_irq,
- .enable = iosapic_enable_edge_irq,
- .disable = iosapic_disable_edge_irq,
- .ack = iosapic_ack_edge_irq,
- .end = iosapic_end_edge_irq,
- .mask = mask_irq,
- .unmask = unmask_irq,
- .set_affinity = iosapic_set_affinity
+ .name = "IO-SAPIC-edge",
+ .irq_startup = iosapic_startup_edge_irq,
+ .irq_shutdown = iosapic_disable_edge_irq,
+ .irq_enable = iosapic_enable_edge_irq,
+ .irq_disable = iosapic_disable_edge_irq,
+ .irq_ack = iosapic_ack_edge_irq,
+ .irq_mask = mask_irq,
+ .irq_unmask = unmask_irq,
+ .irq_set_affinity = iosapic_set_affinity
};
static unsigned int
register_intr (unsigned int gsi, int irq, unsigned char delivery,
unsigned long polarity, unsigned long trigger)
{
- struct irq_desc *idesc;
- struct irq_chip *irq_type;
+ struct irq_chip *chip, *irq_type;
int index;
struct iosapic_rte_info *rte;
irq_type = iosapic_get_irq_chip(trigger);
- idesc = irq_desc + irq;
- if (irq_type != NULL && idesc->chip != irq_type) {
- if (idesc->chip != &no_irq_chip)
+ chip = irq_get_chip(irq);
+ if (irq_type != NULL && chip != irq_type) {
+ if (chip != &no_irq_chip)
printk(KERN_WARNING
"%s: changing vector %d from %s to %s\n",
__func__, irq_to_vector(irq),
- idesc->chip->name, irq_type->name);
- idesc->chip = irq_type;
+ chip->name, irq_type->name);
+ chip = irq_type;
}
- if (trigger == IOSAPIC_EDGE)
- __set_irq_handler_unlocked(irq, handle_edge_irq);
- else
- __set_irq_handler_unlocked(irq, handle_level_irq);
+ __irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ?
+ handle_edge_irq : handle_level_irq,
+ NULL);
return 0;
}
struct iosapic_rte_info *rte;
u32 low32;
unsigned char dmode;
+ struct irq_desc *desc;
/*
* If this GSI has already been registered (i.e., it's a
goto unlock_iosapic_lock;
}
- raw_spin_lock(&irq_desc[irq].lock);
+ desc = irq_to_desc(irq);
+ raw_spin_lock(&desc->lock);
dest = get_target_cpu(gsi, irq);
dmode = choose_dmode();
err = register_intr(gsi, irq, dmode, polarity, trigger);
if (err < 0) {
- raw_spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&desc->lock);
irq = err;
goto unlock_iosapic_lock;
}
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, irq_to_vector(irq));
- raw_spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&desc->lock);
unlock_iosapic_lock:
spin_unlock_irqrestore(&iosapic_lock, flags);
return irq;
{
unsigned long flags;
int irq, index;
- struct irq_desc *idesc;
u32 low32;
unsigned long trigger, polarity;
unsigned int dest;
if (--rte->refcnt > 0)
goto out;
- idesc = irq_desc + irq;
rte->refcnt = NO_REF_RTE;
/* Mask the interrupt */
if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
/* Clear affinity */
- cpumask_setall(idesc->affinity);
+ cpumask_setall(irq_get_irq_data(irq)->affinity);
#endif
/* Clear the interrupt information */
iosapic_intr_info[irq].dest = 0;
/*
* /proc/interrupts printing:
*/
-
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
- unsigned long flags;
-
- if (i == 0) {
- char cpuname[16];
- seq_printf(p, " ");
- for_each_online_cpu(j) {
- snprintf(cpuname, 10, "CPU%d", j);
- seq_printf(p, "%10s ", cpuname);
- }
- seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if (!action)
- goto skip;
- seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j) {
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
- }
-#endif
- seq_printf(p, " %14s", irq_desc[i].chip->name);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-skip:
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS)
- seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
return 0;
}
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
if (irq < NR_IRQS) {
- cpumask_copy(irq_desc[irq].affinity,
+ cpumask_copy(irq_get_irq_data(irq)->affinity,
cpumask_of(cpu_logical_id(hwid)));
irq_redir[irq] = (char) (redir & 0xff);
}
*/
static void migrate_irqs(void)
{
- struct irq_desc *desc;
int irq, new_cpu;
for (irq=0; irq < NR_IRQS; irq++) {
- desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
- if (desc->status == IRQ_DISABLED)
+ if (irqd_irq_disabled(data))
continue;
/*
* tell CPU not to respond to these local intr sources.
* such as ITV,CPEI,MCA etc.
*/
- if (desc->status == IRQ_PER_CPU)
+ if (irqd_is_per_cpu(data))
continue;
- if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
+ if (cpumask_any_and(data->affinity, cpu_online_mask)
>= nr_cpu_ids) {
/*
* Save it for phase 2 processing
/*
* Al three are essential, currently WARN_ON.. maybe panic?
*/
- if (desc->chip && desc->chip->disable &&
- desc->chip->enable && desc->chip->set_affinity) {
- desc->chip->disable(irq);
- desc->chip->set_affinity(irq,
- cpumask_of(new_cpu));
- desc->chip->enable(irq);
+ if (chip && chip->irq_disable &&
+ chip->irq_enable && chip->irq_set_affinity) {
+ chip->irq_disable(data);
+ chip->irq_set_affinity(data,
+ cpumask_of(new_cpu), false);
+ chip->irq_enable(data);
} else {
- WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
- !(desc->chip->enable) ||
- !(desc->chip->set_affinity)));
+ WARN_ON((!chip || !chip->irq_disable ||
+ !chip->irq_enable ||
+ !chip->irq_set_affinity));
}
}
}
if (irq < 0)
continue;
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
cfg = irq_cfg + irq;
raw_spin_lock(&desc->lock);
if (!cfg->move_cleanup_count)
void
ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
- struct irq_desc *desc;
unsigned int irq;
irq = vec;
BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
- desc = irq_desc + irq;
- desc->status |= IRQ_PER_CPU;
- set_irq_chip(irq, &irq_type_ia64_lsapic);
+ irq_set_status_flags(irq, IRQ_PER_CPU);
+ irq_set_chip(irq, &irq_type_ia64_lsapic);
if (action)
setup_irq(irq, action);
- set_irq_handler(irq, handle_percpu_irq);
+ irq_set_handler(irq, handle_percpu_irq);
}
void __init
#include <linux/irq.h>
static unsigned int
-lsapic_noop_startup (unsigned int irq)
+lsapic_noop_startup (struct irq_data *data)
{
return 0;
}
static void
-lsapic_noop (unsigned int irq)
+lsapic_noop (struct irq_data *data)
{
/* nothing to do... */
}
-static int lsapic_retrigger(unsigned int irq)
+static int lsapic_retrigger(struct irq_data *data)
{
- ia64_resend_irq(irq);
+ ia64_resend_irq(data->irq);
return 1;
}
struct irq_chip irq_type_ia64_lsapic = {
- .name = "LSAPIC",
- .startup = lsapic_noop_startup,
- .shutdown = lsapic_noop,
- .enable = lsapic_noop,
- .disable = lsapic_noop,
- .ack = lsapic_noop,
- .end = lsapic_noop,
- .retrigger = lsapic_retrigger,
+ .name = "LSAPIC",
+ .irq_startup = lsapic_noop_startup,
+ .irq_shutdown = lsapic_noop,
+ .irq_enable = lsapic_noop,
+ .irq_disable = lsapic_noop,
+ .irq_ack = lsapic_noop,
+ .irq_retrigger = lsapic_retrigger,
};
cpe_poll_timer.function = ia64_mca_cpe_poll;
{
- struct irq_desc *desc;
unsigned int irq;
if (cpe_vector >= 0) {
irq = local_vector_to_irq(cpe_vector);
if (irq > 0) {
cpe_poll_enabled = 0;
- desc = irq_desc + irq;
- desc->status |= IRQ_PER_CPU;
+ irq_set_status_flags(irq, IRQ_PER_CPU);
setup_irq(irq, &mca_cpe_irqaction);
ia64_cpe_irq = irq;
ia64_mca_register_cpev(cpe_vector);
static struct irq_chip ia64_msi_chip;
#ifdef CONFIG_SMP
-static int ia64_set_msi_irq_affinity(unsigned int irq,
- const cpumask_t *cpu_mask)
+static int ia64_set_msi_irq_affinity(struct irq_data *idata,
+ const cpumask_t *cpu_mask, bool force)
{
struct msi_msg msg;
u32 addr, data;
int cpu = first_cpu(*cpu_mask);
+ unsigned int irq = idata->irq;
if (!cpu_online(cpu))
return -1;
msg.data = data;
write_msi_msg(irq, &msg);
- cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+ cpumask_copy(idata->affinity, cpumask_of(cpu));
return 0;
}
if (irq < 0)
return irq;
- set_irq_msi(irq, desc);
+ irq_set_msi_desc(irq, desc);
cpus_and(mask, irq_to_domain(irq), cpu_online_map);
dest_phys_id = cpu_physical_id(first_cpu(mask));
vector = irq_to_vector(irq);
MSI_DATA_VECTOR(vector);
write_msi_msg(irq, &msg);
- set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
return 0;
}
destroy_irq(irq);
}
-static void ia64_ack_msi_irq(unsigned int irq)
+static void ia64_ack_msi_irq(struct irq_data *data)
{
- irq_complete_move(irq);
- move_native_irq(irq);
+ irq_complete_move(data->irq);
+ irq_move_irq(data);
ia64_eoi();
}
-static int ia64_msi_retrigger_irq(unsigned int irq)
+static int ia64_msi_retrigger_irq(struct irq_data *data)
{
- unsigned int vector = irq_to_vector(irq);
+ unsigned int vector = irq_to_vector(data->irq);
ia64_resend_irq(vector);
return 1;
* Generic ops used on most IA64 platforms.
*/
static struct irq_chip ia64_msi_chip = {
- .name = "PCI-MSI",
- .irq_mask = mask_msi_irq,
- .irq_unmask = unmask_msi_irq,
- .ack = ia64_ack_msi_irq,
+ .name = "PCI-MSI",
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+ .irq_ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
- .set_affinity = ia64_set_msi_irq_affinity,
+ .irq_set_affinity = ia64_set_msi_irq_affinity,
#endif
- .retrigger = ia64_msi_retrigger_irq,
+ .irq_retrigger = ia64_msi_retrigger_irq,
};
#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
-static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int dmar_msi_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
+ unsigned int irq = data->irq;
struct irq_cfg *cfg = irq_cfg + irq;
struct msi_msg msg;
int cpu = cpumask_first(mask);
msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
dmar_msi_write(irq, &msg);
- cpumask_copy(irq_desc[irq].affinity, mask);
+ cpumask_copy(data->affinity, mask);
return 0;
}
.name = "DMAR_MSI",
.irq_unmask = dmar_msi_unmask,
.irq_mask = dmar_msi_mask,
- .ack = ia64_ack_msi_irq,
+ .irq_ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
- .set_affinity = dmar_msi_set_affinity,
+ .irq_set_affinity = dmar_msi_set_affinity,
#endif
- .retrigger = ia64_msi_retrigger_irq,
+ .irq_retrigger = ia64_msi_retrigger_irq,
};
static int
if (ret < 0)
return ret;
dmar_msi_write(irq, &msg);
- set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
- "edge");
+ irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+ "edge");
return 0;
}
#endif /* CONFIG_DMAR */
int migrate_platform_irqs(unsigned int cpu)
{
int new_cpei_cpu;
- struct irq_desc *desc = NULL;
+ struct irq_data *data = NULL;
const struct cpumask *mask;
int retval = 0;
new_cpei_cpu = any_online_cpu(cpu_online_map);
mask = cpumask_of(new_cpei_cpu);
set_cpei_target_cpu(new_cpei_cpu);
- desc = irq_desc + ia64_cpe_irq;
+ data = irq_get_irq_data(ia64_cpe_irq);
/*
* Switch for now, immediately, we need to do fake intr
* as other interrupts, but need to study CPEI behaviour with
* polling before making changes.
*/
- if (desc) {
- desc->chip->disable(ia64_cpe_irq);
- desc->chip->set_affinity(ia64_cpe_irq, mask);
- desc->chip->enable(ia64_cpe_irq);
+ if (data && data->chip) {
+ data->chip->irq_disable(data);
+ data->chip->irq_set_affinity(data, mask, false);
+ data->chip->irq_enable(data);
printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
}
}
- if (!desc) {
+ if (!data) {
printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
retval = -EBUSY;
}
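
Outside the chip implementations, the same series replaces direct irq_desc[] indexing with the accessor API: irq_get_irq_data() hands back the irq_data, and the chip methods are invoked through it, as in the CPEI retarget above. A hedged sketch of that pattern, with an illustrative function name, irq number and cpu:

	/* Sketch only: retarget one irq to a new cpu via the irq_data accessors. */
	static void example_retarget(unsigned int irq, int new_cpu)
	{
		struct irq_data *d = irq_get_irq_data(irq);

		if (d && d->chip && d->chip->irq_disable &&
		    d->chip->irq_enable && d->chip->irq_set_affinity) {
			d->chip->irq_disable(d);
			d->chip->irq_set_affinity(d, cpumask_of(new_cpu), false);
			d->chip->irq_enable(d);
		}
	}
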
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_feature_sets.h>
-static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
-int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
struct list_head **sn_irq_lh;
static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
return ret_stuff.status;
}
-static unsigned int sn_startup_irq(unsigned int irq)
+static unsigned int sn_startup_irq(struct irq_data *data)
{
return 0;
}
-static void sn_shutdown_irq(unsigned int irq)
+static void sn_shutdown_irq(struct irq_data *data)
{
}
extern void ia64_mca_register_cpev(int);
-static void sn_disable_irq(unsigned int irq)
+static void sn_disable_irq(struct irq_data *data)
{
- if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
+ if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
ia64_mca_register_cpev(0);
}
-static void sn_enable_irq(unsigned int irq)
+static void sn_enable_irq(struct irq_data *data)
{
- if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
- ia64_mca_register_cpev(irq);
+ if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
+ ia64_mca_register_cpev(data->irq);
}
-static void sn_ack_irq(unsigned int irq)
+static void sn_ack_irq(struct irq_data *data)
{
u64 event_occurred, mask;
+ unsigned int irq = data->irq & 0xff;
- irq = irq & 0xff;
event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
mask = event_occurred & SH_ALL_INT_MASK;
HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
- move_native_irq(irq);
-}
-
-static void sn_end_irq(unsigned int irq)
-{
- int ivec;
- u64 event_occurred;
-
- ivec = irq & 0xff;
- if (ivec == SGI_UART_VECTOR) {
- event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
- /* If the UART bit is set here, we may have received an
- * interrupt from the UART that the driver missed. To
- * make sure, we IPI ourselves to force us to look again.
- */
- if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
- platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
- IA64_IPI_DM_INT, 0);
- }
- }
- __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
- if (sn_force_interrupt_flag)
- force_interrupt(irq);
+ irq_move_irq(data);
}
static void sn_irq_info_free(struct rcu_head *head);
return new_irq_info;
}
-static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
+static int sn_set_affinity_irq(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+ unsigned int irq = data->irq;
nasid_t nasid;
int slice;
#endif
static void
-sn_mask_irq(unsigned int irq)
+sn_mask_irq(struct irq_data *data)
{
}
static void
-sn_unmask_irq(unsigned int irq)
+sn_unmask_irq(struct irq_data *data)
{
}
struct irq_chip irq_type_sn = {
- .name = "SN hub",
- .startup = sn_startup_irq,
- .shutdown = sn_shutdown_irq,
- .enable = sn_enable_irq,
- .disable = sn_disable_irq,
- .ack = sn_ack_irq,
- .end = sn_end_irq,
- .mask = sn_mask_irq,
- .unmask = sn_unmask_irq,
- .set_affinity = sn_set_affinity_irq
+ .name = "SN hub",
+ .irq_startup = sn_startup_irq,
+ .irq_shutdown = sn_shutdown_irq,
+ .irq_enable = sn_enable_irq,
+ .irq_disable = sn_disable_irq,
+ .irq_ack = sn_ack_irq,
+ .irq_mask = sn_mask_irq,
+ .irq_unmask = sn_unmask_irq,
+ .irq_set_affinity = sn_set_affinity_irq
};
ia64_vector sn_irq_to_vector(int irq)
void sn_irq_init(void)
{
int i;
- struct irq_desc *base_desc = irq_desc;
ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
for (i = 0; i < NR_IRQS; i++) {
- if (base_desc[i].chip == &no_irq_chip) {
- base_desc[i].chip = &irq_type_sn;
- }
+ if (irq_get_chip(i) == &no_irq_chip)
+ irq_set_chip(i, &irq_type_sn);
}
}
int cpu = nasid_slice_to_cpuid(nasid, slice);
#ifdef CONFIG_SMP
int cpuphys;
- struct irq_desc *desc;
#endif
pci_dev_get(pci_dev);
#ifdef CONFIG_SMP
cpuphys = cpu_physical_id(cpu);
set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
- desc = irq_to_desc(sn_irq_info->irq_irq);
/*
* Affinity was set by the PROM, prevent it from
* being reset by the request_irq() path.
*/
- desc->status |= IRQ_AFFINITY_SET;
+ irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq));
#endif
}
pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
/* Don't force an interrupt if the irq has been disabled */
- if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) &&
+ if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) &&
pci_provider && pci_provider->force_interrupt)
(*pci_provider->force_interrupt)(sn_irq_info);
}
-static void force_interrupt(int irq)
-{
- struct sn_irq_info *sn_irq_info;
-
- if (!sn_ioif_inited)
- return;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
- sn_call_force_intr_provider(sn_irq_info);
-
- rcu_read_unlock();
-}
-
/*
* Check for lost interrupts. If the PIC int_status reg. says that
* an interrupt has been sent, but not handled, and the interrupt
*/
msg.data = 0x100 + irq;
- set_irq_msi(irq, entry);
+ irq_set_msi_desc(irq, entry);
write_msi_msg(irq, &msg);
- set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
+ irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
return 0;
}
#ifdef CONFIG_SMP
-static int sn_set_msi_irq_affinity(unsigned int irq,
- const struct cpumask *cpu_mask)
+static int sn_set_msi_irq_affinity(struct irq_data *data,
+ const struct cpumask *cpu_mask, bool force)
{
struct msi_msg msg;
int slice;
struct sn_irq_info *sn_irq_info;
struct sn_irq_info *new_irq_info;
struct sn_pcibus_provider *provider;
- unsigned int cpu;
+ unsigned int cpu, irq = data->irq;
cpu = cpumask_first(cpu_mask);
sn_irq_info = sn_msi_info[irq].sn_irq_info;
msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
write_msi_msg(irq, &msg);
- cpumask_copy(irq_desc[irq].affinity, cpu_mask);
+ cpumask_copy(data->affinity, cpu_mask);
return 0;
}
#endif /* CONFIG_SMP */
-static void sn_ack_msi_irq(unsigned int irq)
+static void sn_ack_msi_irq(struct irq_data *data)
{
- move_native_irq(irq);
+ irq_move_irq(data);
ia64_eoi();
}
-static int sn_msi_retrigger_irq(unsigned int irq)
+static int sn_msi_retrigger_irq(struct irq_data *data)
{
- unsigned int vector = irq;
+ unsigned int vector = data->irq;
ia64_resend_irq(vector);
return 1;
}
static struct irq_chip sn_msi_chip = {
- .name = "PCI-MSI",
- .irq_mask = mask_msi_irq,
- .irq_unmask = unmask_msi_irq,
- .ack = sn_ack_msi_irq,
+ .name = "PCI-MSI",
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+ .irq_ack = sn_ack_msi_irq,
#ifdef CONFIG_SMP
- .set_affinity = sn_set_msi_irq_affinity,
+ .irq_set_affinity = sn_set_msi_irq_affinity,
#endif
- .retrigger = sn_msi_retrigger_irq,
+ .irq_retrigger = sn_msi_retrigger_irq,
};
return single_open(file, licenseID_show, NULL);
}
-/*
- * Enable forced interrupt by default.
- * When set, the sn interrupt handler writes the force interrupt register on
- * the bridge chip. The hardware will then send an interrupt message if the
- * interrupt line is active. This mimics a level sensitive interrupt.
- */
-extern int sn_force_interrupt_flag;
-
-static int sn_force_interrupt_show(struct seq_file *s, void *p)
-{
- seq_printf(s, "Force interrupt is %s\n",
- sn_force_interrupt_flag ? "enabled" : "disabled");
- return 0;
-}
-
-static ssize_t sn_force_interrupt_write_proc(struct file *file,
- const char __user *buffer, size_t count, loff_t *data)
-{
- char val;
-
- if (copy_from_user(&val, buffer, 1))
- return -EFAULT;
-
- sn_force_interrupt_flag = (val == '0') ? 0 : 1;
- return count;
-}
-
-static int sn_force_interrupt_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sn_force_interrupt_show, NULL);
-}
-
static int coherence_id_show(struct seq_file *s, void *p)
{
seq_printf(s, "%d\n", partition_coherence_id());
.release = single_release,
};
-static const struct file_operations proc_sn_force_intr_fops = {
- .open = sn_force_interrupt_open,
- .read = seq_read,
- .write = sn_force_interrupt_write_proc,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static const struct file_operations proc_coherence_id_fops = {
.open = coherence_id_open,
.read = seq_read,
proc_create("system_serial_number", 0444, sgi_proc_dir,
&proc_system_sn_fops);
proc_create("licenseID", 0444, sgi_proc_dir, &proc_license_id_fops);
- proc_create("sn_force_interrupt", 0644, sgi_proc_dir,
- &proc_sn_force_intr_fops);
proc_create("coherence_id", 0444, sgi_proc_dir,
&proc_coherence_id_fops);
proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops);
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
struct irqaction *action, int save)
{
- struct irq_desc *desc;
int irq = 0;
if (xen_slab_ready) {
* mark the interrupt for migrations and trigger it
* on cpu hotplug.
*/
- desc = irq_desc + irq;
- desc->status |= IRQ_PER_CPU;
+ irq_set_status_flags(irq, IRQ_PER_CPU);
}
}
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
select HAVE_GENERIC_HARDIRQS
- select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select HAVE_AOUT if MMU
select GENERIC_ATOMIC64 if MMU
select HAVE_GENERIC_HARDIRQS if !MMU
- select GENERIC_HARDIRQS_NO_DEPRECATED if !MMU
config RWSEM_GENERIC_SPINLOCK
bool
if (ap) {
seq_printf(p, "%3d: ", irq);
seq_printf(p, "%10u ", kstat_irqs(irq));
- seq_printf(p, "%14s ", get_irq_desc_chip(desc)->name);
+ seq_printf(p, "%14s ", irq_desc_get_chip(desc)->name);
seq_printf(p, "%s", ap->name);
for (ap = ap->next; ap; ap = ap->next)
/* GPIO interrupt sources */
for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
- set_irq_chip(irq, &intc2_irq_gpio_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip(irq, &intc2_irq_gpio_chip);
+ irq_set_handler(irq, handle_edge_irq);
}
return 0;
*/
static void intc_external_irq(unsigned int irq, struct irq_desc *desc)
{
- get_irq_desc_chip(desc)->irq_ack(&desc->irq_data);
+ irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
handle_simple_irq(irq, desc);
}
writel(0x88888888, MCF_MBAR + MCFSIM_ICR4);
for (irq = 0; (irq < NR_IRQS); irq++) {
- set_irq_chip(irq, &intc_irq_chip);
+ irq_set_chip(irq, &intc_irq_chip);
edge = 0;
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX))
edge = intc_irqmap[irq - MCFINT_VECBASE].ack;
if (edge) {
- set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
- set_irq_handler(irq, intc_external_irq);
+ irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_handler(irq, intc_external_irq);
} else {
- set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_handler(irq, handle_level_irq);
}
}
}
IMR = ~0;
for (i = 0; (i < NR_IRQS); i++) {
- set_irq_chip(i, &intc_irq_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip(i, &intc_irq_chip);
+ irq_set_handler(i, handle_level_irq);
}
}
pquicc->intr_cimr = 0x00000000;
for (i = 0; (i < NR_IRQS); i++) {
- set_irq_chip(i, &intc_irq_chip);
- set_irq_handler(i, handle_level_irq);
+ irq_set_chip(i, &intc_irq_chip);
+ irq_set_handler(i, handle_level_irq);
}
}
}
if (tb)
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_handler(irq, handle_edge_irq);
irq -= EINT0;
pa = __raw_readw(MCFEPORT_EPPAR);
for (irq = MCFINT_VECBASE; (irq < MCFINT_VECBASE + NR_VECS); irq++) {
if ((irq >= EINT1) && (irq <=EINT7))
- set_irq_chip(irq, &intc_irq_chip_edge_port);
+ irq_set_chip(irq, &intc_irq_chip_edge_port);
else
- set_irq_chip(irq, &intc_irq_chip);
- set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip(irq, &intc_irq_chip);
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_handler(irq, handle_level_irq);
}
}
}
if (tb)
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_handler(irq, handle_edge_irq);
ebit = irq2ebit(irq) * 2;
pa = __raw_readw(MCFEPORT_EPPAR);
eirq = MCFINT_VECBASE + 64 + (MCFINTC1_ICR0 ? 64 : 0);
for (irq = MCFINT_VECBASE; (irq < eirq); irq++) {
if ((irq >= EINT1) && (irq <= EINT7))
- set_irq_chip(irq, &intc_irq_chip_edge_port);
+ irq_set_chip(irq, &intc_irq_chip_edge_port);
else
- set_irq_chip(irq, &intc_irq_chip);
- set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip(irq, &intc_irq_chip);
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_handler(irq, handle_level_irq);
}
}
mcf_maskimr(0xffffffff);
for (irq = 0; (irq < NR_IRQS); irq++) {
- set_irq_chip(irq, &intc_irq_chip);
- set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip(irq, &intc_irq_chip);
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_handler(irq, handle_level_irq);
}
}
select OF_EARLY_FLATTREE
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
- select GENERIC_HARDIRQS_NO_DEPRECATED
+ select GENERIC_IRQ_SHOW
config SWAP
def_bool n
* ack function since the handle_level_irq function
* acks the irq before calling the interrupt handler
*/
- if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
+ if (irqd_is_level_type(d))
out_be32(INTC_BASE + IAR, mask);
}
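
Status bits that chip code used to read out of desc->status are now queried through the irqd_*() accessors on the irq_data itself, as the IRQ_LEVEL test above does with irqd_is_level_type(). A small sketch of how such a callback reads state under the new API; EXAMPLE_IAR is a made-up register, not from this patch:

	static void example_ack(struct irq_data *d)
	{
		u32 mask = 1u << (d->irq & 31);

		if (irqd_is_level_type(d))
			out_be32(EXAMPLE_IAR, mask);	/* level irqs need an explicit ack */
	}
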
for (i = 0; i < nr_irq; ++i) {
if (intr_type & (0x00000001 << i)) {
- set_irq_chip_and_handler_name(i, &intc_dev,
+ irq_set_chip_and_handler_name(i, &intc_dev,
handle_edge_irq, intc_dev.name);
irq_clear_status_flags(i, IRQ_LEVEL);
} else {
- set_irq_chip_and_handler_name(i, &intc_dev,
+ irq_set_chip_and_handler_name(i, &intc_dev,
handle_level_irq, intc_dev.name);
irq_set_status_flags(i, IRQ_LEVEL);
}
trace_hardirqs_on();
}
-int show_interrupts(struct seq_file *p, void *v)
-{
- int i = *(loff_t *) v, j;
- struct irq_desc *desc;
- struct irqaction *action;
- unsigned long flags;
-
- if (i == 0) {
- seq_printf(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%-8d", j);
- seq_putc(p, '\n');
- }
-
- if (i < nr_irq) {
- desc = irq_to_desc(i);
- raw_spin_lock_irqsave(&desc->lock, flags);
- action = desc->action;
- if (!action)
- goto skip;
- seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
- seq_printf(p, " %8s", desc->status &
- IRQ_LEVEL ? "level" : "edge");
- seq_printf(p, " %8s", desc->irq_data.chip->name);
- seq_printf(p, " %s", action->name);
-
- for (action = action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-skip:
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
- return 0;
-}
-
/* MS: There is no any advance mapping mechanism. We are using simple 32bit
intc without any cascades or any connection that's why mapping is 1:1 */
unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
virq = irq_create_mapping(NULL, line);
if (virq != NO_IRQ)
- set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.specifier[1],
bcsr_csc_base = csc_start;
for (irq = csc_start; irq <= csc_end; irq++)
- set_irq_chip_and_handler_name(irq, &bcsr_irq_type,
- handle_level_irq, "level");
+ irq_set_chip_and_handler_name(irq, &bcsr_irq_type,
+ handle_level_irq, "level");
- set_irq_chained_handler(hook_irq, bcsr_csc_handler);
+ irq_set_chained_handler(hook_irq, bcsr_csc_handler);
}
static int __init db1200_arch_init(void)
{
/* GPIO7 is low-level triggered CPLD cascade */
- set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW);
bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT);
/* insert/eject pairs: one of both is always screaming. To avoid
* issues they must not be automatically enabled when initially
* requested.
*/
- irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN;
- irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN;
- irq_to_desc(DB1200_PC0_INSERT_INT)->status |= IRQ_NOAUTOEN;
- irq_to_desc(DB1200_PC0_EJECT_INT)->status |= IRQ_NOAUTOEN;
- irq_to_desc(DB1200_PC1_INSERT_INT)->status |= IRQ_NOAUTOEN;
- irq_to_desc(DB1200_PC1_EJECT_INT)->status |= IRQ_NOAUTOEN;
-
+ irq_set_status_flags(DB1200_SD0_INSERT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1200_SD0_EJECT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1200_PC0_INSERT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1200_PC0_EJECT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1200_PC1_INSERT_INT, IRQ_NOAUTOEN);
+ irq_set_status_flags(DB1200_PC1_EJECT_INT, IRQ_NOAUTOEN);
return 0;
}
arch_initcall(db1200_arch_init);
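
IRQ_NOAUTOEN is now requested through irq_set_status_flags() rather than by poking desc->status, as the insert/eject lines above show. The usual pairing, sketched here with illustrative names (MY_INT, my_handler), is to mark the line before request_irq() and enable it explicitly once the driver is ready:

	static int __init example_setup(void)
	{
		irq_set_status_flags(MY_INT, IRQ_NOAUTOEN);
		if (request_irq(MY_INT, my_handler, 0, "my-dev", NULL))
			return -EBUSY;
		/* ... later, once the device can take interrupts ... */
		enable_irq(MY_INT);
		return 0;
	}
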
static int __init db1x00_init_irq(void)
{
#if defined(CONFIG_MIPS_MIRAGE)
- set_irq_type(AU1500_GPIO7_INT, IRQF_TRIGGER_RISING); /* TS pendown */
+ irq_set_irq_type(AU1500_GPIO7_INT, IRQF_TRIGGER_RISING); /* TS pendown */
#elif defined(CONFIG_MIPS_DB1550)
- set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
- set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); /* CD1# */
- set_irq_type(AU1550_GPIO3_INT, IRQF_TRIGGER_LOW); /* CARD0# */
- set_irq_type(AU1550_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
- set_irq_type(AU1550_GPIO21_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
- set_irq_type(AU1550_GPIO22_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
+ irq_set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
+ irq_set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); /* CD1# */
+ irq_set_irq_type(AU1550_GPIO3_INT, IRQF_TRIGGER_LOW); /* CARD0# */
+ irq_set_irq_type(AU1550_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
+ irq_set_irq_type(AU1550_GPIO21_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
+ irq_set_irq_type(AU1550_GPIO22_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
#elif defined(CONFIG_MIPS_DB1500)
- set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
- set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
- set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
- set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
- set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
- set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
+ irq_set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
+ irq_set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
+ irq_set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
+ irq_set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
+ irq_set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
+ irq_set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
#elif defined(CONFIG_MIPS_DB1100)
- set_irq_type(AU1100_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
- set_irq_type(AU1100_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
- set_irq_type(AU1100_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
- set_irq_type(AU1100_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
- set_irq_type(AU1100_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
- set_irq_type(AU1100_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
+ irq_set_irq_type(AU1100_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
+ irq_set_irq_type(AU1100_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
+ irq_set_irq_type(AU1100_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
+ irq_set_irq_type(AU1100_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
+ irq_set_irq_type(AU1100_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
+ irq_set_irq_type(AU1100_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
#elif defined(CONFIG_MIPS_DB1000)
- set_irq_type(AU1000_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
- set_irq_type(AU1000_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
- set_irq_type(AU1000_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
- set_irq_type(AU1000_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
- set_irq_type(AU1000_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
- set_irq_type(AU1000_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
+ irq_set_irq_type(AU1000_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
+ irq_set_irq_type(AU1000_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
+ irq_set_irq_type(AU1000_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
+ irq_set_irq_type(AU1000_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
+ irq_set_irq_type(AU1000_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
+ irq_set_irq_type(AU1000_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
#endif
return 0;
}
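
One note on the trigger constants used in these Alchemy board hunks: the IRQF_TRIGGER_* flags are defined to the same values as the IRQ_TYPE_* constants, so passing IRQF_TRIGGER_LOW to irq_set_irq_type() behaves the same as the canonical spelling:

	irq_set_irq_type(AU1000_GPIO0_INT, IRQ_TYPE_LEVEL_LOW);	/* equivalent */
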
static int __init pb1000_init_irq(void)
{
- set_irq_type(AU1000_GPIO15_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1000_GPIO15_INT, IRQF_TRIGGER_LOW);
return 0;
}
arch_initcall(pb1000_init_irq);
static int __init pb1100_init_irq(void)
{
- set_irq_type(AU1100_GPIO9_INT, IRQF_TRIGGER_LOW); /* PCCD# */
- set_irq_type(AU1100_GPIO10_INT, IRQF_TRIGGER_LOW); /* PCSTSCHG# */
- set_irq_type(AU1100_GPIO11_INT, IRQF_TRIGGER_LOW); /* PCCard# */
- set_irq_type(AU1100_GPIO13_INT, IRQF_TRIGGER_LOW); /* DC_IRQ# */
+ irq_set_irq_type(AU1100_GPIO9_INT, IRQF_TRIGGER_LOW); /* PCCD# */
+ irq_set_irq_type(AU1100_GPIO10_INT, IRQF_TRIGGER_LOW); /* PCSTSCHG# */
+ irq_set_irq_type(AU1100_GPIO11_INT, IRQF_TRIGGER_LOW); /* PCCard# */
+ irq_set_irq_type(AU1100_GPIO13_INT, IRQF_TRIGGER_LOW); /* DC_IRQ# */
return 0;
}
panic("Game over. Your score is 0.");
}
- set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW);
bcsr_init_irq(PB1200_INT_BEGIN, PB1200_INT_END, AU1200_GPIO7_INT);
return 0;
static int __init pb1500_init_irq(void)
{
- set_irq_type(AU1500_GPIO9_INT, IRQF_TRIGGER_LOW); /* CD0# */
- set_irq_type(AU1500_GPIO10_INT, IRQF_TRIGGER_LOW); /* CARD0 */
- set_irq_type(AU1500_GPIO11_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
- set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
- set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO9_INT, IRQF_TRIGGER_LOW); /* CD0# */
+ irq_set_irq_type(AU1500_GPIO10_INT, IRQF_TRIGGER_LOW); /* CARD0 */
+ irq_set_irq_type(AU1500_GPIO11_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
+ irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
+ irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
return 0;
}
static int __init pb1550_init_irq(void)
{
- set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1550_GPIO201_205_INT, IRQF_TRIGGER_HIGH);
+ irq_set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1550_GPIO201_205_INT, IRQF_TRIGGER_HIGH);
/* enable both PCMCIA card irqs in the shared line */
alchemy_gpio2_enable_int(201);
static int __init mtx1_init_irq(void)
{
- set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
- set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
+ irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
return 0;
}
static int __init xxs1500_init_irq(void)
{
- set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
- set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO207_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
+ irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO207_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW);
- set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* CF irq */
- set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* CF irq */
+ irq_set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW);
return 0;
}
for (i = 0; i < 40; i++) {
writel(i, REG(CHNL_OFFSET(i)));
/* Primary IRQ's */
- set_irq_chip_and_handler(base + i, &ar7_irq_type,
+ irq_set_chip_and_handler(base + i, &ar7_irq_type,
handle_level_irq);
/* Secondary IRQ's */
if (i < 32)
- set_irq_chip_and_handler(base + i + 40,
+ irq_set_chip_and_handler(base + i + 40,
&ar7_sec_irq_type,
handle_level_irq);
}
for (i = ATH79_MISC_IRQ_BASE;
i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) {
- set_irq_chip_and_handler(i, &ath79_misc_irq_chip,
+ irq_set_chip_and_handler(i, &ath79_misc_irq_chip,
handle_level_irq);
}
- set_irq_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
+ irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
}
asmlinkage void plat_irq_dispatch(void)
mips_cpu_irq_init();
for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
- set_irq_chip_and_handler(i, &bcm63xx_internal_irq_chip,
+ irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
handle_level_irq);
for (i = IRQ_EXT_BASE; i < IRQ_EXT_BASE + 4; ++i)
- set_irq_chip_and_handler(i, &bcm63xx_external_irq_chip,
+ irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
handle_edge_irq);
setup_irq(IRQ_MIPS_BASE + 2, &cpu_ip2_cascade_action);
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
+ * Copyright (C) 2004-2008, 2009, 2010, 2011 Cavium Networks
*/
-#include <linux/irq.h>
+
#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/percpu.h>
+#include <linux/irq.h>
#include <linux/smp.h>
#include <asm/octeon/octeon.h>
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
+static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
+static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
+
+static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
+
+union octeon_ciu_chip_data {
+ void *p;
+ unsigned long l;
+ struct {
+ unsigned int line:6;
+ unsigned int bit:6;
+ } s;
+};
+
+struct octeon_core_chip_data {
+ struct mutex core_irq_mutex;
+ bool current_en;
+ bool desired_en;
+ u8 bit;
+};
+
+#define MIPS_CORE_IRQ_LINES 8
+
+static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
+
+static void __init octeon_irq_set_ciu_mapping(int irq, int line, int bit,
+ struct irq_chip *chip,
+ irq_flow_handler_t handler)
+{
+ union octeon_ciu_chip_data cd;
+
+ irq_set_chip_and_handler(irq, chip, handler);
+
+ cd.l = 0;
+ cd.s.line = line;
+ cd.s.bit = bit;
+
+ irq_set_chip_data(irq, cd.p);
+ octeon_irq_ciu_to_irq[line][bit] = irq;
+}
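
The octeon_ciu_chip_data union above packs the CIU line and bit number into the pointer-sized chip_data slot, so every irq_data-based callback can recover its register coordinates without touching irq_desc. A sketch of the encode/decode round trip; the function names and irq argument are illustrative:

	static void example_map(unsigned int irq, int line, int bit)
	{
		union octeon_ciu_chip_data cd;

		cd.l = 0;
		cd.s.line = line;	/* 0 selects EN0, 1 selects EN1 */
		cd.s.bit = bit;		/* bit position within that register */
		irq_set_chip_data(irq, cd.p);
	}

	static void example_callback(struct irq_data *data)
	{
		union octeon_ciu_chip_data cd;

		cd.p = irq_data_get_irq_chip_data(data);
		/* cd.s.line and cd.s.bit are available again here */
	}
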
+
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
#endif
}
-static void octeon_irq_core_ack(unsigned int irq)
+static int octeon_cpu_for_coreid(int coreid)
+{
+#ifdef CONFIG_SMP
+ return cpu_number_map(coreid);
+#else
+ return smp_processor_id();
+#endif
+}
+
+static void octeon_irq_core_ack(struct irq_data *data)
{
- unsigned int bit = irq - OCTEON_IRQ_SW0;
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+ unsigned int bit = cd->bit;
+
/*
* We don't need to disable IRQs to make these atomic since
* they are already disabled earlier in the low level
clear_c0_cause(0x100 << bit);
}
-static void octeon_irq_core_eoi(unsigned int irq)
+static void octeon_irq_core_eoi(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(irq);
- unsigned int bit = irq - OCTEON_IRQ_SW0;
- /*
- * If an IRQ is being processed while we are disabling it the
- * handler will attempt to unmask the interrupt after it has
- * been disabled.
- */
- if ((unlikely(desc->status & IRQ_DISABLED)))
- return;
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+
/*
* We don't need to disable IRQs to make these atomic since
* they are already disabled earlier in the low level
* interrupt code.
*/
- set_c0_status(0x100 << bit);
+ set_c0_status(0x100 << cd->bit);
}
-static void octeon_irq_core_enable(unsigned int irq)
+static void octeon_irq_core_set_enable_local(void *arg)
{
- unsigned long flags;
- unsigned int bit = irq - OCTEON_IRQ_SW0;
+ struct irq_data *data = arg;
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+ unsigned int mask = 0x100 << cd->bit;
/*
- * We need to disable interrupts to make sure our updates are
- * atomic.
+ * Interrupts are already disabled, so these are atomic.
*/
- local_irq_save(flags);
- set_c0_status(0x100 << bit);
- local_irq_restore(flags);
+ if (cd->desired_en)
+ set_c0_status(mask);
+ else
+ clear_c0_status(mask);
}
-static void octeon_irq_core_disable_local(unsigned int irq)
+static void octeon_irq_core_disable(struct irq_data *data)
{
- unsigned long flags;
- unsigned int bit = irq - OCTEON_IRQ_SW0;
- /*
- * We need to disable interrupts to make sure our updates are
- * atomic.
- */
- local_irq_save(flags);
- clear_c0_status(0x100 << bit);
- local_irq_restore(flags);
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+ cd->desired_en = false;
}
-static void octeon_irq_core_disable(unsigned int irq)
+static void octeon_irq_core_enable(struct irq_data *data)
{
-#ifdef CONFIG_SMP
- on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
- (void *) (long) irq, 1);
-#else
- octeon_irq_core_disable_local(irq);
-#endif
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+ cd->desired_en = true;
}
-static struct irq_chip octeon_irq_chip_core = {
- .name = "Core",
- .enable = octeon_irq_core_enable,
- .disable = octeon_irq_core_disable,
- .ack = octeon_irq_core_ack,
- .eoi = octeon_irq_core_eoi,
-};
+static void octeon_irq_core_bus_lock(struct irq_data *data)
+{
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+ mutex_lock(&cd->core_irq_mutex);
+}
-static void octeon_irq_ciu0_ack(unsigned int irq)
+static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
- switch (irq) {
- case OCTEON_IRQ_GMX_DRP0:
- case OCTEON_IRQ_GMX_DRP1:
- case OCTEON_IRQ_IPD_DRP:
- case OCTEON_IRQ_KEY_ZERO:
- case OCTEON_IRQ_TIMER0:
- case OCTEON_IRQ_TIMER1:
- case OCTEON_IRQ_TIMER2:
- case OCTEON_IRQ_TIMER3:
- {
- int index = cvmx_get_core_num() * 2;
- u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
- /*
- * CIU timer type interrupts must be acknoleged by
- * writing a '1' bit to their sum0 bit.
- */
- cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
- break;
- }
- default:
- break;
+ struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+ if (cd->desired_en != cd->current_en) {
+ on_each_cpu(octeon_irq_core_set_enable_local, data, 1);
+
+ cd->current_en = cd->desired_en;
}
- /*
- * In order to avoid any locking accessing the CIU, we
- * acknowledge CIU interrupts by disabling all of them. This
- * way we can use a per core register and avoid any out of
- * core locking requirements. This has the side affect that
- * CIU interrupts can't be processed recursively.
- *
- * We don't need to disable IRQs to make these atomic since
- * they are already disabled earlier in the low level
- * interrupt code.
- */
- clear_c0_status(0x100 << 2);
+ mutex_unlock(&cd->core_irq_mutex);
}
-static void octeon_irq_ciu0_eoi(unsigned int irq)
+static struct irq_chip octeon_irq_chip_core = {
+ .name = "Core",
+ .irq_enable = octeon_irq_core_enable,
+ .irq_disable = octeon_irq_core_disable,
+ .irq_ack = octeon_irq_core_ack,
+ .irq_eoi = octeon_irq_core_eoi,
+ .irq_bus_lock = octeon_irq_core_bus_lock,
+ .irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,
+
+ .irq_cpu_online = octeon_irq_core_eoi,
+ .irq_cpu_offline = octeon_irq_core_ack,
+ .flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
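
The core chip above uses the irq_bus_lock/irq_bus_sync_unlock pair as a slow path: irq_enable/irq_disable only record the desired state under the mutex, and the cross-core broadcast (on_each_cpu) happens in sync_unlock, which the genirq core calls from sleepable context after dropping the raw desc lock. The same shape, reduced to a sketch with illustrative names:

	struct example_slow_chip_data {
		struct mutex lock;
		bool desired, current_state;
	};

	static void example_bus_lock(struct irq_data *d)
	{
		struct example_slow_chip_data *cd = irq_data_get_irq_chip_data(d);

		mutex_lock(&cd->lock);
	}

	static void example_bus_sync_unlock(struct irq_data *d)
	{
		struct example_slow_chip_data *cd = irq_data_get_irq_chip_data(d);

		if (cd->desired != cd->current_state)
			cd->current_state = cd->desired;	/* push to hardware here */
		mutex_unlock(&cd->lock);
	}
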
+
+static void __init octeon_irq_init_core(void)
{
- /*
- * Enable all CIU interrupts again. We don't need to disable
- * IRQs to make these atomic since they are already disabled
- * earlier in the low level interrupt code.
- */
- set_c0_status(0x100 << 2);
+ int i;
+ int irq;
+ struct octeon_core_chip_data *cd;
+
+ for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
+ cd = &octeon_irq_core_chip_data[i];
+ cd->current_en = false;
+ cd->desired_en = false;
+ cd->bit = i;
+ mutex_init(&cd->core_irq_mutex);
+
+ irq = OCTEON_IRQ_SW0 + i;
+ switch (irq) {
+ case OCTEON_IRQ_TIMER:
+ case OCTEON_IRQ_SW0:
+ case OCTEON_IRQ_SW1:
+ case OCTEON_IRQ_5:
+ case OCTEON_IRQ_PERF:
+ irq_set_chip_data(irq, cd);
+ irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
+ handle_percpu_irq);
+ break;
+ default:
+ break;
+ }
+ }
}
-static int next_coreid_for_irq(struct irq_desc *desc)
+static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
- int coreid;
- int weight = cpumask_weight(desc->affinity);
+ int cpu;
+ int weight = cpumask_weight(data->affinity);
if (weight > 1) {
- int cpu = smp_processor_id();
+ cpu = smp_processor_id();
for (;;) {
- cpu = cpumask_next(cpu, desc->affinity);
+ cpu = cpumask_next(cpu, data->affinity);
if (cpu >= nr_cpu_ids) {
cpu = -1;
continue;
break;
}
}
- coreid = octeon_coreid_for_cpu(cpu);
} else if (weight == 1) {
- coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity));
+ cpu = cpumask_first(data->affinity);
} else {
- coreid = cvmx_get_core_num();
+ cpu = smp_processor_id();
}
- return coreid;
+ return cpu;
#else
- return cvmx_get_core_num();
+ return smp_processor_id();
#endif
}
-static void octeon_irq_ciu0_enable(unsigned int irq)
+static void octeon_irq_ciu_enable(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(irq);
- int coreid = next_coreid_for_irq(desc);
+ int cpu = next_cpu_for_irq(data);
+ int coreid = octeon_coreid_for_cpu(cpu);
+ unsigned long *pen;
unsigned long flags;
- uint64_t en0;
- int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
+ union octeon_ciu_chip_data cd;
+
+ cd.p = irq_data_get_irq_chip_data(data);
- raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
- en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
- en0 |= 1ull << bit;
- cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
- cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
- raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
+ if (cd.s.line == 0) {
+ raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
+ pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+ set_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
+ } else {
+ raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
+ pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+ set_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+ }
}
-static void octeon_irq_ciu0_enable_mbox(unsigned int irq)
+static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
- int coreid = cvmx_get_core_num();
+ unsigned long *pen;
+ unsigned long flags;
+ union octeon_ciu_chip_data cd;
+
+ cd.p = irq_data_get_irq_chip_data(data);
+
+ if (cd.s.line == 0) {
+ raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
+ pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
+ set_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
+ } else {
+ raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
+ pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
+ set_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+ }
+}
+
+static void octeon_irq_ciu_disable_local(struct irq_data *data)
+{
+ unsigned long *pen;
unsigned long flags;
- uint64_t en0;
- int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
+ union octeon_ciu_chip_data cd;
+
+ cd.p = irq_data_get_irq_chip_data(data);
- raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
- en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
- en0 |= 1ull << bit;
- cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
- cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
- raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
+ if (cd.s.line == 0) {
+ raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
+ pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
+ clear_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
+ } else {
+ raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
+ pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
+ clear_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+ }
}
-static void octeon_irq_ciu0_disable(unsigned int irq)
+static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
- int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
unsigned long flags;
- uint64_t en0;
+ unsigned long *pen;
int cpu;
- raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
- for_each_online_cpu(cpu) {
- int coreid = octeon_coreid_for_cpu(cpu);
- en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
- en0 &= ~(1ull << bit);
- cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+ union octeon_ciu_chip_data cd;
+
+ wmb(); /* Make sure flag changes arrive before register updates. */
+
+ cd.p = irq_data_get_irq_chip_data(data);
+
+ if (cd.s.line == 0) {
+ raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
+ for_each_online_cpu(cpu) {
+ int coreid = octeon_coreid_for_cpu(cpu);
+ pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+ clear_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+ }
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
+ } else {
+ raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
+ for_each_online_cpu(cpu) {
+ int coreid = octeon_coreid_for_cpu(cpu);
+ pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+ clear_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+ }
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+ }
+}
+
+static void octeon_irq_ciu_enable_all(struct irq_data *data)
+{
+ unsigned long flags;
+ unsigned long *pen;
+ int cpu;
+ union octeon_ciu_chip_data cd;
+
+ cd.p = irq_data_get_irq_chip_data(data);
+
+ if (cd.s.line == 0) {
+ raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
+ for_each_online_cpu(cpu) {
+ int coreid = octeon_coreid_for_cpu(cpu);
+ pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+ set_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+ }
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
+ } else {
+ raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
+ for_each_online_cpu(cpu) {
+ int coreid = octeon_coreid_for_cpu(cpu);
+ pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+ set_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+ }
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
- /*
- * We need to do a read after the last update to make sure all
- * of them are done.
- */
- cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
- raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
/*
* Enable the irq on the next core in the affinity set for chips that
* have the EN*_W1{S,C} registers.
*/
-static void octeon_irq_ciu0_enable_v2(unsigned int irq)
+static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
- int index;
- u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
- struct irq_desc *desc = irq_to_desc(irq);
+ u64 mask;
+ int cpu = next_cpu_for_irq(data);
+ union octeon_ciu_chip_data cd;
+
+ cd.p = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd.s.bit);
- if ((desc->status & IRQ_DISABLED) == 0) {
- index = next_coreid_for_irq(desc) * 2;
+ /*
+ * Called under the desc lock, so these should never get out
+ * of sync.
+ */
+ if (cd.s.line == 0) {
+ int index = octeon_coreid_for_cpu(cpu) * 2;
+ set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+ } else {
+ int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+ set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}
}
* Enable the irq on the current CPU for chips that
* have the EN*_W1{S,C} registers.
*/
-static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq)
+static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
+{
+ u64 mask;
+ union octeon_ciu_chip_data cd;
+
+ cd.p = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd.s.bit);
+
+ if (cd.s.line == 0) {
+ int index = cvmx_get_core_num() * 2;
+ set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+ } else {
+ int index = cvmx_get_core_num() * 2 + 1;
+ set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+ }
+}
+
+static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
- int index;
- u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+ u64 mask;
+ union octeon_ciu_chip_data cd;
- index = cvmx_get_core_num() * 2;
- cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+ cd.p = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd.s.bit);
+
+ if (cd.s.line == 0) {
+ int index = cvmx_get_core_num() * 2;
+ clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+ } else {
+ int index = cvmx_get_core_num() * 2 + 1;
+ clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+ }
}
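
The _v2 helpers depend on the newer CIU's write-one-to-set (EN*_W1S) and write-one-to-clear (EN*_W1C) alias registers: a single bit can be flipped without a read-modify-write cycle or a spinlock, and only the per-cpu enable mirror needs an atomic set_bit/clear_bit. In sketch form, for an illustrative index and bit:

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), 1ull << bit);	/* enable */
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), 1ull << bit);	/* disable */
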
/*
- * Disable the irq on the current core for chips that have the EN*_W1{S,C}
- * registers.
+ * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
*/
-static void octeon_irq_ciu0_ack_v2(unsigned int irq)
-{
- int index = cvmx_get_core_num() * 2;
- u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
-
- switch (irq) {
- case OCTEON_IRQ_GMX_DRP0:
- case OCTEON_IRQ_GMX_DRP1:
- case OCTEON_IRQ_IPD_DRP:
- case OCTEON_IRQ_KEY_ZERO:
- case OCTEON_IRQ_TIMER0:
- case OCTEON_IRQ_TIMER1:
- case OCTEON_IRQ_TIMER2:
- case OCTEON_IRQ_TIMER3:
- /*
- * CIU timer type interrupts must be acknoleged by
- * writing a '1' bit to their sum0 bit.
- */
+static void octeon_irq_ciu_ack(struct irq_data *data)
+{
+ u64 mask;
+ union octeon_ciu_chip_data cd;
+
+ cd.p = data->chip_data;
+ mask = 1ull << (cd.s.bit);
+
+ if (cd.s.line == 0) {
+ int index = cvmx_get_core_num() * 2;
cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
- break;
- default:
- break;
+ } else {
+ cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
}
-
- cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}
/*
- * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * Disable the irq on all cores for chips that have the EN*_W1{S,C}
* registers.
*/
-static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq)
+static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(irq);
- int index = cvmx_get_core_num() * 2;
- u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+ int cpu;
+ u64 mask;
+ union octeon_ciu_chip_data cd;
- if (likely((desc->status & IRQ_DISABLED) == 0))
- cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+ wmb(); /* Make sure flag changes arrive before register updates. */
+
+ cd.p = data->chip_data;
+ mask = 1ull << (cd.s.bit);
+
+ if (cd.s.line == 0) {
+ for_each_online_cpu(cpu) {
+ int index = octeon_coreid_for_cpu(cpu) * 2;
+ clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+ }
+ } else {
+ for_each_online_cpu(cpu) {
+ int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+ clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+ }
+ }
}
/*
- * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
+ * Enable the irq on all cores for chips that have the EN*_W1{S,C}
* registers.
*/
-static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
+static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
- u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
- int index;
int cpu;
- for_each_online_cpu(cpu) {
- index = octeon_coreid_for_cpu(cpu) * 2;
- cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+ u64 mask;
+ union octeon_ciu_chip_data cd;
+
+ cd.p = data->chip_data;
+ mask = 1ull << (cd.s.bit);
+
+ if (cd.s.line == 0) {
+ for_each_online_cpu(cpu) {
+ int index = octeon_coreid_for_cpu(cpu) * 2;
+ set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+ }
+ } else {
+ for_each_online_cpu(cpu) {
+ int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+ set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+ }
}
}
#ifdef CONFIG_SMP
-static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
+
+static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
+{
+ int cpu = smp_processor_id();
+ cpumask_t new_affinity;
+
+ if (!cpumask_test_cpu(cpu, data->affinity))
+ return;
+
+ if (cpumask_weight(data->affinity) > 1) {
+ /*
+		 * It has multi-CPU affinity; just remove this CPU
+ * from the affinity set.
+ */
+ cpumask_copy(&new_affinity, data->affinity);
+ cpumask_clear_cpu(cpu, &new_affinity);
+ } else {
+ /* Otherwise, put it on lowest numbered online CPU. */
+ cpumask_clear(&new_affinity);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+ }
+ __irq_set_affinity_locked(data, &new_affinity);
+}
+
+static int octeon_irq_ciu_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force)
{
int cpu;
- struct irq_desc *desc = irq_to_desc(irq);
- int enable_one = (desc->status & IRQ_DISABLED) == 0;
+ bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
unsigned long flags;
- int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
+ union octeon_ciu_chip_data cd;
+
+ cd.p = data->chip_data;
/*
* For non-v2 CIU, we will allow only single CPU affinity.
if (cpumask_weight(dest) != 1)
return -EINVAL;
- raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
- for_each_online_cpu(cpu) {
- int coreid = octeon_coreid_for_cpu(cpu);
- uint64_t en0 =
- cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
- if (cpumask_test_cpu(cpu, dest) && enable_one) {
- enable_one = 0;
- en0 |= 1ull << bit;
- } else {
- en0 &= ~(1ull << bit);
+ if (!enable_one)
+ return 0;
+
+ if (cd.s.line == 0) {
+ raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
+ for_each_online_cpu(cpu) {
+ int coreid = octeon_coreid_for_cpu(cpu);
+ unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+
+ if (cpumask_test_cpu(cpu, dest) && enable_one) {
+ enable_one = false;
+ set_bit(cd.s.bit, pen);
+ } else {
+ clear_bit(cd.s.bit, pen);
+ }
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
}
- cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
+ } else {
+ raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
+ for_each_online_cpu(cpu) {
+ int coreid = octeon_coreid_for_cpu(cpu);
+ unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+
+ if (cpumask_test_cpu(cpu, dest) && enable_one) {
+ enable_one = false;
+ set_bit(cd.s.bit, pen);
+ } else {
+ clear_bit(cd.s.bit, pen);
+ }
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+ }
+ raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
- /*
- * We need to do a read after the last update to make sure all
- * of them are done.
- */
- cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
- raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
-
return 0;
}
* Set affinity for the irq for chips that have the EN*_W1{S,C}
* registers.
*/
-static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
- const struct cpumask *dest)
+static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
{
int cpu;
- int index;
- struct irq_desc *desc = irq_to_desc(irq);
- int enable_one = (desc->status & IRQ_DISABLED) == 0;
- u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
-
- for_each_online_cpu(cpu) {
- index = octeon_coreid_for_cpu(cpu) * 2;
- if (cpumask_test_cpu(cpu, dest) && enable_one) {
- enable_one = 0;
- cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
- } else {
- cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+ bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+ u64 mask;
+ union octeon_ciu_chip_data cd;
+
+ if (!enable_one)
+ return 0;
+
+ cd.p = data->chip_data;
+ mask = 1ull << cd.s.bit;
+
+ if (cd.s.line == 0) {
+ for_each_online_cpu(cpu) {
+ unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+ int index = octeon_coreid_for_cpu(cpu) * 2;
+ if (cpumask_test_cpu(cpu, dest) && enable_one) {
+ enable_one = false;
+ set_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+ } else {
+ clear_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+ }
+ }
+ } else {
+ for_each_online_cpu(cpu) {
+ unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+ int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+ if (cpumask_test_cpu(cpu, dest) && enable_one) {
+ enable_one = false;
+ set_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+ } else {
+ clear_bit(cd.s.bit, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+ }
}
}
return 0;
}
#endif
+/*
+ * The v1 CIU code already masks things, so supply a dummy version to
+ * the core chip code.
+ */
+static void octeon_irq_dummy_mask(struct irq_data *data)
+{
+}
+
/*
* Newer octeon chips have support for lockless CIU operation.
*/
-static struct irq_chip octeon_irq_chip_ciu0_v2 = {
- .name = "CIU0",
- .enable = octeon_irq_ciu0_enable_v2,
- .disable = octeon_irq_ciu0_disable_all_v2,
- .eoi = octeon_irq_ciu0_enable_v2,
+static struct irq_chip octeon_irq_chip_ciu_v2 = {
+ .name = "CIU",
+ .irq_enable = octeon_irq_ciu_enable_v2,
+ .irq_disable = octeon_irq_ciu_disable_all_v2,
+ .irq_mask = octeon_irq_ciu_disable_local_v2,
+ .irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
- .set_affinity = octeon_irq_ciu0_set_affinity_v2,
+ .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
-static struct irq_chip octeon_irq_chip_ciu0 = {
- .name = "CIU0",
- .enable = octeon_irq_ciu0_enable,
- .disable = octeon_irq_ciu0_disable,
- .eoi = octeon_irq_ciu0_eoi,
+static struct irq_chip octeon_irq_chip_ciu_edge_v2 = {
+ .name = "CIU-E",
+ .irq_enable = octeon_irq_ciu_enable_v2,
+ .irq_disable = octeon_irq_ciu_disable_all_v2,
+ .irq_ack = octeon_irq_ciu_ack,
+ .irq_mask = octeon_irq_ciu_disable_local_v2,
+ .irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
- .set_affinity = octeon_irq_ciu0_set_affinity,
+ .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
-/* The mbox versions don't do any affinity or round-robin. */
-static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = {
- .name = "CIU0-M",
- .enable = octeon_irq_ciu0_enable_mbox_v2,
- .disable = octeon_irq_ciu0_disable,
- .eoi = octeon_irq_ciu0_eoi_mbox_v2,
+static struct irq_chip octeon_irq_chip_ciu = {
+ .name = "CIU",
+ .irq_enable = octeon_irq_ciu_enable,
+ .irq_disable = octeon_irq_ciu_disable_all,
+ .irq_mask = octeon_irq_dummy_mask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
};
-static struct irq_chip octeon_irq_chip_ciu0_mbox = {
- .name = "CIU0-M",
- .enable = octeon_irq_ciu0_enable_mbox,
- .disable = octeon_irq_ciu0_disable,
- .eoi = octeon_irq_ciu0_eoi,
+static struct irq_chip octeon_irq_chip_ciu_edge = {
+ .name = "CIU-E",
+ .irq_enable = octeon_irq_ciu_enable,
+ .irq_disable = octeon_irq_ciu_disable_all,
+ .irq_mask = octeon_irq_dummy_mask,
+ .irq_ack = octeon_irq_ciu_ack,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
};
-static void octeon_irq_ciu1_ack(unsigned int irq)
-{
- /*
- * In order to avoid any locking accessing the CIU, we
- * acknowledge CIU interrupts by disabling all of them. This
- * way we can use a per core register and avoid any out of
- * core locking requirements. This has the side affect that
- * CIU interrupts can't be processed recursively. We don't
- * need to disable IRQs to make these atomic since they are
- * already disabled earlier in the low level interrupt code.
- */
- clear_c0_status(0x100 << 3);
-}
+/* The mbox versions don't do any affinity or round-robin. */
+static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
+ .name = "CIU-M",
+ .irq_enable = octeon_irq_ciu_enable_all_v2,
+ .irq_disable = octeon_irq_ciu_disable_all_v2,
+ .irq_ack = octeon_irq_ciu_disable_local_v2,
+ .irq_eoi = octeon_irq_ciu_enable_local_v2,
+
+ .irq_cpu_online = octeon_irq_ciu_enable_local_v2,
+ .irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
+ .flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
-static void octeon_irq_ciu1_eoi(unsigned int irq)
-{
- /*
- * Enable all CIU interrupts again. We don't need to disable
- * IRQs to make these atomic since they are already disabled
- * earlier in the low level interrupt code.
- */
- set_c0_status(0x100 << 3);
-}
+static struct irq_chip octeon_irq_chip_ciu_mbox = {
+ .name = "CIU-M",
+ .irq_enable = octeon_irq_ciu_enable_all,
+ .irq_disable = octeon_irq_ciu_disable_all,
+
+ .irq_cpu_online = octeon_irq_ciu_enable_local,
+ .irq_cpu_offline = octeon_irq_ciu_disable_local,
+ .flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
-static void octeon_irq_ciu1_enable(unsigned int irq)
+/*
+ * Watchdog interrupts are special. They are associated with a single
+ * core, so we hardwire the affinity to that core.
+ */
+static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(irq);
- int coreid = next_coreid_for_irq(desc);
unsigned long flags;
- uint64_t en1;
- int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
+ unsigned long *pen;
+ int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
+ int cpu = octeon_cpu_for_coreid(coreid);
raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
- en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
- en1 |= 1ull << bit;
- cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
- cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+ set_bit(coreid, pen);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
* Watchdog interrupts are special. They are associated with a single
* core, so we hardwire the affinity to that core.
*/
-static void octeon_irq_ciu1_wd_enable(unsigned int irq)
+static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
- unsigned long flags;
- uint64_t en1;
- int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
- int coreid = bit;
+ int coreid = data->irq - OCTEON_IRQ_WDOG0;
+ int cpu = octeon_cpu_for_coreid(coreid);
- raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
- en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
- en1 |= 1ull << bit;
- cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
- cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
- raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+ set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+ cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}
-static void octeon_irq_ciu1_disable(unsigned int irq)
+
+static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
+ .name = "CIU-W",
+ .irq_enable = octeon_irq_ciu1_wd_enable_v2,
+ .irq_disable = octeon_irq_ciu_disable_all_v2,
+ .irq_mask = octeon_irq_ciu_disable_local_v2,
+ .irq_unmask = octeon_irq_ciu_enable_local_v2,
+};
+
+static struct irq_chip octeon_irq_chip_ciu_wd = {
+ .name = "CIU-W",
+ .irq_enable = octeon_irq_ciu_wd_enable,
+ .irq_disable = octeon_irq_ciu_disable_all,
+ .irq_mask = octeon_irq_dummy_mask,
+};
+
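/*
 * Illustrative sketch, not part of the patch: why the *_v2 helpers can
 * drop the spinlock.  On older chips, enabling one source means a
 * read-modify-write of the whole CIU_INTX_EN1 register, which has to be
 * serialized against the other CPUs; the EN*_W1S/EN*_W1C registers on
 * newer chips let a single store set or clear exactly one bit.  The two
 * hypothetical helpers below only restate that contrast using the cvmx
 * accessors already used elsewhere in this patch.
 */
static inline void ciu1_enable_bit_rmw(int index, int bit)	/* pre-v2 */
{
	u64 en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(index));	/* read   */
	en1 |= 1ull << bit;					/* modify */
	cvmx_write_csr(CVMX_CIU_INTX_EN1(index), en1);		/* write  */
}

static inline void ciu1_enable_bit_w1s(int index, int bit)	/* v2 */
{
	/* one store; the hardware ORs the mask in, so no lock is needed */
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), 1ull << bit);
}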
+static void octeon_irq_ip2_v1(void)
{
- int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
- unsigned long flags;
- uint64_t en1;
- int cpu;
- raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
- for_each_online_cpu(cpu) {
- int coreid = octeon_coreid_for_cpu(cpu);
- en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
- en1 &= ~(1ull << bit);
- cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+ const unsigned long core_id = cvmx_get_core_num();
+ u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
+
+ ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
+ clear_c0_status(STATUSF_IP2);
+ if (likely(ciu_sum)) {
+ int bit = fls64(ciu_sum) - 1;
+ int irq = octeon_irq_ciu_to_irq[0][bit];
+ if (likely(irq))
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+ } else {
+ spurious_interrupt();
}
- /*
- * We need to do a read after the last update to make sure all
- * of them are done.
- */
- cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
- raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+ set_c0_status(STATUSF_IP2);
}
-/*
- * Enable the irq on the current core for chips that have the EN*_W1{S,C}
- * registers.
- */
-static void octeon_irq_ciu1_enable_v2(unsigned int irq)
+static void octeon_irq_ip2_v2(void)
{
- int index;
- u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
- struct irq_desc *desc = irq_to_desc(irq);
-
- if ((desc->status & IRQ_DISABLED) == 0) {
- index = next_coreid_for_irq(desc) * 2 + 1;
- cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+ const unsigned long core_id = cvmx_get_core_num();
+ u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
+
+ ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
+ if (likely(ciu_sum)) {
+ int bit = fls64(ciu_sum) - 1;
+ int irq = octeon_irq_ciu_to_irq[0][bit];
+ if (likely(irq))
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+ } else {
+ spurious_interrupt();
}
}
-
-/*
- * Watchdog interrupts are special. They are associated with a single
- * core, so we hardwire the affinity to that core.
- */
-static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq)
+static void octeon_irq_ip3_v1(void)
{
- int index;
- int coreid = irq - OCTEON_IRQ_WDOG0;
- u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
- struct irq_desc *desc = irq_to_desc(irq);
-
- if ((desc->status & IRQ_DISABLED) == 0) {
- index = coreid * 2 + 1;
- cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+ u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
+
+ ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
+ clear_c0_status(STATUSF_IP3);
+ if (likely(ciu_sum)) {
+ int bit = fls64(ciu_sum) - 1;
+ int irq = octeon_irq_ciu_to_irq[1][bit];
+ if (likely(irq))
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+ } else {
+ spurious_interrupt();
}
+ set_c0_status(STATUSF_IP3);
}
-/*
- * Disable the irq on the current core for chips that have the EN*_W1{S,C}
- * registers.
- */
-static void octeon_irq_ciu1_ack_v2(unsigned int irq)
+static void octeon_irq_ip3_v2(void)
{
- int index = cvmx_get_core_num() * 2 + 1;
- u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
-
- cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+ u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
+
+ ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
+ if (likely(ciu_sum)) {
+ int bit = fls64(ciu_sum) - 1;
+ int irq = octeon_irq_ciu_to_irq[1][bit];
+ if (likely(irq))
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+ } else {
+ spurious_interrupt();
+ }
}
-/*
- * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
- * registers.
- */
-static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
+static void octeon_irq_ip4_mask(void)
{
- u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
- int index;
- int cpu;
- for_each_online_cpu(cpu) {
- index = octeon_coreid_for_cpu(cpu) * 2 + 1;
- cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
- }
+ clear_c0_status(STATUSF_IP4);
+ spurious_interrupt();
}
-#ifdef CONFIG_SMP
-static int octeon_irq_ciu1_set_affinity(unsigned int irq,
- const struct cpumask *dest)
-{
- int cpu;
- struct irq_desc *desc = irq_to_desc(irq);
- int enable_one = (desc->status & IRQ_DISABLED) == 0;
- unsigned long flags;
- int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
+static void (*octeon_irq_ip2)(void);
+static void (*octeon_irq_ip3)(void);
+static void (*octeon_irq_ip4)(void);
- /*
- * For non-v2 CIU, we will allow only single CPU affinity.
- * This removes the need to do locking in the .ack/.eoi
- * functions.
- */
- if (cpumask_weight(dest) != 1)
- return -EINVAL;
+void __cpuinitdata (*octeon_irq_setup_secondary)(void);
- raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
- for_each_online_cpu(cpu) {
- int coreid = octeon_coreid_for_cpu(cpu);
- uint64_t en1 =
- cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
- if (cpumask_test_cpu(cpu, dest) && enable_one) {
- enable_one = 0;
- en1 |= 1ull << bit;
- } else {
- en1 &= ~(1ull << bit);
- }
- cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
- }
+static void __cpuinit octeon_irq_percpu_enable(void)
+{
+ irq_cpu_online();
+}
+
+static void __cpuinit octeon_irq_init_ciu_percpu(void)
+{
+ int coreid = cvmx_get_core_num();
/*
- * We need to do a read after the last update to make sure all
- * of them are done.
+ * Disable All CIU Interrupts. The ones we need will be
+ * enabled later. Read the SUM register so we know the write
+ * completed.
*/
- cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
- raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
-
- return 0;
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
+ cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
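/*
 * Illustrative note, not part of the patch: the trailing cvmx_read_csr()
 * above is a write-flush idiom.  As the comment in the function says, the
 * SUM register is read so we know the four masking writes have completed
 * before this core goes on to take interrupts.  A generic form of the
 * pattern, with a hypothetical helper name:
 */
static inline void csr_write_flush(u64 csr_addr, u64 value, u64 flush_addr)
{
	cvmx_write_csr(csr_addr, value);	/* write may be buffered    */
	cvmx_read_csr(flush_addr);		/* read forces it to finish */
}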
-/*
- * Set affinity for the irq for chips that have the EN*_W1{S,C}
- * registers.
- */
-static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
- const struct cpumask *dest)
+static void __cpuinit octeon_irq_setup_secondary_ciu(void)
{
- int cpu;
- int index;
- struct irq_desc *desc = irq_to_desc(irq);
- int enable_one = (desc->status & IRQ_DISABLED) == 0;
- u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
- for_each_online_cpu(cpu) {
- index = octeon_coreid_for_cpu(cpu) * 2 + 1;
- if (cpumask_test_cpu(cpu, dest) && enable_one) {
- enable_one = 0;
- cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
- } else {
- cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
- }
- }
- return 0;
-}
-#endif
-/*
- * Newer octeon chips have support for lockless CIU operation.
- */
-static struct irq_chip octeon_irq_chip_ciu1_v2 = {
- .name = "CIU1",
- .enable = octeon_irq_ciu1_enable_v2,
- .disable = octeon_irq_ciu1_disable_all_v2,
- .eoi = octeon_irq_ciu1_enable_v2,
-#ifdef CONFIG_SMP
- .set_affinity = octeon_irq_ciu1_set_affinity_v2,
-#endif
-};
+ __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
+ __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;
-static struct irq_chip octeon_irq_chip_ciu1 = {
- .name = "CIU1",
- .enable = octeon_irq_ciu1_enable,
- .disable = octeon_irq_ciu1_disable,
- .eoi = octeon_irq_ciu1_eoi,
-#ifdef CONFIG_SMP
- .set_affinity = octeon_irq_ciu1_set_affinity,
-#endif
-};
+ octeon_irq_init_ciu_percpu();
+ octeon_irq_percpu_enable();
-static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = {
- .name = "CIU1-W",
- .enable = octeon_irq_ciu1_wd_enable_v2,
- .disable = octeon_irq_ciu1_disable_all_v2,
- .eoi = octeon_irq_ciu1_wd_enable_v2,
-};
+ /* Enable the CIU lines */
+ set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+ clear_c0_status(STATUSF_IP4);
+}
-static struct irq_chip octeon_irq_chip_ciu1_wd = {
- .name = "CIU1-W",
- .enable = octeon_irq_ciu1_wd_enable,
- .disable = octeon_irq_ciu1_disable,
- .eoi = octeon_irq_ciu1_eoi,
-};
+static void __init octeon_irq_init_ciu(void)
+{
+ unsigned int i;
+ struct irq_chip *chip;
+ struct irq_chip *chip_edge;
+ struct irq_chip *chip_mbox;
+ struct irq_chip *chip_wd;
+
+ octeon_irq_init_ciu_percpu();
+ octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
-static void (*octeon_ciu0_ack)(unsigned int);
-static void (*octeon_ciu1_ack)(unsigned int);
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
+ OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ octeon_irq_ip2 = octeon_irq_ip2_v2;
+ octeon_irq_ip3 = octeon_irq_ip3_v2;
+ chip = &octeon_irq_chip_ciu_v2;
+ chip_edge = &octeon_irq_chip_ciu_edge_v2;
+ chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
+ chip_wd = &octeon_irq_chip_ciu_wd_v2;
+ } else {
+ octeon_irq_ip2 = octeon_irq_ip2_v1;
+ octeon_irq_ip3 = octeon_irq_ip3_v1;
+ chip = &octeon_irq_chip_ciu;
+ chip_edge = &octeon_irq_chip_ciu_edge;
+ chip_mbox = &octeon_irq_chip_ciu_mbox;
+ chip_wd = &octeon_irq_chip_ciu_wd;
+ }
+ octeon_irq_ip4 = octeon_irq_ip4_mask;
+
+ /* Mips internal */
+ octeon_irq_init_core();
+
+ /* CIU_0 */
+ for (i = 0; i < 16; i++)
+ octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq);
+ for (i = 0; i < 16; i++)
+ octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GPIO0, 0, i + 16, chip, handle_level_irq);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART0, 0, 34, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART1, 0, 35, chip, handle_level_irq);
+
+ for (i = 0; i < 4; i++)
+ octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq);
+ for (i = 0; i < 4; i++)
+ octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI, 0, 45, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_TRACE0, 0, 47, chip, handle_level_irq);
+
+ for (i = 0; i < 2; i++)
+ octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GMX_DRP0, 0, i + 48, chip_edge, handle_edge_irq);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD_DRP, 0, 50, chip_edge, handle_edge_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY_ZERO, 0, 51, chip_edge, handle_edge_irq);
+
+ for (i = 0; i < 4; i++)
+ octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip_edge, handle_edge_irq);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_PCM, 0, 57, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_MPI, 0, 58, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI2, 0, 59, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_POWIQ, 0, 60, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPDPPTHR, 0, 61, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII0, 0, 62, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);
+
+ /* CIU_1 */
+ for (i = 0; i < 16; i++)
+ octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII1, 1, 18, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_NAND, 1, 19, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_MIO, 1, 20, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_IOB, 1, 21, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_FPA, 1, 22, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_POW, 1, 23, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_L2C, 1, 24, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD, 1, 25, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_PIP, 1, 26, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_PKO, 1, 27, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_ZIP, 1, 28, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_TIM, 1, 29, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_RAD, 1, 30, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY, 1, 31, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFA, 1, 32, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_USBCTL, 1, 33, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_SLI, 1, 34, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_DPI, 1, 35, chip, handle_level_irq);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGX0, 1, 36, chip, handle_level_irq);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGL, 1, 46, chip, handle_level_irq);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_PTP, 1, 47, chip_edge, handle_edge_irq);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM0, 1, 48, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM1, 1, 49, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO0, 1, 50, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO1, 1, 51, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_LMC0, 1, 52, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFM, 1, 56, chip, handle_level_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_RST, 1, 63, chip, handle_level_irq);
+
+ /* Enable the CIU lines */
+ set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+ clear_c0_status(STATUSF_IP4);
+}
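/*
 * Illustrative sketch, not part of the patch: octeon_irq_set_ciu_mapping()
 * is called above for every source, but its definition is outside this
 * excerpt.  Conceptually it ties a Linux irq number to a (CIU bank, bit)
 * position and a flow handler: it must fill in the reverse table that
 * octeon_irq_ip2_v1/v2() and octeon_irq_ip3_v1/v2() consult, and register
 * the chip.  The helper name below is hypothetical.
 */
static void example_set_ciu_mapping(int irq, int ciu, int bit,
				    struct irq_chip *chip,
				    irq_flow_handler_t handler)
{
	octeon_irq_ciu_to_irq[ciu][bit] = irq;	/* SUM bit -> Linux irq */
	irq_set_chip_and_handler(irq, chip, handler);
}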
void __init arch_init_irq(void)
{
- unsigned int irq;
- struct irq_chip *chip0;
- struct irq_chip *chip0_mbox;
- struct irq_chip *chip1;
- struct irq_chip *chip1_wd;
-
#ifdef CONFIG_SMP
/* Set the default affinity to the boot cpu. */
cpumask_clear(irq_default_affinity);
cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
-
- if (NR_IRQS < OCTEON_IRQ_LAST)
- pr_err("octeon_irq_init: NR_IRQS is set too low\n");
-
- if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
- OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
- OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
- octeon_ciu0_ack = octeon_irq_ciu0_ack_v2;
- octeon_ciu1_ack = octeon_irq_ciu1_ack_v2;
- chip0 = &octeon_irq_chip_ciu0_v2;
- chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2;
- chip1 = &octeon_irq_chip_ciu1_v2;
- chip1_wd = &octeon_irq_chip_ciu1_wd_v2;
- } else {
- octeon_ciu0_ack = octeon_irq_ciu0_ack;
- octeon_ciu1_ack = octeon_irq_ciu1_ack;
- chip0 = &octeon_irq_chip_ciu0;
- chip0_mbox = &octeon_irq_chip_ciu0_mbox;
- chip1 = &octeon_irq_chip_ciu1;
- chip1_wd = &octeon_irq_chip_ciu1_wd;
- }
-
- /* 0 - 15 reserved for i8259 master and slave controller. */
-
- /* 17 - 23 Mips internal */
- for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
- set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
- handle_percpu_irq);
- }
-
- /* 24 - 87 CIU_INT_SUM0 */
- for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
- switch (irq) {
- case OCTEON_IRQ_MBOX0:
- case OCTEON_IRQ_MBOX1:
- set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq);
- break;
- default:
- set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq);
- break;
- }
- }
-
- /* 88 - 151 CIU_INT_SUM1 */
- for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++)
- set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq);
-
- for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++)
- set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq);
-
- set_c0_status(0x300 << 2);
+ octeon_irq_init_ciu();
}
asmlinkage void plat_irq_dispatch(void)
{
- const unsigned long core_id = cvmx_get_core_num();
- const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
- const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
- const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
- const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
unsigned long cop0_cause;
unsigned long cop0_status;
- uint64_t ciu_en;
- uint64_t ciu_sum;
- unsigned int irq;
while (1) {
cop0_cause = read_c0_cause();
cop0_cause &= cop0_status;
cop0_cause &= ST0_IM;
- if (unlikely(cop0_cause & STATUSF_IP2)) {
- ciu_sum = cvmx_read_csr(ciu_sum0_address);
- ciu_en = cvmx_read_csr(ciu_en0_address);
- ciu_sum &= ciu_en;
- if (likely(ciu_sum)) {
- irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1;
- octeon_ciu0_ack(irq);
- do_IRQ(irq);
- } else {
- spurious_interrupt();
- }
- } else if (unlikely(cop0_cause & STATUSF_IP3)) {
- ciu_sum = cvmx_read_csr(ciu_sum1_address);
- ciu_en = cvmx_read_csr(ciu_en1_address);
- ciu_sum &= ciu_en;
- if (likely(ciu_sum)) {
- irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1;
- octeon_ciu1_ack(irq);
- do_IRQ(irq);
- } else {
- spurious_interrupt();
- }
- } else if (likely(cop0_cause)) {
+ if (unlikely(cop0_cause & STATUSF_IP2))
+ octeon_irq_ip2();
+ else if (unlikely(cop0_cause & STATUSF_IP3))
+ octeon_irq_ip3();
+ else if (unlikely(cop0_cause & STATUSF_IP4))
+ octeon_irq_ip4();
+ else if (likely(cop0_cause))
do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
- } else {
+ else
break;
- }
}
}
void fixup_irqs(void)
{
- int irq;
- struct irq_desc *desc;
- cpumask_t new_affinity;
- unsigned long flags;
- int do_set_affinity;
- int cpu;
-
- cpu = smp_processor_id();
-
- for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
- octeon_irq_core_disable_local(irq);
-
- for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) {
- desc = irq_to_desc(irq);
- switch (irq) {
- case OCTEON_IRQ_MBOX0:
- case OCTEON_IRQ_MBOX1:
- /* The eoi function will disable them on this CPU. */
- desc->chip->eoi(irq);
- break;
- case OCTEON_IRQ_WDOG0:
- case OCTEON_IRQ_WDOG1:
- case OCTEON_IRQ_WDOG2:
- case OCTEON_IRQ_WDOG3:
- case OCTEON_IRQ_WDOG4:
- case OCTEON_IRQ_WDOG5:
- case OCTEON_IRQ_WDOG6:
- case OCTEON_IRQ_WDOG7:
- case OCTEON_IRQ_WDOG8:
- case OCTEON_IRQ_WDOG9:
- case OCTEON_IRQ_WDOG10:
- case OCTEON_IRQ_WDOG11:
- case OCTEON_IRQ_WDOG12:
- case OCTEON_IRQ_WDOG13:
- case OCTEON_IRQ_WDOG14:
- case OCTEON_IRQ_WDOG15:
- /*
- * These have special per CPU semantics and
- * are handled in the watchdog driver.
- */
- break;
- default:
- raw_spin_lock_irqsave(&desc->lock, flags);
- /*
- * If this irq has an action, it is in use and
- * must be migrated if it has affinity to this
- * cpu.
- */
- if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) {
- if (cpumask_weight(desc->affinity) > 1) {
- /*
- * It has multi CPU affinity,
- * just remove this CPU from
- * the affinity set.
- */
- cpumask_copy(&new_affinity, desc->affinity);
- cpumask_clear_cpu(cpu, &new_affinity);
- } else {
- /*
- * Otherwise, put it on lowest
- * numbered online CPU.
- */
- cpumask_clear(&new_affinity);
- cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
- }
- do_set_affinity = 1;
- } else {
- do_set_affinity = 0;
- }
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-
- if (do_set_affinity)
- irq_set_affinity(irq, &new_affinity);
-
- break;
- }
- }
+ irq_cpu_offline();
}
#endif /* CONFIG_HOTPLUG_CPU */
void __init prom_init(void)
{
struct cvmx_sysinfo *sysinfo;
- const int coreid = cvmx_get_core_num();
int i;
int argc;
#ifdef CONFIG_CAVIUM_RESERVE32
octeon_uart = octeon_get_boot_uart();
- /*
- * Disable All CIU Interrupts. The ones we need will be
- * enabled later. Read the SUM register so we know the write
- * completed.
- */
- cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
- cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
- cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
- cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
- cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
-
#ifdef CONFIG_SMP
octeon_write_lcd("LinuxSMP");
#else
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
-static void octeon_init_secondary(void)
+static void __cpuinit octeon_init_secondary(void)
{
- const int coreid = cvmx_get_core_num();
- union cvmx_ciu_intx_sum0 interrupt_enable;
unsigned int sr;
-#ifdef CONFIG_HOTPLUG_CPU
- struct linux_app_boot_info *labi;
-
- labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
-
- if (labi->labi_signature != LABI_SIGNATURE)
- panic("The bootloader version on this board is incorrect.");
-#endif
-
sr = set_c0_status(ST0_BEV);
write_c0_ebase((u32)ebase);
write_c0_status(sr);
octeon_check_cpu_bist();
octeon_init_cvmcount();
- /*
- pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid);
- */
- /* Enable Mailbox interrupts to this core. These are the only
- interrupts allowed on line 3 */
- cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff);
- interrupt_enable.u64 = 0;
- interrupt_enable.s.mbox = 0x3;
- cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64);
- cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
- cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
- cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
- /* Enable core interrupt processing for 2,3 and 7 */
- set_c0_status(0x8c01);
+
+ octeon_irq_setup_secondary();
+ raw_local_irq_enable();
}
/**
*/
void octeon_prepare_cpus(unsigned int max_cpus)
{
+#ifdef CONFIG_HOTPLUG_CPU
+ struct linux_app_boot_info *labi;
+
+ labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+
+ if (labi->labi_signature != LABI_SIGNATURE)
+ panic("The bootloader version on this board is incorrect.");
+#endif
+
cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff);
if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_DISABLED,
"mailbox0", mailbox_interrupt)) {
fast_iob();
for (i = base; i < base + IO_INR_DMA; i++)
- set_irq_chip_and_handler(i, &ioasic_irq_type,
+ irq_set_chip_and_handler(i, &ioasic_irq_type,
handle_level_irq);
for (; i < base + IO_IRQ_LINES; i++)
- set_irq_chip(i, &ioasic_dma_irq_type);
+ irq_set_chip(i, &ioasic_dma_irq_type);
ioasic_irq_base = base;
}
iob();
for (i = base; i < base + KN02_IRQ_LINES; i++)
- set_irq_chip_and_handler(i, &kn02_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &kn02_irq_type, handle_level_irq);
kn02_irq_base = base;
}
u32 i;
for (i = 0; i < NUM_EMMA2RH_IRQ; i++)
- set_irq_chip_and_handler_name(EMMA2RH_IRQ_BASE + i,
+ irq_set_chip_and_handler_name(EMMA2RH_IRQ_BASE + i,
&emma2rh_irq_controller,
handle_level_irq, "level");
}
u32 i;
for (i = 0; i < NUM_EMMA2RH_IRQ_SW; i++)
- set_irq_chip_and_handler_name(EMMA2RH_SW_IRQ_BASE + i,
+ irq_set_chip_and_handler_name(EMMA2RH_SW_IRQ_BASE + i,
&emma2rh_sw_irq_controller,
handle_level_irq, "level");
}
u32 i;
for (i = 0; i < NUM_EMMA2RH_IRQ_GPIO; i++)
- set_irq_chip_and_handler_name(EMMA2RH_GPIO_IRQ_BASE + i,
+ irq_set_chip_and_handler_name(EMMA2RH_GPIO_IRQ_BASE + i,
&emma2rh_gpio_irq_controller,
handle_edge_irq, "edge");
}
#define NR_IRQS OCTEON_IRQ_LAST
#define MIPS_CPU_IRQ_BASE OCTEON_IRQ_SW0
-/* 0 - 7 represent the i8259 master */
-#define OCTEON_IRQ_I8259M0 0
-#define OCTEON_IRQ_I8259M1 1
-#define OCTEON_IRQ_I8259M2 2
-#define OCTEON_IRQ_I8259M3 3
-#define OCTEON_IRQ_I8259M4 4
-#define OCTEON_IRQ_I8259M5 5
-#define OCTEON_IRQ_I8259M6 6
-#define OCTEON_IRQ_I8259M7 7
-/* 8 - 15 represent the i8259 slave */
-#define OCTEON_IRQ_I8259S0 8
-#define OCTEON_IRQ_I8259S1 9
-#define OCTEON_IRQ_I8259S2 10
-#define OCTEON_IRQ_I8259S3 11
-#define OCTEON_IRQ_I8259S4 12
-#define OCTEON_IRQ_I8259S5 13
-#define OCTEON_IRQ_I8259S6 14
-#define OCTEON_IRQ_I8259S7 15
-/* 16 - 23 represent the 8 MIPS standard interrupt sources */
-#define OCTEON_IRQ_SW0 16
-#define OCTEON_IRQ_SW1 17
-#define OCTEON_IRQ_CIU0 18
-#define OCTEON_IRQ_CIU1 19
-#define OCTEON_IRQ_CIU4 20
-#define OCTEON_IRQ_5 21
-#define OCTEON_IRQ_PERF 22
-#define OCTEON_IRQ_TIMER 23
-/* 24 - 87 represent the sources in CIU_INTX_EN0 */
-#define OCTEON_IRQ_WORKQ0 24
-#define OCTEON_IRQ_WORKQ1 25
-#define OCTEON_IRQ_WORKQ2 26
-#define OCTEON_IRQ_WORKQ3 27
-#define OCTEON_IRQ_WORKQ4 28
-#define OCTEON_IRQ_WORKQ5 29
-#define OCTEON_IRQ_WORKQ6 30
-#define OCTEON_IRQ_WORKQ7 31
-#define OCTEON_IRQ_WORKQ8 32
-#define OCTEON_IRQ_WORKQ9 33
-#define OCTEON_IRQ_WORKQ10 34
-#define OCTEON_IRQ_WORKQ11 35
-#define OCTEON_IRQ_WORKQ12 36
-#define OCTEON_IRQ_WORKQ13 37
-#define OCTEON_IRQ_WORKQ14 38
-#define OCTEON_IRQ_WORKQ15 39
-#define OCTEON_IRQ_GPIO0 40
-#define OCTEON_IRQ_GPIO1 41
-#define OCTEON_IRQ_GPIO2 42
-#define OCTEON_IRQ_GPIO3 43
-#define OCTEON_IRQ_GPIO4 44
-#define OCTEON_IRQ_GPIO5 45
-#define OCTEON_IRQ_GPIO6 46
-#define OCTEON_IRQ_GPIO7 47
-#define OCTEON_IRQ_GPIO8 48
-#define OCTEON_IRQ_GPIO9 49
-#define OCTEON_IRQ_GPIO10 50
-#define OCTEON_IRQ_GPIO11 51
-#define OCTEON_IRQ_GPIO12 52
-#define OCTEON_IRQ_GPIO13 53
-#define OCTEON_IRQ_GPIO14 54
-#define OCTEON_IRQ_GPIO15 55
-#define OCTEON_IRQ_MBOX0 56
-#define OCTEON_IRQ_MBOX1 57
-#define OCTEON_IRQ_UART0 58
-#define OCTEON_IRQ_UART1 59
-#define OCTEON_IRQ_PCI_INT0 60
-#define OCTEON_IRQ_PCI_INT1 61
-#define OCTEON_IRQ_PCI_INT2 62
-#define OCTEON_IRQ_PCI_INT3 63
-#define OCTEON_IRQ_PCI_MSI0 64
-#define OCTEON_IRQ_PCI_MSI1 65
-#define OCTEON_IRQ_PCI_MSI2 66
-#define OCTEON_IRQ_PCI_MSI3 67
-#define OCTEON_IRQ_RESERVED68 68 /* Summary of CIU_INT_SUM1 */
-#define OCTEON_IRQ_TWSI 69
-#define OCTEON_IRQ_RML 70
-#define OCTEON_IRQ_TRACE 71
-#define OCTEON_IRQ_GMX_DRP0 72
-#define OCTEON_IRQ_GMX_DRP1 73
-#define OCTEON_IRQ_IPD_DRP 74
-#define OCTEON_IRQ_KEY_ZERO 75
-#define OCTEON_IRQ_TIMER0 76
-#define OCTEON_IRQ_TIMER1 77
-#define OCTEON_IRQ_TIMER2 78
-#define OCTEON_IRQ_TIMER3 79
-#define OCTEON_IRQ_USB0 80
-#define OCTEON_IRQ_PCM 81
-#define OCTEON_IRQ_MPI 82
-#define OCTEON_IRQ_TWSI2 83
-#define OCTEON_IRQ_POWIQ 84
-#define OCTEON_IRQ_IPDPPTHR 85
-#define OCTEON_IRQ_MII0 86
-#define OCTEON_IRQ_BOOTDMA 87
-/* 88 - 151 represent the sources in CIU_INTX_EN1 */
-#define OCTEON_IRQ_WDOG0 88
-#define OCTEON_IRQ_WDOG1 89
-#define OCTEON_IRQ_WDOG2 90
-#define OCTEON_IRQ_WDOG3 91
-#define OCTEON_IRQ_WDOG4 92
-#define OCTEON_IRQ_WDOG5 93
-#define OCTEON_IRQ_WDOG6 94
-#define OCTEON_IRQ_WDOG7 95
-#define OCTEON_IRQ_WDOG8 96
-#define OCTEON_IRQ_WDOG9 97
-#define OCTEON_IRQ_WDOG10 98
-#define OCTEON_IRQ_WDOG11 99
-#define OCTEON_IRQ_WDOG12 100
-#define OCTEON_IRQ_WDOG13 101
-#define OCTEON_IRQ_WDOG14 102
-#define OCTEON_IRQ_WDOG15 103
-#define OCTEON_IRQ_UART2 104
-#define OCTEON_IRQ_USB1 105
-#define OCTEON_IRQ_MII1 106
-#define OCTEON_IRQ_RESERVED107 107
-#define OCTEON_IRQ_RESERVED108 108
-#define OCTEON_IRQ_RESERVED109 109
-#define OCTEON_IRQ_RESERVED110 110
-#define OCTEON_IRQ_RESERVED111 111
-#define OCTEON_IRQ_RESERVED112 112
-#define OCTEON_IRQ_RESERVED113 113
-#define OCTEON_IRQ_RESERVED114 114
-#define OCTEON_IRQ_RESERVED115 115
-#define OCTEON_IRQ_RESERVED116 116
-#define OCTEON_IRQ_RESERVED117 117
-#define OCTEON_IRQ_RESERVED118 118
-#define OCTEON_IRQ_RESERVED119 119
-#define OCTEON_IRQ_RESERVED120 120
-#define OCTEON_IRQ_RESERVED121 121
-#define OCTEON_IRQ_RESERVED122 122
-#define OCTEON_IRQ_RESERVED123 123
-#define OCTEON_IRQ_RESERVED124 124
-#define OCTEON_IRQ_RESERVED125 125
-#define OCTEON_IRQ_RESERVED126 126
-#define OCTEON_IRQ_RESERVED127 127
-#define OCTEON_IRQ_RESERVED128 128
-#define OCTEON_IRQ_RESERVED129 129
-#define OCTEON_IRQ_RESERVED130 130
-#define OCTEON_IRQ_RESERVED131 131
-#define OCTEON_IRQ_RESERVED132 132
-#define OCTEON_IRQ_RESERVED133 133
-#define OCTEON_IRQ_RESERVED134 134
-#define OCTEON_IRQ_RESERVED135 135
-#define OCTEON_IRQ_RESERVED136 136
-#define OCTEON_IRQ_RESERVED137 137
-#define OCTEON_IRQ_RESERVED138 138
-#define OCTEON_IRQ_RESERVED139 139
-#define OCTEON_IRQ_RESERVED140 140
-#define OCTEON_IRQ_RESERVED141 141
-#define OCTEON_IRQ_RESERVED142 142
-#define OCTEON_IRQ_RESERVED143 143
-#define OCTEON_IRQ_RESERVED144 144
-#define OCTEON_IRQ_RESERVED145 145
-#define OCTEON_IRQ_RESERVED146 146
-#define OCTEON_IRQ_RESERVED147 147
-#define OCTEON_IRQ_RESERVED148 148
-#define OCTEON_IRQ_RESERVED149 149
-#define OCTEON_IRQ_RESERVED150 150
-#define OCTEON_IRQ_RESERVED151 151
+enum octeon_irq {
+/* 1 - 8 represent the 8 MIPS standard interrupt sources */
+ OCTEON_IRQ_SW0 = 1,
+ OCTEON_IRQ_SW1,
+/* CIU0, CIU2, CIU4 are 3, 4, 5 */
+ OCTEON_IRQ_5 = 6,
+ OCTEON_IRQ_PERF,
+ OCTEON_IRQ_TIMER,
+/* sources in CIU_INTX_EN0 */
+ OCTEON_IRQ_WORKQ0,
+ OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16,
+ OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16,
+ OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15,
+ OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16,
+ OCTEON_IRQ_MBOX1,
+ OCTEON_IRQ_UART0,
+ OCTEON_IRQ_UART1,
+ OCTEON_IRQ_UART2,
+ OCTEON_IRQ_PCI_INT0,
+ OCTEON_IRQ_PCI_INT1,
+ OCTEON_IRQ_PCI_INT2,
+ OCTEON_IRQ_PCI_INT3,
+ OCTEON_IRQ_PCI_MSI0,
+ OCTEON_IRQ_PCI_MSI1,
+ OCTEON_IRQ_PCI_MSI2,
+ OCTEON_IRQ_PCI_MSI3,
+
+ OCTEON_IRQ_TWSI,
+ OCTEON_IRQ_TWSI2,
+ OCTEON_IRQ_RML,
+ OCTEON_IRQ_TRACE0,
+ OCTEON_IRQ_GMX_DRP0 = OCTEON_IRQ_TRACE0 + 4,
+ OCTEON_IRQ_IPD_DRP = OCTEON_IRQ_GMX_DRP0 + 5,
+ OCTEON_IRQ_KEY_ZERO,
+ OCTEON_IRQ_TIMER0,
+ OCTEON_IRQ_TIMER1,
+ OCTEON_IRQ_TIMER2,
+ OCTEON_IRQ_TIMER3,
+ OCTEON_IRQ_USB0,
+ OCTEON_IRQ_USB1,
+ OCTEON_IRQ_PCM,
+ OCTEON_IRQ_MPI,
+ OCTEON_IRQ_POWIQ,
+ OCTEON_IRQ_IPDPPTHR,
+ OCTEON_IRQ_MII0,
+ OCTEON_IRQ_MII1,
+ OCTEON_IRQ_BOOTDMA,
+
+ OCTEON_IRQ_NAND,
+ OCTEON_IRQ_MIO, /* Summary of MIO_BOOT_ERR */
+ OCTEON_IRQ_IOB, /* Summary of IOB_INT_SUM */
+ OCTEON_IRQ_FPA, /* Summary of FPA_INT_SUM */
+ OCTEON_IRQ_POW, /* Summary of POW_ECC_ERR */
+ OCTEON_IRQ_L2C, /* Summary of L2C_INT_STAT */
+ OCTEON_IRQ_IPD, /* Summary of IPD_INT_SUM */
+ OCTEON_IRQ_PIP, /* Summary of PIP_INT_REG */
+ OCTEON_IRQ_PKO, /* Summary of PKO_REG_ERROR */
+ OCTEON_IRQ_ZIP, /* Summary of ZIP_ERROR */
+ OCTEON_IRQ_TIM, /* Summary of TIM_REG_ERROR */
+ OCTEON_IRQ_RAD, /* Summary of RAD_REG_ERROR */
+ OCTEON_IRQ_KEY, /* Summary of KEY_INT_SUM */
+ OCTEON_IRQ_DFA, /* Summary of DFA */
+ OCTEON_IRQ_USBCTL, /* Summary of USBN0_INT_SUM */
+ OCTEON_IRQ_SLI, /* Summary of SLI_INT_SUM */
+ OCTEON_IRQ_DPI, /* Summary of DPI_INT_SUM */
+ OCTEON_IRQ_AGX0, /* Summary of GMX0*+PCS0_INT*_REG */
+ OCTEON_IRQ_AGL = OCTEON_IRQ_AGX0 + 5,
+ OCTEON_IRQ_PTP,
+ OCTEON_IRQ_PEM0,
+ OCTEON_IRQ_PEM1,
+ OCTEON_IRQ_SRIO0,
+ OCTEON_IRQ_SRIO1,
+ OCTEON_IRQ_LMC0,
+ OCTEON_IRQ_DFM = OCTEON_IRQ_LMC0 + 4, /* Summary of DFM */
+ OCTEON_IRQ_RST,
+};
#ifdef CONFIG_PCI_MSI
-/* 152 - 215 represent the MSI interrupts 0-63 */
-#define OCTEON_IRQ_MSI_BIT0 152
-#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
+/* 152 - 407 represent the MSI interrupts 0-255 */
+#define OCTEON_IRQ_MSI_BIT0 (OCTEON_IRQ_RST + 1)
-#define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1)
+#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
+#define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1)
#else
-#define OCTEON_IRQ_LAST 152
+#define OCTEON_IRQ_LAST (OCTEON_IRQ_RST + 1)
#endif
#endif
extern uint64_t octeon_bootloader_entry_addr;
+extern void (*octeon_irq_setup_secondary)(void);
+
#endif /* __ASM_OCTEON_OCTEON_H */
#define __NR_name_to_handle_at (__NR_Linux + 303)
#define __NR_open_by_handle_at (__NR_Linux + 304)
#define __NR_clock_adjtime (__NR_Linux + 305)
-#define __NR_clock_adjtime (__NR_Linux + 306)
+#define __NR_syncfs (__NR_Linux + 306)
/*
* Offset of the last N32 flavoured syscall
int i;
for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++)
- set_irq_chip_and_handler(i, &r4030_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &r4030_irq_type, handle_level_irq);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */
uint32_t flag;
unsigned int gpio_irq;
unsigned int gpio_bank;
- struct jz_gpio_chip *chip = get_irq_desc_data(desc);
+ struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc);
gpio_bank = JZ4740_IRQ_GPIO0 - irq;
chip->wakeup &= ~IRQ_TO_BIT(data->irq);
spin_unlock(&chip->lock);
- set_irq_wake(chip->irq, on);
+ irq_set_irq_wake(chip->irq, on);
return 0;
}
gpiochip_add(&chip->gpio_chip);
chip->irq = JZ4740_IRQ_INTC_GPIO(id);
- set_irq_data(chip->irq, chip);
- set_irq_chained_handler(chip->irq, jz_gpio_irq_demux_handler);
+ irq_set_handler_data(chip->irq, chip);
+ irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler);
for (irq = chip->irq_base; irq < chip->irq_base + chip->gpio_chip.ngpio; ++irq) {
irq_set_lockdep_class(irq, &gpio_lock_class);
- set_irq_chip_data(irq, chip);
- set_irq_chip_and_handler(irq, &jz_gpio_irq_chip,
- handle_level_irq);
+ irq_set_chip_data(irq, chip);
+ irq_set_chip_and_handler(irq, &jz_gpio_irq_chip,
+ handle_level_irq);
}
return 0;
writel(0xffffffff, jz_intc_base + JZ_REG_INTC_SET_MASK);
for (i = JZ4740_IRQ_BASE; i < JZ4740_IRQ_BASE + 32; i++) {
- set_irq_chip_data(i, (void *)IRQ_BIT(i));
- set_irq_chip_and_handler(i, &intc_irq_type, handle_level_irq);
+ irq_set_chip_data(i, (void *)IRQ_BIT(i));
+ irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
}
setup_irq(2, &jz4740_cascade_action);
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
enable_irq(irq);
}
init_8259A(0);
for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
- set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
- set_irq_probe(i);
+ irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq);
+ irq_set_probe(i);
}
setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
vpe_local_setup(numvpes);
for (i = _irqbase; i < (_irqbase + numintrs); i++)
- set_irq_chip(i, &gic_irq_controller);
+ irq_set_chip(i, &gic_irq_controller);
}
void __init gic_init(unsigned long gic_base_addr,
* bit31: logical or of bits[25:1].
*/
for (i = 1; i < 30; i++)
- set_irq_chip_and_handler(GT641XX_IRQ_BASE + i,
- >641xx_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
+ >641xx_irq_chip, handle_level_irq);
}
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
- set_irq_chip_and_handler_name(irqbase + n,
- &msc_edgeirq_type, handle_edge_irq, "edge");
+ irq_set_chip_and_handler_name(irqbase + n,
+ &msc_edgeirq_type,
+ handle_edge_irq,
+ "edge");
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
- set_irq_chip_and_handler_name(irqbase+n,
- &msc_levelirq_type, handle_level_irq, "level");
+ irq_set_chip_and_handler_name(irqbase + n,
+ &msc_levelirq_type,
+ handle_level_irq,
+ "level");
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else
clear_c0_intcontrol(0x00000f00); /* Mask all */
for (i = base; i < base + 4; i++)
- set_irq_chip_and_handler(i, &rm7k_irq_controller,
+ irq_set_chip_and_handler(i, &rm7k_irq_controller,
handle_percpu_irq);
}
clear_c0_intcontrol(0x0000f000); /* Mask all */
for (i = base; i < base + 4; i++)
- set_irq_chip_and_handler(i, &rm9k_irq_controller,
+ irq_set_chip_and_handler(i, &rm9k_irq_controller,
handle_level_irq);
rm9000_perfcount_irq = base + 1;
- set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
+ irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
handle_percpu_irq);
}
#endif
for (i = 0; i < NR_IRQS; i++)
- set_irq_noprobe(i);
+ irq_set_noprobe(i);
arch_init_irq();
*/
if (cpu_has_mipsmt)
for (i = irq_base; i < irq_base + 2; i++)
- set_irq_chip_and_handler(i, &mips_mt_cpu_irq_controller,
+ irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller,
handle_percpu_irq);
for (i = irq_base + 2; i < irq_base + 8; i++)
- set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
+ irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
handle_percpu_irq);
}
for (i = 0; i < TXx9_MAX_IR; i++) {
txx9irq[i].level = 4; /* middle level */
txx9irq[i].mode = TXx9_IRCR_LOW;
- set_irq_chip_and_handler(TXX9_IRQ_BASE + i,
- &txx9_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
+ handle_level_irq);
}
/* mask all IRC interrupts */
setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
- set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
+ irq_set_handler(cpu_ipi_irq, handle_percpu_irq);
}
/*
mips_cpu_irq_init();
for (i = LASAT_IRQ_BASE; i <= LASAT_IRQ_END; i++)
- set_irq_chip_and_handler(i, &lasat_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &lasat_irq_type, handle_level_irq);
setup_irq(LASAT_CASCADE_IRQ, &cascade);
}
u32 i;
for (i = LOONGSON_IRQ_BASE; i < LOONGSON_IRQ_BASE + 32; i++)
- set_irq_chip_and_handler(i, &bonito_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &bonito_irq_type,
+ handle_level_irq);
#ifdef CONFIG_CPU_LOONGSON2E
setup_irq(LOONGSON_IRQ_BASE + 10, &dma_timeout_irqaction);
void __init arch_init_ipiirq(int irq, struct irqaction *action)
{
setup_irq(irq, action);
- set_irq_handler(irq, handle_percpu_irq);
+ irq_set_handler(irq, handle_percpu_irq);
}
void __init arch_init_irq(void)
set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
#ifdef CONFIG_SMP
- set_irq_handler(mips_cpu_perf_irq, handle_percpu_irq);
+ irq_set_handler(mips_cpu_perf_irq, handle_percpu_irq);
#endif
}
}
pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
control);
- set_irq_msi(irq, desc);
+ irq_set_msi_desc(irq, desc);
write_msi_msg(irq, &msg);
return 0;
}
static u64 msi_rcv_reg[4];
static u64 mis_ena_reg[4];
-static void octeon_irq_msi_enable_pcie(unsigned int irq)
+static void octeon_irq_msi_enable_pcie(struct irq_data *data)
{
u64 en;
unsigned long flags;
- int msi_number = irq - OCTEON_IRQ_MSI_BIT0;
+ int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0;
int irq_index = msi_number >> 6;
int irq_bit = msi_number & 0x3f;
raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
}
-static void octeon_irq_msi_disable_pcie(unsigned int irq)
+static void octeon_irq_msi_disable_pcie(struct irq_data *data)
{
u64 en;
unsigned long flags;
- int msi_number = irq - OCTEON_IRQ_MSI_BIT0;
+ int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0;
int irq_index = msi_number >> 6;
int irq_bit = msi_number & 0x3f;
static struct irq_chip octeon_irq_chip_msi_pcie = {
.name = "MSI",
- .enable = octeon_irq_msi_enable_pcie,
- .disable = octeon_irq_msi_disable_pcie,
+ .irq_enable = octeon_irq_msi_enable_pcie,
+ .irq_disable = octeon_irq_msi_disable_pcie,
};
-static void octeon_irq_msi_enable_pci(unsigned int irq)
+static void octeon_irq_msi_enable_pci(struct irq_data *data)
{
/*
* Octeon PCI doesn't have the ability to mask/unmask MSI
*/
}
-static void octeon_irq_msi_disable_pci(unsigned int irq)
+static void octeon_irq_msi_disable_pci(struct irq_data *data)
{
/* See comment in enable */
}
static struct irq_chip octeon_irq_chip_msi_pci = {
.name = "MSI",
- .enable = octeon_irq_msi_enable_pci,
- .disable = octeon_irq_msi_disable_pci,
+ .irq_enable = octeon_irq_msi_enable_pci,
+ .irq_disable = octeon_irq_msi_disable_pci,
};
/*
}
for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++)
- set_irq_chip_and_handler(irq, msi, handle_simple_irq);
+ irq_set_chip_and_handler(irq, msi, handle_simple_irq);
if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0,
/* initialize all the IRQ descriptors */
for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
- set_irq_chip_and_handler(i, &msp_cic_irq_controller,
+ irq_set_chip_and_handler(i, &msp_cic_irq_controller,
handle_level_irq);
#ifdef CONFIG_MIPS_MT_SMTC
/* Mask of CIC interrupt */
/* initialize all the IRQ descriptors */
for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++)
- set_irq_chip_and_handler(i, &msp_slp_irq_controller,
+ irq_set_chip_and_handler(i, &msp_slp_irq_controller,
handle_level_irq);
}
void __init arch_init_ipiirq(int irq, struct irqaction *action)
{
setup_irq(irq, action);
- set_irq_handler(irq, handle_percpu_irq);
+ irq_set_handler(irq, handle_percpu_irq);
}
void __init msp_vsmp_int_init(void)
/* Set IRQ information in irq_desc */
for (irq = PNX833X_PIC_IRQ_BASE; irq < (PNX833X_PIC_IRQ_BASE + PNX833X_PIC_NUM_IRQ); irq++) {
pnx833x_hard_disable_pic_irq(irq);
- set_irq_chip_and_handler(irq, &pnx833x_pic_irq_type, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &pnx833x_pic_irq_type,
+ handle_simple_irq);
}
for (irq = PNX833X_GPIO_IRQ_BASE; irq < (PNX833X_GPIO_IRQ_BASE + PNX833X_GPIO_NUM_IRQ); irq++)
- set_irq_chip_and_handler(irq, &pnx833x_gpio_irq_type, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &pnx833x_gpio_irq_type,
+ handle_simple_irq);
/* Set PIC priority limiter register to 0 */
PNX833X_PIC_INT_PRIORITY = 0;
int configPR;
for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++)
- set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
/* init of GIC/IPC interrupts */
/* should be done before cp0 since cp0 init enables the GIC int */
/* mask/priority is still 0 so we will not get any
* interrupts until it is unmasked */
- set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
}
/* Priority level 0 */
/* Set int vector table address */
PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
- set_irq_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type,
+ irq_set_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type,
handle_level_irq);
setup_irq(MIPS_CPU_GIC_IRQ, &gic_action);
/* init of Timer interrupts */
for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++)
- set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
/* Stop Timer 1-3 */
configPR = read_c0_config7();
configPR |= 0x00000038;
write_c0_config7(configPR);
- set_irq_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type,
+ irq_set_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type,
handle_level_irq);
setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action);
}
* Initialize interrupt handlers.
*/
for (i = 0; i < NR_IRQS; i++)
- set_irq_chip_and_handler(i, &asic_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(i, &asic_irq_chip, handle_level_irq);
}
pr_info("Initializing IRQ's: %d out of %d\n", RC32434_NR_IRQS, NR_IRQS);
for (i = 0; i < RC32434_NR_IRQS; i++)
- set_irq_chip_and_handler(i, &rc32434_irq_type,
- handle_level_irq);
+ irq_set_chip_and_handler(i, &rc32434_irq_type,
+ handle_level_irq);
}
/* Main Interrupt dispatcher */
else
handler = &ip22_local3_irq_type;
- set_irq_chip_and_handler(i, handler, handle_level_irq);
+ irq_set_chip_and_handler(i, handler, handle_level_irq);
}
/* vector handler. this register the IRQ as non-sharable */
void __devinit register_bridge_irq(unsigned int irq)
{
- set_irq_chip_and_handler(irq, &bridge_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq);
}
int __devinit request_bridge_irq(struct bridge_controller *bc)
panic("Allocation of irq number for timer failed");
} while (xchg(&rt_timer_irq, irq));
- set_irq_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq);
+ irq_set_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq);
setup_irq(irq, &hub_rt_irqaction);
}
for (irq = CRIME_IRQ_BASE; irq <= IP32_IRQ_MAX; irq++) {
switch (irq) {
case MACE_VID_IN1_IRQ ... MACE_PCI_BRIDGE_IRQ:
- set_irq_chip_and_handler_name(irq,&ip32_mace_interrupt,
- handle_level_irq, "level");
+ irq_set_chip_and_handler_name(irq,
+ &ip32_mace_interrupt,
+ handle_level_irq,
+ "level");
break;
case MACEPCI_SCSI0_IRQ ... MACEPCI_SHARED2_IRQ:
- set_irq_chip_and_handler_name(irq,
- &ip32_macepci_interrupt, handle_level_irq,
- "level");
+ irq_set_chip_and_handler_name(irq,
+ &ip32_macepci_interrupt,
+ handle_level_irq,
+ "level");
break;
case CRIME_CPUERR_IRQ:
case CRIME_MEMERR_IRQ:
- set_irq_chip_and_handler_name(irq,
- &crime_level_interrupt, handle_level_irq,
- "level");
+ irq_set_chip_and_handler_name(irq,
+ &crime_level_interrupt,
+ handle_level_irq,
+ "level");
break;
case CRIME_GBE0_IRQ ... CRIME_GBE3_IRQ:
case CRIME_RE_EMPTY_E_IRQ ... CRIME_RE_IDLE_E_IRQ:
case CRIME_SOFT0_IRQ ... CRIME_SOFT2_IRQ:
case CRIME_VICE_IRQ:
- set_irq_chip_and_handler_name(irq,
- &crime_edge_interrupt, handle_edge_irq, "edge");
+ irq_set_chip_and_handler_name(irq,
+ &crime_edge_interrupt,
+ handle_edge_irq,
+ "edge");
break;
case MACEISA_PARALLEL_IRQ:
case MACEISA_SERIAL1_TDMAPR_IRQ:
case MACEISA_SERIAL2_TDMAPR_IRQ:
- set_irq_chip_and_handler_name(irq,
- &ip32_maceisa_edge_interrupt, handle_edge_irq,
- "edge");
+ irq_set_chip_and_handler_name(irq,
+ &ip32_maceisa_edge_interrupt,
+ handle_edge_irq,
+ "edge");
break;
default:
- set_irq_chip_and_handler_name(irq,
- &ip32_maceisa_level_interrupt, handle_level_irq,
- "level");
+ irq_set_chip_and_handler_name(irq,
+ &ip32_maceisa_level_interrupt,
+ handle_level_irq,
+ "level");
break;
}
}
int i;
for (i = 0; i < BCM1480_NR_IRQS; i++) {
- set_irq_chip_and_handler(i, &bcm1480_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &bcm1480_irq_type,
+ handle_level_irq);
bcm1480_irq_owner[i] = 0;
}
}
int i;
for (i = 0; i < SB1250_NR_IRQS; i++) {
- set_irq_chip_and_handler(i, &sb1250_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &sb1250_irq_type,
+ handle_level_irq);
sb1250_irq_owner[i] = 0;
}
}
int i;
for (i = SNI_A20R_IRQ_BASE + 2 ; i < SNI_A20R_IRQ_BASE + 8; i++)
- set_irq_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
sni_hwint = a20r_hwint;
change_c0_status(ST0_IM, IE_IRQ0);
setup_irq(SNI_A20R_IRQ_BASE + 3, &sni_isa_irq);
mips_cpu_irq_init();
/* Actually we've got more interrupts to handle ... */
for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_SCSI; i++)
- set_irq_chip_and_handler(i, &pcimt_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &pcimt_irq_type, handle_level_irq);
sni_hwint = sni_pcimt_hwint;
change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ3);
}
mips_cpu_irq_init();
for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++)
- set_irq_chip_and_handler(i, &pcit_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &pcit_irq_type, handle_level_irq);
*(volatile u32 *)SNI_PCIT_INT_REG = 0;
sni_hwint = sni_pcit_hwint;
change_c0_status(ST0_IM, IE_IRQ1);
mips_cpu_irq_init();
for (i = SNI_PCIT_INT_START; i <= SNI_PCIT_INT_END; i++)
- set_irq_chip_and_handler(i, &pcit_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &pcit_irq_type, handle_level_irq);
*(volatile u32 *)SNI_PCIT_INT_REG = 0x40000000;
sni_hwint = sni_pcit_hwint_cplus;
change_c0_status(ST0_IM, IE_IRQ0);
sni_rm200_init_8259A();
for (i = RM200_I8259A_IRQ_BASE; i < RM200_I8259A_IRQ_BASE + 16; i++)
- set_irq_chip_and_handler(i, &sni_rm200_i8259A_chip,
+ irq_set_chip_and_handler(i, &sni_rm200_i8259A_chip,
handle_level_irq);
setup_irq(RM200_I8259A_IRQ_BASE + PIC_CASCADE_IR, &sni_rm200_irq2);
mips_cpu_irq_init();
/* Actually we've got more interrupts to handle ... */
for (i = SNI_RM200_INT_START; i <= SNI_RM200_INT_END; i++)
- set_irq_chip_and_handler(i, &rm200_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &rm200_irq_type, handle_level_irq);
sni_hwint = sni_rm200_hwint;
change_c0_status(ST0_IM, IE_IRQ0);
setup_irq(SNI_RM200_INT_START + 0, &sni_rm200_i8259A_irq);
mips_cpu_irq_init();
txx9_irq_init(TX4927_IRC_REG & 0xfffffffffULL);
- set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4927_IRC_INT,
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4927_IRC_INT,
handle_simple_irq);
/* raise priority for errors, timers, SIO */
txx9_irq_set_pri(TX4927_IR_ECCERR, 7);
mips_cpu_irq_init();
txx9_irq_init(TX4938_IRC_REG & 0xfffffffffULL);
- set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4938_IRC_INT,
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4938_IRC_INT,
handle_simple_irq);
/* raise priority for errors, timers, SIO */
txx9_irq_set_pri(TX4938_IR_ECCERR, 7);
for (i = 1; i < TX4939_NUM_IR; i++) {
tx4939irq[i].level = 4; /* middle level */
tx4939irq[i].mode = TXx9_IRCR_LOW;
- set_irq_chip_and_handler(TXX9_IRQ_BASE + i,
- &tx4939_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &tx4939_irq_chip,
+ handle_level_irq);
}
/* mask all IRC interrupts */
__raw_writel(TXx9_IRCER_ICE, &tx4939_ircptr->den.r);
__raw_writel(irc_elevel, &tx4939_ircptr->msk.r);
- set_irq_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT,
+ irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT,
handle_simple_irq);
/* raise priority for errors, timers, sio */
tx3927_irq_init();
for (i = JMR3927_IRQ_IOC; i < JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC; i++)
- set_irq_chip_and_handler(i, &jmr3927_irq_ioc, handle_level_irq);
+ irq_set_chip_and_handler(i, &jmr3927_irq_ioc,
+ handle_level_irq);
/* setup IOC interrupt 1 (PCI, MODEM) */
- set_irq_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq);
+ irq_set_chained_handler(JMR3927_IRQ_IOCINT, handle_simple_irq);
}
for (i = RBTX4927_IRQ_IOC;
i < RBTX4927_IRQ_IOC + RBTX4927_NR_IRQ_IOC; i++)
- set_irq_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type,
+ irq_set_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type,
handle_level_irq);
- set_irq_chained_handler(RBTX4927_IRQ_IOCINT, handle_simple_irq);
+ irq_set_chained_handler(RBTX4927_IRQ_IOCINT, handle_simple_irq);
}
static int rbtx4927_irq_dispatch(int pending)
tx4927_irq_init();
toshiba_rbtx4927_irq_ioc_init();
/* Onboard 10M Ether: High Active */
- set_irq_type(RBTX4927_RTL_8019_IRQ, IRQF_TRIGGER_HIGH);
+ irq_set_irq_type(RBTX4927_RTL_8019_IRQ, IRQF_TRIGGER_HIGH);
}
for (i = RBTX4938_IRQ_IOC;
i < RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC; i++)
- set_irq_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type,
+ irq_set_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type,
handle_level_irq);
- set_irq_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq);
+ irq_set_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq);
}
void __init rbtx4938_irq_setup(void)
tx4938_irq_init();
toshiba_rbtx4938_irq_ioc_init();
/* Onboard 10M Ether: High Active */
- set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH);
+ irq_set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH);
}
tx4939_irq_init();
for (i = RBTX4939_IRQ_IOC;
i < RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC; i++)
- set_irq_chip_and_handler(i, &rbtx4939_ioc_irq_chip,
+ irq_set_chip_and_handler(i, &rbtx4939_ioc_irq_chip,
handle_level_irq);
- set_irq_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq);
+ irq_set_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq);
}
icu2_write(MGIUINTHREG, 0xffff);
for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++)
- set_irq_chip_and_handler(i, &sysint1_irq_type,
+ irq_set_chip_and_handler(i, &sysint1_irq_type,
handle_level_irq);
for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++)
- set_irq_chip_and_handler(i, &sysint2_irq_type,
+ irq_set_chip_and_handler(i, &sysint2_irq_type,
handle_level_irq);
cascade_irq(INT0_IRQ, icu_get_irq);
atomic_inc(&irq_err_count);
else
irq_dispatch(irq);
- if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
+ if (!irqd_irq_disabled(idata) && chip->irq_unmask)
chip->irq_unmask(idata);
} else
do_IRQ(irq);
def_bool y
select HAVE_OPROFILE
select HAVE_GENERIC_HARDIRQS
- select GENERIC_HARDIRQS_NO_DEPRECATED
+ select GENERIC_IRQ_SHOW
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_KGDB
*/
void mn10300_set_lateack_irq_type(int irq)
{
- set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
+ irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
handle_level_irq);
}
int irq;
for (irq = 0; irq < NR_IRQS; irq++)
- if (get_irq_chip(irq) == &no_irq_chip)
+ if (irq_get_chip(irq) == &no_irq_chip)
/* due to the PIC latching interrupt requests, even
* when the IRQ is disabled, IRQ_PENDING is superfluous
* and we can use handle_level_irq() for edge-triggered
* interrupts */
- set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
+ irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
handle_level_irq);
unit_init_IRQ();
/*
* Display interrupt management information through /proc/interrupts
*/
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- int i = *(loff_t *) v, j, cpu;
- struct irqaction *action;
- unsigned long flags;
-
- switch (i) {
- /* display column title bar naming CPUs */
- case 0:
- seq_printf(p, " ");
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "CPU%d ", j);
- seq_putc(p, '\n');
- break;
-
- /* display information rows, one per active CPU */
- case 1 ... NR_IRQS - 1:
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-
- action = irq_desc[i].action;
- if (action) {
- seq_printf(p, "%3d: ", i);
- for_each_present_cpu(cpu)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-
- if (i < NR_CPU_IRQS)
- seq_printf(p, " %14s.%u",
- irq_desc[i].irq_data.chip->name,
- (GxICR(i) & GxICR_LEVEL) >>
- GxICR_LEVEL_SHIFT);
- else
- seq_printf(p, " %14s",
- irq_desc[i].irq_data.chip->name);
-
- seq_printf(p, " %s", action->name);
-
- for (action = action->next;
- action;
- action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
- }
-
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- break;
-
- /* polish off with NMI and error counters */
- case NR_IRQS:
#ifdef CONFIG_MN10300_WD_TIMER
- seq_printf(p, "NMI: ");
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ", nmi_count(j));
- seq_putc(p, '\n');
-#endif
+ int j;
- seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
- break;
- }
+ seq_printf(p, "%*s: ", prec, "NMI");
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", nmi_count(j));
+ seq_putc(p, '\n');
+#endif
+ seq_printf(p, "%*s: ", prec, "ERR");
+ seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
- irq_desc_t *desc;
int irq;
unsigned int self, new;
unsigned long flags;
self = smp_processor_id();
for (irq = 0; irq < NR_IRQS; irq++) {
- desc = irq_desc + irq;
+ struct irq_data *data = irq_get_irq_data(irq);
- if (desc->status == IRQ_PER_CPU)
+ if (irqd_is_per_cpu(data))
continue;
- if (cpu_isset(self, irq_desc[irq].affinity) &&
+ if (cpu_isset(self, data->affinity) &&
!cpus_intersects(irq_affinity[irq], cpu_online_map)) {
int cpu_id;
cpu_id = first_cpu(cpu_online_map);
- cpu_set(cpu_id, irq_desc[irq].affinity);
+ cpu_set(cpu_id, data->affinity);
}
/* We need to operate irq_affinity_online atomically. */
arch_local_cli_save(flags);
GxICR(irq) = x & GxICR_LEVEL;
tmp = GxICR(irq);
- new = any_online_cpu(irq_desc[irq].affinity);
+ new = any_online_cpu(data->affinity);
irq_affinity_online[irq] = new;
CROSS_GxICR(irq, new) =
NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
set_intr_level(port->tx_irq,
NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
- set_irq_chip(port->tm_irq, &mn10300_serial_pic);
+ irq_set_chip(port->tm_irq, &mn10300_serial_pic);
if (request_irq(port->rx_irq, mn10300_serial_interrupt,
IRQF_DISABLED, port->rx_name, port) < 0)
u16 tmp16;
/* set up the reschedule IPI */
- set_irq_chip_and_handler(RESCHEDULE_IPI,
- &mn10300_ipi_type, handle_percpu_irq);
+ irq_set_chip_and_handler(RESCHEDULE_IPI, &mn10300_ipi_type,
+ handle_percpu_irq);
setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
mn10300_ipi_enable(RESCHEDULE_IPI);
/* set up the call function IPI */
- set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
- &mn10300_ipi_type, handle_percpu_irq);
+ irq_set_chip_and_handler(CALL_FUNC_SINGLE_IPI, &mn10300_ipi_type,
+ handle_percpu_irq);
setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
/* set up the local timer IPI */
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
- set_irq_chip_and_handler(LOCAL_TIMER_IPI,
- &mn10300_ipi_type, handle_percpu_irq);
+ irq_set_chip_and_handler(LOCAL_TIMER_IPI, &mn10300_ipi_type,
+ handle_percpu_irq);
setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
mn10300_ipi_enable(LOCAL_TIMER_IPI);
SyncExBus();
for (irq = NR_CPU_IRQS; irq < NR_IRQS; irq++)
- set_irq_chip_and_handler(irq, &asb2364_fpga_pic, handle_level_irq);
+ irq_set_chip_and_handler(irq, &asb2364_fpga_pic,
+ handle_level_irq);
/* the FPGA drives the XIRQ1 input on the CPU PIC */
setup_irq(XIRQ1, &fpga_irq[0]);
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU
- select GENERIC_HARDIRQS_NO_DEPRECATED
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
int cpu_dest;
/* timer and ipi have to always be received on all CPUs */
- if (CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status)) {
- /* Bad linux design decision. The mask has already
- * been set; we must reset it. Will fix - tglx
- */
- cpumask_setall(d->affinity);
+ if (irqd_is_per_cpu(d))
return -EINVAL;
- }
/* whatever mask they set, we just allow one CPU */
cpu_dest = first_cpu(*dest);
}
if (i < NR_IRQS) {
+ struct irq_desc *desc = irq_to_desc(i);
struct irqaction *action;
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ action = desc->action;
if (!action)
goto skip;
seq_printf(p, "%3d: ", i);
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
- seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
+ seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
seq_putc(p, '\n');
skip:
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
return 0;
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
- if (irq_desc[irq].action)
+ if (irq_has_action(irq))
return -EBUSY;
- if (get_irq_chip(irq) != &cpu_interrupt_type)
+ if (irq_get_chip(irq) != &cpu_interrupt_type)
return -EBUSY;
/* for iosapic interrupts */
if (type) {
- set_irq_chip_and_handler(irq, type, handle_percpu_irq);
- set_irq_chip_data(irq, data);
+ irq_set_chip_and_handler(irq, type, handle_percpu_irq);
+ irq_set_chip_data(irq, data);
__cpu_unmask_irq(irq);
}
return 0;
#ifdef CONFIG_SMP
desc = irq_to_desc(irq);
cpumask_copy(&dest, desc->irq_data.affinity);
- if (CHECK_IRQ_PER_CPU(desc->status) &&
+ if (irqd_is_per_cpu(&desc->irq_data) &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);
{
int i;
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
- set_irq_chip_and_handler(i, &cpu_interrupt_type,
+ irq_set_chip_and_handler(i, &cpu_interrupt_type,
handle_percpu_irq);
}
- set_irq_handler(TIMER_IRQ, handle_percpu_irq);
+ irq_set_handler(TIMER_IRQ, handle_percpu_irq);
setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
- set_irq_handler(IPI_IRQ, handle_percpu_irq);
+ irq_set_handler(IPI_IRQ, handle_percpu_irq);
setup_irq(IPI_IRQ, &ipi_action);
#endif
}
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
select IRQ_PER_CPU
- select GENERIC_HARDIRQS_NO_DEPRECATED
+ select GENERIC_IRQ_SHOW
+ select GENERIC_IRQ_SHOW_LEVEL
config EARLY_PRINTK
bool
CONFIG_MTD_UBI=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_SCSI_TGT=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_DS1682=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_XIP=y
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_SCSI_TGT=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_HD=y
+CONFIG_MISC_DEVICES=y
CONFIG_ENCLOSURE_SERVICES=m
CONFIG_SENSORS_TSL2550=m
CONFIG_EEPROM_AT24=m
CONFIG_BNX2=m
CONFIG_CHELSIO_T1=m
CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
CONFIG_EHEA=y
CONFIG_IXGBE=m
CONFIG_IXGB=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
CONFIG_INFINIBAND_EHCA=m
+CONFIG_INFINIBAND_CXGB3=m
+CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_INFINIBAND_IPOIB=m
CONFIG_INFINIBAND_IPOIB_CM=y
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t);
+#define ARCH_HAS_DMA_MMAP_COHERENT
+
+
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
int (*probe)(void);
void (*kick_cpu)(int nr);
void (*setup_cpu)(int nr);
+ void (*bringup_done)(void);
void (*take_timebase)(void);
void (*give_timebase)(void);
- int (*cpu_enable)(unsigned int nr);
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int nr);
int (*cpu_bootable)(unsigned int nr);
extern void e500_idle(void);
extern void power4_idle(void);
-extern void power4_cpu_offline_powersave(void);
extern void ppc6xx_idle(void);
extern void book3e_idle(void);
#define STE_VSID_SHIFT 12
/* Location of cpu0's segment table */
-#define STAB0_PAGE 0x6
+#define STAB0_PAGE 0x8
#define STAB0_OFFSET (STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)
#endif
#ifdef CONFIG_FLATMEM
-#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)
+#define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif
static inline void qe_ic_cascade_low_ipic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = get_irq_desc_data(desc);
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
if (cascade_irq != NO_IRQ)
static inline void qe_ic_cascade_high_ipic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = get_irq_desc_data(desc);
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq != NO_IRQ)
static inline void qe_ic_cascade_low_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = get_irq_desc_data(desc);
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
static inline void qe_ic_cascade_high_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = get_irq_desc_data(desc);
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
static inline void qe_ic_cascade_muxed_mpic(unsigned int irq,
struct irq_desc *desc)
{
- struct qe_ic *qe_ic = get_irq_desc_data(desc);
+ struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq;
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
cascade_irq = qe_ic_get_high_irq(qe_ic);
if (cascade_irq == NO_IRQ)
#define SPRN_MAS2 0x272 /* MMU Assist Register 2 */
#define SPRN_MAS3 0x273 /* MMU Assist Register 3 */
#define SPRN_MAS4 0x274 /* MMU Assist Register 4 */
-#define SPRN_MAS5 0x275 /* MMU Assist Register 5 */
+#define SPRN_MAS5 0x153 /* MMU Assist Register 5 */
#define SPRN_MAS6 0x276 /* MMU Assist Register 6 */
#define SPRN_PID1 0x279 /* Process ID Register 1 */
#define SPRN_PID2 0x27A /* Process ID Register 2 */
extern void smp_send_debugger_break(int cpu);
extern void smp_message_recv(int);
+extern void start_secondary_resume(void);
DECLARE_PER_CPU(unsigned int, cpu_pvr);
#ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(const struct cpumask *map);
+extern void migrate_irqs(void);
int generic_cpu_disable(void);
-int generic_cpu_enable(unsigned int cpu);
void generic_cpu_die(unsigned int cpu);
void generic_mach_cpu_die(void);
+void generic_set_cpu_dead(unsigned int cpu);
#endif
#ifdef CONFIG_PPC64
COMPAT_SYS_SPU(recvmsg)
COMPAT_SYS_SPU(recvmmsg)
SYSCALL_SPU(accept4)
+SYSCALL_SPU(name_to_handle_at)
+COMPAT_SYS_SPU(open_by_handle_at)
+COMPAT_SYS_SPU(clock_adjtime)
+SYSCALL_SPU(syncfs)
#define __NR_recvmsg 342
#define __NR_recvmmsg 343
#define __NR_accept4 344
+#define __NR_name_to_handle_at 345
+#define __NR_open_by_handle_at 346
+#define __NR_clock_adjtime 347
+#define __NR_syncfs 348
#ifdef __KERNEL__
-#define __NR_syscalls 345
+#define __NR_syscalls 349
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
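/*
 * Editorial sketch, not part of the patch: once a call such as syncfs is
 * wired up in the table above, user space can reach it through syscall(2)
 * even before libc grows a wrapper.  The fd is assumed to be an open
 * descriptor on the filesystem to be synced; this is user-space code, not
 * kernel code.
 */
#define _GNU_SOURCE		/* for syscall() */
#include <unistd.h>
#include <sys/syscall.h>	/* assumed to define __NR_syncfs (348 on powerpc) */

static int do_syncfs(int fd)
{
	return syscall(__NR_syncfs, fd);
}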
return 0;
}
fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle, size_t size)
+{
+ unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+ pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+ return remap_pfn_range(vma, vma->vm_start,
+ pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
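/*
 * Editorial sketch, not part of the patch: a driver that obtained a buffer
 * with dma_alloc_coherent() can now hand it to user space from its mmap()
 * file operation via the helper exported above.  my_dev, my_buf, my_handle
 * and my_size are illustrative names standing in for the device and the
 * values returned by the original allocation.
 */
static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
{
	return dma_mmap_coherent(my_dev, vma, my_buf, my_handle, my_size);
}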
rfid
b . /* prevent speculative execution */
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on). The address is given to the hv
- * as a page number (see xLparMap below), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
- . = STAB0_OFFSET /* 0x6000 */
- .globl initial_stab
-initial_stab:
- .space 4096
-
#ifdef CONFIG_PPC_PSERIES
/*
* Data area reserved for FWNMI option.
#ifdef CONFIG_PPC_PSERIES
. = 0x8000
#endif /* CONFIG_PPC_PSERIES */
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on). The address is given to the hv
+ * as a page number (see xLparMap above), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+ . = STAB0_OFFSET /* 0x8000 */
+ .globl initial_stab
+initial_stab:
+ .space 4096
mtspr SPRN_SRR1,r4
SYNC
RFI
+
+_GLOBAL(start_secondary_resume)
+ /* Reset stack */
+ rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ li r3,0
+ std r3,0(r1) /* Zero the stack frame pointer */
+ bl start_secondary
+ b .
#endif /* CONFIG_SMP */
#ifdef CONFIG_KVM_BOOK3S_HANDLER
add r13,r13,r4 /* for this processor. */
mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/
+ /* Mark interrupts soft and hard disabled (they might be enabled
+ * in the PACA when doing hotplug)
+ */
+ li r0,0
+ stb r0,PACASOFTIRQEN(r13)
+ stb r0,PACAHARDIRQEN(r13)
+
/* Create a temp kernel stack for use before relocation is on. */
ld r1,PACAEMERGSP(r13)
subi r1,r1,STACK_FRAME_OVERHEAD
isync
b 1b
-_GLOBAL(power4_cpu_offline_powersave)
- /* Go to NAP now */
- mfmsr r7
- rldicl r0,r7,48,1
- rotldi r0,r0,16
- mtmsrd r0,1 /* hard-disable interrupts */
- li r0,1
- li r6,0
- stb r0,PACAHARDIRQEN(r13) /* we'll hard-enable shortly */
- stb r6,PACASOFTIRQEN(r13) /* soft-disable irqs */
-BEGIN_FTR_SECTION
- DSSALL
- sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- ori r7,r7,MSR_EE
- oris r7,r7,MSR_POW@h
- sync
- isync
- mtmsrd r7
- isync
- blr
EXPORT_SYMBOL(arch_local_irq_restore);
#endif /* CONFIG_PPC64 */
-static int show_other_interrupts(struct seq_file *p, int prec)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
return 0;
}
-int show_interrupts(struct seq_file *p, void *v)
-{
- unsigned long flags, any_count = 0;
- int i = *(loff_t *) v, j, prec;
- struct irqaction *action;
- struct irq_desc *desc;
- struct irq_chip *chip;
-
- if (i > nr_irqs)
- return 0;
-
- for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
- j *= 10;
-
- if (i == nr_irqs)
- return show_other_interrupts(p, prec);
-
- /* print header */
- if (i == 0) {
- seq_printf(p, "%*s", prec + 8, "");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%-8d", j);
- seq_putc(p, '\n');
- }
-
- desc = irq_to_desc(i);
- if (!desc)
- return 0;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- for_each_online_cpu(j)
- any_count |= kstat_irqs_cpu(i, j);
- action = desc->action;
- if (!action && !any_count)
- goto out;
-
- seq_printf(p, "%*d: ", prec, i);
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-
- chip = get_irq_desc_chip(desc);
- if (chip)
- seq_printf(p, " %-16s", chip->name);
- else
- seq_printf(p, " %-16s", "None");
- seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
-
- if (action) {
- seq_printf(p, " %s", action->name);
- while ((action = action->next) != NULL)
- seq_printf(p, ", %s", action->name);
- }
-
- seq_putc(p, '\n');
-out:
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- return 0;
-}
-
/*
* /proc/stat helpers
*/
}
#ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(const struct cpumask *map)
+void migrate_irqs(void)
{
struct irq_desc *desc;
unsigned int irq;
static int warned;
cpumask_var_t mask;
+ const struct cpumask *map = cpu_online_mask;
alloc_cpumask_var(&mask, GFP_KERNEL);
for_each_irq(irq) {
+ struct irq_data *data;
struct irq_chip *chip;
desc = irq_to_desc(irq);
if (!desc)
continue;
- if (desc->status & IRQ_PER_CPU)
+ data = irq_desc_get_irq_data(desc);
+ if (irqd_is_per_cpu(data))
continue;
- chip = get_irq_desc_chip(desc);
+ chip = irq_data_get_irq_chip(data);
- cpumask_and(mask, desc->irq_data.affinity, map);
+ cpumask_and(mask, data->affinity, map);
if (cpumask_any(mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq);
cpumask_copy(mask, map);
}
if (chip->irq_set_affinity)
- chip->irq_set_affinity(&desc->irq_data, mask, true);
+ chip->irq_set_affinity(data, mask, true);
else if (desc->action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
smp_wmb();
/* Clear norequest flags */
- irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
+ irq_clear_status_flags(i, IRQ_NOREQUEST);
/* Legacy flags are left to default at this point,
* one can then use irq_create_mapping() to
/* Set type if specified and different than the current one */
if (type != IRQ_TYPE_NONE &&
- type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
- set_irq_type(virq, type);
+ type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+ irq_set_irq_type(virq, type);
return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
return;
/* remove chip and handler */
- set_irq_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_and_handler(virq, NULL, NULL);
/* Make sure it's completed */
synchronize_irq(virq);
seq_printf(m, "%5d ", i);
seq_printf(m, "0x%05lx ", virq_to_hw(i));
- chip = get_irq_desc_chip(desc);
+ chip = irq_desc_get_chip(desc);
if (chip && chip->name)
p = chip->name;
else
if (!desc)
continue;
- chip = get_irq_desc_chip(desc);
+ chip = irq_desc_get_chip(desc);
if (!chip)
continue;
- if (chip->irq_eoi && desc->status & IRQ_INPROGRESS)
+ if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
chip->irq_eoi(&desc->irq_data);
if (chip->irq_mask)
chip->irq_mask(&desc->irq_data);
- if (chip->irq_disable && !(desc->status & IRQ_DISABLED))
+ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
chip->irq_disable(&desc->irq_data);
}
}
virq = irq_create_mapping(NULL, line);
if (virq != NO_IRQ)
- set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.specifier[1],
#define DBG(fmt...)
#endif
+
+/* Store all idle threads, this can be reused instead of creating
+* a new thread. Also avoids complicated thread destroy functionality
+* for idle threads.
+*/
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
+ * removed after init for !CONFIG_HOTPLUG_CPU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
+#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
+#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
+#else
+static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
+#define get_idle_for_cpu(x) (idle_thread_array[(x)])
+#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
+#endif
+
struct thread_info *secondary_ti;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
}
-static void __init smp_create_idle(unsigned int cpu)
-{
- struct task_struct *p;
-
- /* create a process for the processor */
- p = fork_idle(cpu);
- if (IS_ERR(p))
- panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-#ifdef CONFIG_PPC64
- paca[cpu].__current = p;
- paca[cpu].kstack = (unsigned long) task_thread_info(p)
- + THREAD_SIZE - STACK_FRAME_OVERHEAD;
-#endif
- current_set[cpu] = task_thread_info(p);
- task_thread_info(p)->cpu = cpu;
-}
-
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int cpu;
max_cpus = NR_CPUS;
else
max_cpus = 1;
-
- for_each_possible_cpu(cpu)
- if (cpu != boot_cpuid)
- smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
-DEFINE_PER_CPU(int, cpu_state) = { 0 };
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
int generic_cpu_disable(void)
{
set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
vdso_data->processorCount--;
- fixup_irqs(cpu_online_mask);
-#endif
- return 0;
-}
-
-int generic_cpu_enable(unsigned int cpu)
-{
- /* Do the normal bootup if we haven't
- * already bootstrapped. */
- if (system_state != SYSTEM_RUNNING)
- return -ENOSYS;
-
- /* get the target out of it's holding state */
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
- smp_wmb();
-
- while (!cpu_online(cpu))
- cpu_relax();
-
-#ifdef CONFIG_PPC64
- fixup_irqs(cpu_online_mask);
- /* counter the irq disable in fixup_irqs */
- local_irq_enable();
#endif
+ migrate_irqs();
return 0;
}
unsigned int cpu;
local_irq_disable();
+ idle_task_exit();
cpu = smp_processor_id();
printk(KERN_DEBUG "CPU%d offline\n", cpu);
__get_cpu_var(cpu_state) = CPU_DEAD;
smp_wmb();
while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
cpu_relax();
- set_cpu_online(cpu, true);
- local_irq_enable();
+}
+
+void generic_set_cpu_dead(unsigned int cpu)
+{
+ per_cpu(cpu_state, cpu) = CPU_DEAD;
}
#endif
-static int __devinit cpu_enable(unsigned int cpu)
+struct create_idle {
+ struct work_struct work;
+ struct task_struct *idle;
+ struct completion done;
+ int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
{
- if (smp_ops && smp_ops->cpu_enable)
- return smp_ops->cpu_enable(cpu);
+ struct create_idle *c_idle =
+ container_of(work, struct create_idle, work);
+
+ c_idle->idle = fork_idle(c_idle->cpu);
+ complete(&c_idle->done);
+}
+
+static int __cpuinit create_idle(unsigned int cpu)
+{
+ struct thread_info *ti;
+ struct create_idle c_idle = {
+ .cpu = cpu,
+ .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+ };
+ INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+
+ c_idle.idle = get_idle_for_cpu(cpu);
+
+ /* We can't use kernel_thread since we must avoid to
+ * reschedule the child. We use a workqueue because
+ * we want to fork from a kernel thread, not whatever
+ * userspace process happens to be trying to online us.
+ */
+ if (!c_idle.idle) {
+ schedule_work(&c_idle.work);
+ wait_for_completion(&c_idle.done);
+ } else
+ init_idle(c_idle.idle, cpu);
+ if (IS_ERR(c_idle.idle)) {
+ pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
+ return PTR_ERR(c_idle.idle);
+ }
+ ti = task_thread_info(c_idle.idle);
+
+#ifdef CONFIG_PPC64
+ paca[cpu].__current = c_idle.idle;
+ paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+#endif
+ ti->cpu = cpu;
+ current_set[cpu] = ti;
- return -ENOSYS;
+ return 0;
}
int __cpuinit __cpu_up(unsigned int cpu)
{
- int c;
+ int rc, c;
secondary_ti = current_set[cpu];
- if (!cpu_enable(cpu))
- return 0;
if (smp_ops == NULL ||
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
+ /* Make sure we have an idle thread */
+ rc = create_idle(cpu);
+ if (rc)
+ return rc;
+
/* Make sure callin-map entry is 0 (can be leftover a CPU
* hotplug
*/
}
/* Activate a secondary processor. */
-int __devinit start_secondary(void *unused)
+void __devinit start_secondary(void *unused)
{
unsigned int cpu = smp_processor_id();
struct device_node *l2_cache;
secondary_cpu_time_init();
+#ifdef CONFIG_PPC64
+ if (system_state == SYSTEM_RUNNING)
+ vdso_data->processorCount++;
+#endif
ipi_call_lock();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
local_irq_enable();
cpu_idle();
- return 0;
+
+ BUG();
}
int setup_profiling_timer(unsigned int multiplier)
free_cpumask_var(old_mask);
+ if (smp_ops && smp_ops->bringup_done)
+ smp_ops->bringup_done();
+
dump_numa_cpu_topology();
+
}
int arch_sd_sibling_asym_packing(void)
{
if (ppc_md.cpu_die)
ppc_md.cpu_die();
+
+ /* If we return, we re-enter start_secondary */
+ start_secondary_resume();
}
+
#endif
}
get_paca()->user_time_scaled += user_scaled;
- if (in_irq() || idle_task(smp_processor_id()) != tsk) {
+ if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
account_system_time(tsk, 0, delta, sys_scaled);
if (stolen)
account_steal_time(stolen);
struct clock_event_device *evt = &decrementer->event;
u64 now;
+ /* Ensure a positive value is written to the decrementer, or else
+ * some CPUs will continue to take decrementer exceptions.
+ */
+ set_dec(DECREMENTER_MAX);
+
+ /* Some implementations of hotplug will get timer interrupts while
+ * offline, just ignore these
+ */
+ if (!cpu_online(smp_processor_id()))
+ return;
+
trace_timer_interrupt_entry(regs);
__get_cpu_var(irq_stat).timer_irqs++;
- /* Ensure a positive value is written to the decrementer, or else
- * some CPUs will continuue to take decrementer exceptions */
- set_dec(DECREMENTER_MAX);
-
#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
+
+/*
+ * Return the PFN for a given cpu virtual address returned by
+ * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ */
+unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+{
+ /* This should always be populated, so we don't test every
+ * level. If that fails, we'll have a nice crash which
+ * will be as good as a BUG_ON()
+ */
+ pgd_t *pgd = pgd_offset_k(cpu_addr);
+ pud_t *pud = pud_offset(pgd, cpu_addr);
+ pmd_t *pmd = pmd_offset(pud, cpu_addr);
+ pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+
+ if (pte_none(*ptep) || !pte_present(*ptep))
+ return 0;
+ return pte_pfn(*ptep);
+}
cpld_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &cpld_pic, handle_level_irq);
return 0;
}
goto end;
}
- set_irq_chained_handler(cascade_irq, cpld_pic_cascade);
+ irq_set_chained_handler(cascade_irq, cpld_pic_cascade);
end:
of_node_put(np);
}
void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int sub_virq, val;
u32 status, enable;
/* Processing done; can reenable the cascade now */
raw_spin_lock(&desc->lock);
chip->irq_ack(&desc->irq_data);
- if (!(desc->status & IRQ_DISABLED))
+ if (!irqd_irq_disabled(&desc->irq_data))
chip->irq_unmask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
static int media5200_irq_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- struct irq_desc *desc = irq_to_desc(virq);
-
pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
- set_irq_chip_data(virq, &media5200_irq);
- set_irq_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq);
- set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= IRQ_TYPE_LEVEL_LOW | IRQ_LEVEL;
-
+ irq_set_chip_data(virq, &media5200_irq);
+ irq_set_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
return 0;
}
media5200_irq.irqhost->host_data = &media5200_irq;
- set_irq_data(cascade_virq, &media5200_irq);
- set_irq_chained_handler(cascade_virq, media5200_irq_cascade);
+ irq_set_handler_data(cascade_virq, &media5200_irq);
+ irq_set_chained_handler(cascade_virq, media5200_irq_cascade);
return;
void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
{
- struct mpc52xx_gpt_priv *gpt = get_irq_data(virq);
+ struct mpc52xx_gpt_priv *gpt = irq_get_handler_data(virq);
int sub_virq;
u32 status;
struct mpc52xx_gpt_priv *gpt = h->host_data;
dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq);
- set_irq_chip_data(virq, gpt);
- set_irq_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq);
+ irq_set_chip_data(virq, gpt);
+ irq_set_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq);
return 0;
}
}
gpt->irqhost->host_data = gpt;
- set_irq_data(cascade_virq, gpt);
- set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
+ irq_set_handler_data(cascade_virq, gpt);
+ irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
/* If the GPT is currently disabled, then change it to be in Input
* Capture mode. If the mode is non-zero, then the pin could be
ctrl_reg |= (type << (22 - (l2irq * 2)));
out_be32(&intr->ctrl, ctrl_reg);
- __set_irq_handler_unlocked(d->irq, handler);
+ __irq_set_handler_locked(d->irq, handler);
return 0;
}
else
hndlr = handle_level_irq;
- set_irq_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr);
+ irq_set_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr);
pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n",
__func__, l2irq, virq, (int)irq, type);
return 0;
return -EINVAL;
}
- set_irq_chip_and_handler(virq, irqchip, handle_level_irq);
+ irq_set_chip_and_handler(virq, irqchip, handle_level_irq);
pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq);
return 0;
static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- struct pq2ads_pci_pic *priv = get_irq_desc_data(desc);
+ struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc);
u32 stat, mask, pend;
int bit;
static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_data(virq, h->host_data);
- set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
return 0;
}
static void pci_host_unmap(struct irq_host *h, unsigned int virq)
{
/* remove chip and handler */
- set_irq_chip_data(virq, NULL);
- set_irq_chip(virq, NULL);
+ irq_set_chip_data(virq, NULL);
+ irq_set_chip(virq, NULL);
}
static struct irq_host_ops pci_pic_host_ops = {
priv->host = host;
host->host_data = priv;
- set_irq_data(irq, priv);
- set_irq_chained_handler(irq, pq2ads_pci_irq_demux);
+ irq_set_handler_data(irq, priv);
+ irq_set_chained_handler(irq, pq2ads_pci_irq_demux);
of_node_put(np);
return 0;
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
cpm2_pic_init(np);
of_node_put(np);
- set_irq_chained_handler(irq, cpm2_cascade);
+ irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
cpm2_pic_init(np);
of_node_put(np);
- set_irq_chained_handler(irq, cpm2_cascade);
+ irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
}
/* Success. Connect our low-level cascade handler. */
- set_irq_handler(cascade_irq, mpc85xx_8259_cascade_handler);
+ irq_set_handler(cascade_irq, mpc85xx_8259_cascade_handler);
return 0;
}
#ifdef CONFIG_PPC_I8259
static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ) {
i8259_init(cascade_node, 0);
of_node_put(cascade_node);
- set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade);
+ irq_set_chained_handler(cascade_irq, mpc85xx_8259_cascade);
#endif /* CONFIG_PPC_I8259 */
}
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
cpm2_pic_init(np);
of_node_put(np);
- set_irq_chained_handler(irq, cpm2_cascade);
+ irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq;
/*
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip,
- handle_fasteoi_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &socrates_fpga_pic_chip,
+ handle_fasteoi_irq);
return 0;
}
pr_warning("FPGA PIC: can't get irq%d.\n", i);
continue;
}
- set_irq_chained_handler(socrates_fpga_irqs[i],
- socrates_fpga_pic_cascade);
+ irq_set_chained_handler(socrates_fpga_irqs[i],
+ socrates_fpga_pic_cascade);
}
socrates_fpga_pic_iobase = of_iomap(pic, 0);
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
cpm2_pic_init(np);
of_node_put(np);
- set_irq_chained_handler(irq, cpm2_cascade);
+ irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
while ((cascade_irq = cpm2_get_irq()) >= 0)
cpm2_pic_init(np);
of_node_put(np);
- set_irq_chained_handler(irq, cpm2_cascade);
+ irq_set_chained_handler(irq, cpm2_cascade);
#endif
}
void gef_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq;
/*
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &gef_pic_chip, handle_level_irq);
return 0;
}
return;
/* Chain with parent controller */
- set_irq_chained_handler(gef_pic_cascade_irq, gef_pic_cascade);
+ irq_set_chained_handler(gef_pic_cascade_irq, gef_pic_cascade);
}
/*
#ifdef CONFIG_PPC_I8259
static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
i8259_init(cascade_node, 0);
of_node_put(cascade_node);
- set_irq_chained_handler(cascade_irq, mpc86xx_8259_cascade);
+ irq_set_chained_handler(cascade_irq, mpc86xx_8259_cascade);
#endif
}
generic_handle_irq(cascade_irq);
- chip = get_irq_desc_chip(cdesc);
+ chip = irq_desc_get_chip(cdesc);
chip->irq_eoi(&cdesc->irq_data);
}
- chip = get_irq_desc_chip(desc);
+ chip = irq_desc_get_chip(desc);
chip->irq_eoi(&desc->irq_data);
}
irq = cpm_pic_init();
if (irq != NO_IRQ)
- set_irq_chained_handler(irq, cpm_cascade);
+ irq_set_chained_handler(irq, cpm_cascade);
}
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct axon_msic *msic = get_irq_data(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct axon_msic *msic = irq_get_handler_data(irq);
u32 write_offset, msi;
int idx;
int retry = 0;
}
dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);
- set_irq_msi(virq, entry);
+ irq_set_msi_desc(virq, entry);
msg.data = virq;
write_msi_msg(virq, &msg);
}
if (entry->irq == NO_IRQ)
continue;
- set_irq_msi(entry->irq, NULL);
+ irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
}
}
static int msic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
+ irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
return 0;
}
msic->irq_host->host_data = msic;
- set_irq_data(virq, msic);
- set_irq_chained_handler(virq, axon_msi_cascade);
+ irq_set_handler_data(virq, msic);
+ irq_set_chained_handler(virq, axon_msi_cascade);
pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
/* Enable the MSIC hardware */
static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- struct irq_desc *desc = irq_to_desc(virq);
int64_t err;
err = beat_construct_and_connect_irq_plug(virq, hw);
if (err < 0)
return -EIO;
- desc->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq);
return 0;
}
static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct cbe_iic_regs __iomem *node_iic =
- (void __iomem *)get_irq_desc_data(desc);
+ (void __iomem *)irq_desc_get_handler_data(desc);
unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
unsigned long bits, ack;
int cascade;
{
switch (hw & IIC_IRQ_TYPE_MASK) {
case IIC_IRQ_TYPE_IPI:
- set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
+ irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
break;
case IIC_IRQ_TYPE_IOEXC:
- set_irq_chip_and_handler(virq, &iic_ioexc_chip,
- handle_iic_irq);
+ irq_set_chip_and_handler(virq, &iic_ioexc_chip,
+ handle_edge_eoi_irq);
break;
default:
- set_irq_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
+ irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
}
return 0;
}
* irq_data is a generic pointer that gets passed back
* to us later, so the forced cast is fine.
*/
- set_irq_data(cascade, (void __force *)node_iic);
- set_irq_chained_handler(cascade , iic_ioexc_cascade);
+ irq_set_handler_data(cascade, (void __force *)node_iic);
+ irq_set_chained_handler(cascade, iic_ioexc_cascade);
out_be64(&node_iic->iic_ir,
(1 << 12) /* priority */ |
(node << 4) /* dest node */ |
static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct mpic *mpic = get_irq_desc_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mpic *mpic = irq_desc_get_handler_data(desc);
unsigned int virq;
virq = mpic_get_one_irq(mpic);
printk(KERN_INFO "%s : hooking up to IRQ %d\n",
dn->full_name, virq);
- set_irq_data(virq, mpic);
- set_irq_chained_handler(virq, cell_mpic_cascade);
+ irq_set_handler_data(virq, mpic);
+ irq_set_chained_handler(virq, cell_mpic_cascade);
}
}
/* Reset edge detection logic if necessary
*/
- if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
+ if (irqd_is_level_type(d))
return;
/* Only interrupts 47 to 50 can be set to edge */
struct spider_pic *pic = spider_virq_to_pic(d->irq);
unsigned int hw = irq_map[d->irq].hwirq;
void __iomem *cfg = spider_get_irq_config(pic, hw);
- struct irq_desc *desc = irq_to_desc(d->irq);
u32 old_mask;
u32 ic;
return -EINVAL;
}
- /* Update irq_desc */
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= type & IRQ_TYPE_SENSE_MASK;
- if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
- desc->status |= IRQ_LEVEL;
-
/* Configure the source. One gross hack that was there before and
* that I've kept around is the priority to the BE which I set to
     * be the same as the interrupt source number. I don't know whether
static int spider_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq);
+ irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq);
/* Set default irq type */
- set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct spider_pic *pic = get_irq_desc_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct spider_pic *pic = irq_desc_get_handler_data(desc);
unsigned int cs, virq;
cs = in_be32(pic->regs + TIR_CS) >> 24;
virq = spider_find_cascade_and_node(pic);
if (virq == NO_IRQ)
return;
- set_irq_data(virq, pic);
- set_irq_chained_handler(virq, spider_irq_cascade);
+ irq_set_handler_data(virq, pic);
+ irq_set_chained_handler(virq, spider_irq_cascade);
printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n",
pic->node_id, addr, of_node->full_name);
static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
if (cascade_irq == NO_IRQ)
printk(KERN_ERR "i8259: failed to map cascade irq\n");
else
- set_irq_chained_handler(cascade_irq,
+ irq_set_chained_handler(cascade_irq,
chrp_8259_cascade);
}
}
static int flipper_pic_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
- set_irq_chip_data(virq, h->host_data);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq);
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &flipper_pic, handle_level_irq);
return 0;
}
static void flipper_pic_unmap(struct irq_host *h, unsigned int irq)
{
- set_irq_chip_data(irq, NULL);
- set_irq_chip(irq, NULL);
+ irq_set_chip_data(irq, NULL);
+ irq_set_chip(irq, NULL);
}
static int flipper_pic_match(struct irq_host *h, struct device_node *np)
static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
- set_irq_chip_data(virq, h->host_data);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &hlwd_pic, handle_level_irq);
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &hlwd_pic, handle_level_irq);
return 0;
}
static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq)
{
- set_irq_chip_data(irq, NULL);
- set_irq_chip(irq, NULL);
+ irq_set_chip_data(irq, NULL);
+ irq_set_chip(irq, NULL);
}
static struct irq_host_ops hlwd_irq_host_ops = {
static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct irq_host *irq_host = get_irq_data(cascade_virq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_host *irq_host = irq_get_handler_data(cascade_virq);
unsigned int virq;
raw_spin_lock(&desc->lock);
raw_spin_lock(&desc->lock);
chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */
- if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
+ if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask)
chip->irq_unmask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
host = hlwd_pic_init(np);
BUG_ON(!host);
cascade_virq = irq_of_parse_and_map(np, 0);
- set_irq_data(cascade_virq, host);
- set_irq_chained_handler(cascade_virq,
+ irq_set_handler_data(cascade_virq, host);
+ irq_set_chained_handler(cascade_virq,
hlwd_pic_irq_cascade);
hlwd_irq_host = host;
break;
cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq);
tsi108_pci_int_init(cascade_node);
- set_irq_data(cascade_pci_irq, mpic);
- set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
+ irq_set_handler_data(cascade_pci_irq, mpic);
+ irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__,
(u32) cascade_pci_irq);
tsi108_pci_int_init(cascade_node);
- set_irq_data(cascade_pci_irq, mpic);
- set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
+ irq_set_handler_data(cascade_pci_irq, mpic);
+ irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
if (!desc)
continue;
- chip = get_irq_desc_chip(desc);
+ chip = irq_desc_get_chip(desc);
if (chip && chip->irq_startup) {
raw_spin_lock_irqsave(&desc->lock, flags);
chip->irq_startup(&desc->irq_data);
static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
+ irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
return 0;
}
printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
dev->irq = irq_create_mapping(NULL, 1);
if (dev->irq != NO_IRQ)
- set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
}
/* Hide AMD8111 IDE interrupt when in legacy mode so
if (nmiprop) {
nmi_virq = irq_create_mapping(NULL, *nmiprop);
mpic_irq_set_priority(nmi_virq, 15);
- set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING);
mpic_unmask_irq(irq_get_irq_data(nmi_virq));
}
dev->vendor == PCI_VENDOR_ID_DEC &&
dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
dev->irq = irq_create_mapping(NULL, 60);
- set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
}
#endif /* CONFIG_PPC32 */
}
int i = src >> 5;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
- if ((irq_to_desc(d->irq)->status & IRQ_LEVEL) == 0)
+ if (!irqd_is_level_type(d))
out_le32(&pmac_irq_hw[i]->ack, bit);
__set_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- struct irq_desc *desc = irq_to_desc(virq);
int level;
if (hw >= max_irqs)
*/
level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f)));
if (level)
- desc->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &pmac_pic, level ?
- handle_level_irq : handle_edge_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &pmac_pic,
+ level ? handle_level_irq : handle_edge_irq);
return 0;
}
static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct mpic *mpic = get_irq_desc_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mpic *mpic = irq_desc_get_handler_data(desc);
unsigned int cascade_irq = mpic_get_one_irq(mpic);
if (cascade_irq != NO_IRQ)
of_node_put(slave);
return 0;
}
- set_irq_data(cascade, mpic2);
- set_irq_chained_handler(cascade, pmac_u3_cascade);
+ irq_set_handler_data(cascade, mpic2);
+ irq_set_chained_handler(cascade, pmac_u3_cascade);
of_node_put(slave);
return 0;
extern void pmac_check_ht_link(void);
extern void pmac_setup_smp(void);
-extern void pmac32_cpu_die(void);
extern void low_cpu_die(void) __attribute__((noreturn));
extern int pmac_nvram_init(void);
return PCI_PROBE_NORMAL;
return PCI_PROBE_DEVTREE;
}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/* access per cpu vars from generic smp.c */
-DECLARE_PER_CPU(int, cpu_state);
-
-static void pmac64_cpu_die(void)
-{
- /*
- * turn off as much as possible, we'll be
- * kicked out as this will only be invoked
- * on core99 platforms for now ...
- */
-
- printk(KERN_INFO "CPU#%d offline\n", smp_processor_id());
- __get_cpu_var(cpu_state) = CPU_DEAD;
- smp_wmb();
-
- /*
- * during the path that leads here preemption is disabled,
- * reenable it now so that when coming up preempt count is
- * zero correctly
- */
- preempt_enable();
-
- /*
- * hard-disable interrupts for the non-NAP case, the NAP code
- * needs to re-enable interrupts (but soft-disables them)
- */
- hard_irq_disable();
-
- while (1) {
- /* let's not take timer interrupts too often ... */
- set_dec(0x7fffffff);
-
- /* should always be true at this point */
- if (cpu_has_feature(CPU_FTR_CAN_NAP))
- power4_cpu_offline_powersave();
- else {
- HMT_low();
- HMT_very_low();
- }
- }
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
#endif /* CONFIG_PPC64 */
define_machine(powermac) {
.pcibios_after_init = pmac_pcibios_after_init,
.phys_mem_access_prot = pci_phys_mem_access_prot,
#endif
-#ifdef CONFIG_HOTPLUG_CPU
-#ifdef CONFIG_PPC64
- .cpu_die = pmac64_cpu_die,
-#endif
-#ifdef CONFIG_PPC32
- .cpu_die = pmac32_cpu_die,
-#endif
-#endif
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
- .cpu_die = generic_mach_cpu_die,
-#endif
};
/* Setup openpic */
mpic_setup_this_cpu();
+}
- if (cpu_nr == 0) {
-#ifdef CONFIG_PPC64
- extern void g5_phy_disable_cpu1(void);
+#ifdef CONFIG_HOTPLUG_CPU
+static int smp_core99_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ int rc;
- /* Close i2c bus if it was used for tb sync */
+ switch(action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ /* Open i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host) {
- pmac_i2c_close(pmac_tb_clock_chip_host);
- pmac_tb_clock_chip_host = NULL;
+ rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1);
+ if (rc) {
+ pr_err("Failed to open i2c bus for time sync\n");
+ return notifier_from_errno(rc);
+ }
}
+ break;
+ case CPU_ONLINE:
+ case CPU_UP_CANCELED:
+ /* Close i2c bus if it was used for tb sync */
+ if (pmac_tb_clock_chip_host)
+ pmac_i2c_close(pmac_tb_clock_chip_host);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
- /* If we didn't start the second CPU, we must take
- * it off the bus
- */
- if (of_machine_is_compatible("MacRISC4") &&
- num_online_cpus() < 2)
- g5_phy_disable_cpu1();
-#endif /* CONFIG_PPC64 */
+static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
+ .notifier_call = smp_core99_cpu_notify,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static void __init smp_core99_bringup_done(void)
+{
+#ifdef CONFIG_PPC64
+ extern void g5_phy_disable_cpu1(void);
+
+ /* Close i2c bus if it was used for tb sync */
+ if (pmac_tb_clock_chip_host)
+ pmac_i2c_close(pmac_tb_clock_chip_host);
- if (ppc_md.progress)
- ppc_md.progress("core99_setup_cpu 0 done", 0x349);
+ /* If we didn't start the second CPU, we must take
+ * it off the bus.
+ */
+ if (of_machine_is_compatible("MacRISC4") &&
+ num_online_cpus() < 2) {
+ set_cpu_present(1, false);
+ g5_phy_disable_cpu1();
}
-}
+#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_HOTPLUG_CPU
+ register_cpu_notifier(&smp_core99_cpu_nb);
+#endif
+ if (ppc_md.progress)
+ ppc_md.progress("smp_core99_bringup_done", 0x349);
+}
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
+#ifdef CONFIG_HOTPLUG_CPU
-int smp_core99_cpu_disable(void)
+static int smp_core99_cpu_disable(void)
{
- set_cpu_online(smp_processor_id(), false);
+ int rc = generic_cpu_disable();
+ if (rc)
+ return rc;
- /* XXX reset cpu affinity here */
mpic_cpu_set_priority(0xf);
- asm volatile("mtdec %0" : : "r" (0x7fffffff));
- mb();
- udelay(20);
- asm volatile("mtdec %0" : : "r" (0x7fffffff));
+
return 0;
}
-static int cpu_dead[NR_CPUS];
+#ifdef CONFIG_PPC32
-void pmac32_cpu_die(void)
+static void pmac_cpu_die(void)
{
+ int cpu = smp_processor_id();
+
local_irq_disable();
- cpu_dead[smp_processor_id()] = 1;
+ idle_task_exit();
+ pr_debug("CPU%d offline\n", cpu);
+ generic_set_cpu_dead(cpu);
+ smp_wmb();
mb();
low_cpu_die();
}
-void smp_core99_cpu_die(unsigned int cpu)
+#else /* CONFIG_PPC32 */
+
+static void pmac_cpu_die(void)
{
- int timeout;
+ int cpu = smp_processor_id();
- timeout = 1000;
- while (!cpu_dead[cpu]) {
- if (--timeout == 0) {
- printk("CPU %u refused to die!\n", cpu);
- break;
- }
- msleep(1);
+ local_irq_disable();
+ idle_task_exit();
+
+ /*
+ * turn off as much as possible, we'll be
+ * kicked out as this will only be invoked
+ * on core99 platforms for now ...
+ */
+
+ printk(KERN_INFO "CPU#%d offline\n", cpu);
+ generic_set_cpu_dead(cpu);
+ smp_wmb();
+
+ /*
+ * Re-enable interrupts. The NAP code needs to enable them
+ * anyways, do it now so we deal with the case where one already
+ * happened while soft-disabled.
+ * We shouldn't get any external interrupts, only decrementer, and the
+ * decrementer handler is safe for use on offline CPUs
+ */
+ local_irq_enable();
+
+ while (1) {
+ /* let's not take timer interrupts too often ... */
+ set_dec(0x7fffffff);
+
+ /* Enter NAP mode */
+ power4_idle();
}
- cpu_dead[cpu] = 0;
}
-#endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */
+#endif /* else CONFIG_PPC32 */
+#endif /* CONFIG_HOTPLUG_CPU */
/* Core99 Macs (dual G4s and G5s) */
struct smp_ops_t core99_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_core99_probe,
+ .bringup_done = smp_core99_bringup_done,
.kick_cpu = smp_core99_kick_cpu,
.setup_cpu = smp_core99_setup_cpu,
.give_timebase = smp_core99_give_timebase,
.take_timebase = smp_core99_take_timebase,
#if defined(CONFIG_HOTPLUG_CPU)
-# if defined(CONFIG_PPC32)
.cpu_disable = smp_core99_cpu_disable,
- .cpu_die = smp_core99_cpu_die,
-# endif
-# if defined(CONFIG_PPC64)
- .cpu_disable = generic_cpu_disable,
.cpu_die = generic_cpu_die,
- /* intentionally do *NOT* assign cpu_enable,
- * the generic code will use kick_cpu then! */
-# endif
#endif
};
smp_ops = &psurge_smp_ops;
}
#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_HOTPLUG_CPU
+ ppc_md.cpu_die = pmac_cpu_die;
+#endif
}
+
pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
outlet, cpu, *virq);
- result = set_irq_chip_data(*virq, pd);
+ result = irq_set_chip_data(*virq, pd);
if (result) {
pr_debug("%s:%d: set_irq_chip_data failed\n",
static int ps3_virq_destroy(unsigned int virq)
{
- const struct ps3_private *pd = get_irq_chip_data(virq);
+ const struct ps3_private *pd = irq_get_chip_data(virq);
pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
- set_irq_chip_data(virq, NULL);
+ irq_set_chip_data(virq, NULL);
irq_dispose_mapping(virq);
pr_debug("%s:%d <-\n", __func__, __LINE__);
goto fail_setup;
}
- pd = get_irq_chip_data(*virq);
+ pd = irq_get_chip_data(*virq);
/* Binds outlet to cpu + virq. */
int ps3_irq_plug_destroy(unsigned int virq)
{
int result;
- const struct ps3_private *pd = get_irq_chip_data(virq);
+ const struct ps3_private *pd = irq_get_chip_data(virq);
pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
static void ps3_host_unmap(struct irq_host *h, unsigned int virq)
{
- set_irq_chip_data(virq, NULL);
+ irq_set_chip_data(virq, NULL);
}
static int ps3_host_map(struct irq_host *h, unsigned int virq,
pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
virq);
- set_irq_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
+ irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
return 0;
}
if (entry->irq == NO_IRQ)
continue;
- set_irq_msi(entry->irq, NULL);
+ irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
}
}
dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq);
- set_irq_msi(virq, entry);
+ irq_set_msi_desc(virq, entry);
/* Read config space back so we can restore after reset */
read_msi_msg(virq, &msg);
const char *new_msgs, unsigned long new_len)
{
static unsigned int oops_count = 0;
+ static bool panicking = false;
size_t text_len;
+ switch (reason) {
+ case KMSG_DUMP_RESTART:
+ case KMSG_DUMP_HALT:
+ case KMSG_DUMP_POWEROFF:
+ /* These are almost always orderly shutdowns. */
+ return;
+ case KMSG_DUMP_OOPS:
+ case KMSG_DUMP_KEXEC:
+ break;
+ case KMSG_DUMP_PANIC:
+ panicking = true;
+ break;
+ case KMSG_DUMP_EMERG:
+ if (panicking)
+ /* Panic report already captured. */
+ return;
+ break;
+ default:
+ pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
+ __FUNCTION__, (int) reason);
+ return;
+ }
+
if (clobbering_unread_rtas_event())
return;
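/*
 * Editorial sketch, not part of the patch: the reason filtering above lives
 * in a kmsg_dump callback.  Such a callback is hooked up once at init time
 * by registering a struct kmsg_dumper; the callback name oops_to_nvram is
 * an assumption, since the hunk does not show the function's declaration.
 */
static struct kmsg_dumper nvram_kmsg_dumper = {
	.dump = oops_to_nvram,
};

static int __init register_nvram_oops_dumper(void)
{
	return kmsg_dump_register(&nvram_kmsg_dumper);
}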
#endif
extern enum cpu_state_vals get_preferred_offline_state(int cpu);
-extern int start_secondary(void);
-extern void start_secondary_resume(void);
#endif
static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack);
i8259_init(found, intack);
of_node_put(found);
- set_irq_chained_handler(cascade, pseries_8259_cascade);
+ irq_set_chained_handler(cascade, pseries_8259_cascade);
}
static void __init pseries_mpic_init_IRQ(void)
int qcss_tok = rtas_token("query-cpu-stopped-state");
if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
- printk(KERN_INFO "Firmware doesn't support "
- "query-cpu-stopped-state\n");
+ printk_once(KERN_INFO
+ "Firmware doesn't support query-cpu-stopped-state\n");
return QCSS_HARDWARE_ERROR;
}
static void xics_unmask_irq(struct irq_data *d)
{
- unsigned int irq;
+ unsigned int hwirq;
int call_status;
int server;
pr_devel("xics: unmask virq %d\n", d->irq);
- irq = (unsigned int)irq_map[d->irq].hwirq;
- pr_devel(" -> map to hwirq 0x%x\n", irq);
- if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
+ hwirq = (unsigned int)irq_map[d->irq].hwirq;
+ pr_devel(" -> map to hwirq 0x%x\n", hwirq);
+ if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
return;
server = get_irq_server(d->irq, d->affinity, 0);
- call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
+ call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server,
DEFAULT_PRIORITY);
if (call_status != 0) {
printk(KERN_ERR
"%s: ibm_set_xive irq %u server %x returned %d\n",
- __func__, irq, server, call_status);
+ __func__, hwirq, server, call_status);
return;
}
/* Now unmask the interrupt (often a no-op) */
- call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
+ call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
- __func__, irq, call_status);
+ __func__, hwirq, call_status);
return;
}
}
return 0;
}
-static void xics_mask_real_irq(struct irq_data *d)
+static void xics_mask_real_irq(unsigned int hwirq)
{
int call_status;
- if (d->irq == XICS_IPI)
+ if (hwirq == XICS_IPI)
return;
- call_status = rtas_call(ibm_int_off, 1, 1, NULL, d->irq);
+ call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
- __func__, d->irq, call_status);
+ __func__, hwirq, call_status);
return;
}
/* Have to set XIVE to 0xff to be able to remove a slot */
- call_status = rtas_call(ibm_set_xive, 3, 1, NULL, d->irq,
+ call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq,
default_server, 0xff);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
- __func__, d->irq, call_status);
+ __func__, hwirq, call_status);
return;
}
}
static void xics_mask_irq(struct irq_data *d)
{
- unsigned int irq;
+ unsigned int hwirq;
pr_devel("xics: mask virq %d\n", d->irq);
- irq = (unsigned int)irq_map[d->irq].hwirq;
- if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
+ hwirq = (unsigned int)irq_map[d->irq].hwirq;
+ if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
return;
- xics_mask_real_irq(d);
+ xics_mask_real_irq(hwirq);
}
static void xics_mask_unknown_vec(unsigned int vec)
{
printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec);
- xics_mask_real_irq(irq_get_irq_data(vec));
+ xics_mask_real_irq(vec);
}
static inline unsigned int xics_xirr_vector(unsigned int xirr)
static void xics_eoi_direct(struct irq_data *d)
{
- unsigned int irq = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;
iosync();
- direct_xirr_info_set((pop_cppr() << 24) | irq);
+ direct_xirr_info_set((pop_cppr() << 24) | hwirq);
}
static void xics_eoi_lpar(struct irq_data *d)
{
- unsigned int irq = (unsigned int)irq_map[d->irq].hwirq;
+ unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq;
iosync();
- lpar_xirr_info_set((pop_cppr() << 24) | irq);
+ lpar_xirr_info_set((pop_cppr() << 24) | hwirq);
}
static int
xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force)
{
- unsigned int irq;
+ unsigned int hwirq;
int status;
int xics_status[2];
int irq_server;
- irq = (unsigned int)irq_map[d->irq].hwirq;
- if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
+ hwirq = (unsigned int)irq_map[d->irq].hwirq;
+ if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
return -1;
- status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
+ status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq);
if (status) {
printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
- __func__, irq, status);
+ __func__, hwirq, status);
return -1;
}
}
status = rtas_call(ibm_set_xive, 3, 1, NULL,
- irq, irq_server, xics_status[1]);
+ hwirq, irq_server, xics_status[1]);
if (status) {
printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
- __func__, irq, status);
+ __func__, hwirq, status);
return -1;
}
/* Insert the interrupt mapping into the radix tree for fast lookup */
irq_radix_revmap_insert(xics_host, virq, hw);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
return 0;
}
* IPIs are marked IRQF_DISABLED as they must run with irqs
* disabled
*/
- set_irq_handler(ipi, handle_percpu_irq);
+ irq_set_handler(ipi, handle_percpu_irq);
if (firmware_has_feature(FW_FEATURE_LPAR))
rc = request_irq(ipi, xics_ipi_action_lpar,
IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL);
void xics_migrate_irqs_away(void)
{
int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
- unsigned int irq, virq;
+ int virq;
/* If we used to be the default server, move to the new "boot_cpuid" */
if (hw_cpu == default_server)
for_each_irq(virq) {
struct irq_desc *desc;
struct irq_chip *chip;
+ unsigned int hwirq;
int xics_status[2];
int status;
unsigned long flags;
continue;
if (irq_map[virq].host != xics_host)
continue;
- irq = (unsigned int)irq_map[virq].hwirq;
+ hwirq = (unsigned int)irq_map[virq].hwirq;
/* We need to get IPIs still. */
- if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
+ if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS)
continue;
desc = irq_to_desc(virq);
if (desc == NULL || desc->action == NULL)
continue;
- chip = get_irq_desc_chip(desc);
+ chip = irq_desc_get_chip(desc);
if (chip == NULL || chip->irq_set_affinity == NULL)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
- status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
+ status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq);
if (status) {
printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
- __func__, irq, status);
+ __func__, hwirq, status);
goto unlock;
}
{
pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
return 0;
}
static void cpm2_end_irq(struct irq_data *d)
{
- struct irq_desc *desc;
int bit, word;
unsigned int irq_nr = virq_to_hw(d->irq);
- desc = irq_to_desc(irq_nr);
- if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))
- && desc->action) {
-
- bit = irq_to_siubit[irq_nr];
- word = irq_to_siureg[irq_nr];
+ bit = irq_to_siubit[irq_nr];
+ word = irq_to_siureg[irq_nr];
- ppc_cached_irq_mask[word] |= 1 << bit;
- out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
+ ppc_cached_irq_mask[word] |= 1 << bit;
+ out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
- /*
- * Work around large numbers of spurious IRQs on PowerPC 82xx
- * systems.
- */
- mb();
- }
+ /*
+ * Work around large numbers of spurious IRQs on PowerPC 82xx
+ * systems.
+ */
+ mb();
}
static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
unsigned int src = virq_to_hw(d->irq);
- struct irq_desc *desc = irq_to_desc(d->irq);
unsigned int vold, vnew, edibit;
/* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or
goto err_sense;
}
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
- if (flow_type & IRQ_TYPE_LEVEL_LOW) {
- desc->status |= IRQ_LEVEL;
- desc->handle_irq = handle_level_irq;
- } else
- desc->handle_irq = handle_edge_irq;
+ irqd_set_trigger_type(d, flow_type);
+ if (flow_type & IRQ_TYPE_LEVEL_LOW)
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ else
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
/* internal IRQ senses are LEVEL_LOW
* EXT IRQ and Port C IRQ senses are programmable
if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0)
edibit = (31 - (CPM2_IRQ_PORTC0 - src));
else
- return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;
+ return (flow_type & IRQ_TYPE_LEVEL_LOW) ?
+ IRQ_SET_MASK_OK_NOCOPY : -EINVAL;
vold = in_be32(&cpm2_intctl->ic_siexr);
if (vold != vnew)
out_be32(&cpm2_intctl->ic_siexr, vnew);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
err_sense:
pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type);
.irq_ack = cpm2_ack,
.irq_eoi = cpm2_end_irq,
.irq_set_type = cpm2_set_irq_type,
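+ /* EOI only interrupts whose handler actually ran, as the status check removed from cpm2_end_irq() used to ensure. */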
+ .flags = IRQCHIP_EOI_IF_HANDLED,
};
unsigned int cpm2_get_irq(void)
{
pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
return 0;
}
struct fsl_msi *msi_data = h->host_data;
struct irq_chip *chip = &fsl_msi_chip;
- irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING;
+ irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);
- set_irq_chip_data(virq, msi_data);
- set_irq_chip_and_handler(virq, chip, handle_edge_irq);
+ irq_set_chip_data(virq, msi_data);
+ irq_set_chip_and_handler(virq, chip, handle_edge_irq);
return 0;
}
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
- msi_data = get_irq_data(entry->irq);
- set_irq_msi(entry->irq, NULL);
+ msi_data = irq_get_handler_data(entry->irq);
+ irq_set_msi_desc(entry->irq, NULL);
msi_bitmap_free_hwirqs(&msi_data->bitmap,
virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
rc = -ENOSPC;
goto out_free;
}
- set_irq_data(virq, msi_data);
- set_irq_msi(virq, entry);
+ irq_set_handler_data(virq, msi_data);
+ irq_set_msi_desc(virq, entry);
fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
write_msi_msg(virq, &msg);
static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_data *idata = irq_desc_get_irq_data(desc);
unsigned int cascade_irq;
struct fsl_msi *msi_data;
int msir_index = -1;
u32 have_shift = 0;
struct fsl_msi_cascade_data *cascade_data;
- cascade_data = (struct fsl_msi_cascade_data *)get_irq_data(irq);
+ cascade_data = (struct fsl_msi_cascade_data *)irq_get_handler_data(irq);
msi_data = cascade_data->msi_data;
raw_spin_lock(&desc->lock);
if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
if (chip->irq_mask_ack)
- chip->irq_mask_ack(&desc->irq_data);
+ chip->irq_mask_ack(idata);
else {
- chip->irq_mask(&desc->irq_data);
- chip->irq_ack(&desc->irq_data);
+ chip->irq_mask(idata);
+ chip->irq_ack(idata);
}
}
- if (unlikely(desc->status & IRQ_INPROGRESS))
+ if (unlikely(irqd_irq_inprogress(idata)))
goto unlock;
msir_index = cascade_data->index;
if (msir_index >= NR_MSI_REG)
cascade_irq = NO_IRQ;
- desc->status |= IRQ_INPROGRESS;
+ irqd_set_chained_irq_inprogress(idata);
switch (msi_data->feature & FSL_PIC_IP_MASK) {
case FSL_PIC_IP_MPIC:
msir_value = fsl_msi_read(msi_data->msi_regs,
have_shift += intr_index + 1;
msir_value = msir_value >> (intr_index + 1);
}
- desc->status &= ~IRQ_INPROGRESS;
+ irqd_clr_chained_irq_inprogress(idata);
switch (msi_data->feature & FSL_PIC_IP_MASK) {
case FSL_PIC_IP_MPIC:
- chip->irq_eoi(&desc->irq_data);
+ chip->irq_eoi(idata);
break;
case FSL_PIC_IP_IPIC:
- if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
- chip->irq_unmask(&desc->irq_data);
+ if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+ chip->irq_unmask(idata);
break;
}
unlock:
for (i = 0; i < NR_MSI_REG; i++) {
virq = msi->msi_virqs[i];
if (virq != NO_IRQ) {
- cascade_data = get_irq_data(virq);
+ cascade_data = irq_get_handler_data(virq);
kfree(cascade_data);
irq_dispose_mapping(virq);
}
msi->msi_virqs[irq_index] = virt_msir;
cascade_data->index = offset + irq_index;
cascade_data->msi_data = msi;
- set_irq_data(virt_msir, cascade_data);
- set_irq_chained_handler(virt_msir, fsl_msi_cascade);
+ irq_set_handler_data(virt_msir, cascade_data);
+ irq_set_chained_handler(virt_msir, fsl_msi_cascade);
return 0;
}
/* We block the internal cascade */
if (hw == 2)
- irq_to_desc(virq)->status |= IRQ_NOREQUEST;
+ irq_set_status_flags(virq, IRQ_NOREQUEST);
/* We use the level handler only for now, we might want to
* be more cautious here but that works for now
*/
- irq_to_desc(virq)->status |= IRQ_LEVEL;
- set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq);
return 0;
}
i8259_mask_irq(irq_get_irq_data(virq));
/* remove chip and handler */
- set_irq_chip_and_handler(virq, NULL, NULL);
+ irq_set_chip_and_handler(virq, NULL, NULL);
/* Make sure it's completed */
synchronize_irq(virq);
{
struct ipic *ipic = ipic_from_irq(d->irq);
unsigned int src = ipic_irq_to_hw(d->irq);
- struct irq_desc *desc = irq_to_desc(d->irq);
unsigned int vold, vnew, edibit;
if (flow_type == IRQ_TYPE_NONE)
printk(KERN_ERR "ipic: edge sense not supported on internal "
"interrupts\n");
return -EINVAL;
+
}
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
+ irqd_set_trigger_type(d, flow_type);
if (flow_type & IRQ_TYPE_LEVEL_LOW) {
- desc->status |= IRQ_LEVEL;
- desc->handle_irq = handle_level_irq;
- desc->irq_data.chip = &ipic_level_irq_chip;
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ d->chip = &ipic_level_irq_chip;
} else {
- desc->handle_irq = handle_edge_irq;
- desc->irq_data.chip = &ipic_edge_irq_chip;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ d->chip = &ipic_edge_irq_chip;
}
/* only EXT IRQ senses are programmable on ipic
}
if (vold != vnew)
ipic_write(ipic->regs, IPIC_SECNR, vnew);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
/* level interrupts and edge interrupts have different ack operations */
{
struct ipic *ipic = h->host_data;
- set_irq_chip_data(virq, ipic);
- set_irq_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);
+ irq_set_chip_data(virq, ipic);
+ irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);
/* Set default irq type */
- set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
-
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
- if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
- desc->status |= IRQ_LEVEL;
-
if (flow_type & IRQ_TYPE_EDGE_FALLING) {
irq_hw_number_t hw = (unsigned int)irq_map[d->irq].hwirq;
unsigned int siel = in_be32(&siu_reg->sc_siel);
if ((hw & 1) == 0) {
siel |= (0x80000000 >> hw);
out_be32(&siu_reg->sc_siel, siel);
- desc->handle_irq = handle_edge_irq;
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
}
}
return 0;
pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw);
/* Set default irq handle */
- set_irq_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq);
+ irq_set_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq);
return 0;
}
static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_desc_data(desc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
unsigned int mask;
if (mpc8xxx_gc->of_dev_id_data)
mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
- set_irq_chip_data(virq, h->host_data);
- set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
- set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
out_be32(mm_gc->regs + GPIO_IMR, 0);
- set_irq_data(hwirq, mpc8xxx_gc);
- set_irq_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);
+ irq_set_handler_data(hwirq, mpc8xxx_gc);
+ irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade);
skip_irq:
return;
}
static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
- unsigned int irqflags)
+ bool level)
{
struct mpic_irq_fixup *fixup = &mpic->fixups[source];
unsigned long flags;
if (fixup->base == NULL)
return;
- DBG("startup_ht_interrupt(0x%x, 0x%x) index: %d\n",
- source, irqflags, fixup->index);
+ DBG("startup_ht_interrupt(0x%x) index: %d\n",
+ source, fixup->index);
raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
/* Enable and configure */
writeb(0x10 + 2 * fixup->index, fixup->base + 2);
tmp = readl(fixup->base + 4);
tmp &= ~(0x23U);
- if (irqflags & IRQ_LEVEL)
+ if (level)
tmp |= 0x22;
writel(tmp, fixup->base + 4);
raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
#endif
}
-static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
- unsigned int irqflags)
+static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source)
{
struct mpic_irq_fixup *fixup = &mpic->fixups[source];
unsigned long flags;
if (fixup->base == NULL)
return;
- DBG("shutdown_ht_interrupt(0x%x, 0x%x)\n", source, irqflags);
+ DBG("shutdown_ht_interrupt(0x%x)\n", source);
/* Disable */
raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
if (irq < NUM_ISA_INTERRUPTS)
return NULL;
- return get_irq_chip_data(irq);
+ return irq_get_chip_data(irq);
}
/* Determine if the linux irq is an IPI */
/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
- return get_irq_chip_data(irq);
+ return irq_get_chip_data(irq);
}
/* Get the mpic structure from the irq data */
mpic_unmask_irq(d);
- if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
+ if (irqd_is_level_type(d))
mpic_ht_end_irq(mpic, src);
}
unsigned int src = mpic_irq_to_hw(d->irq);
mpic_unmask_irq(d);
- mpic_startup_ht_interrupt(mpic, src, irq_to_desc(d->irq)->status);
+ mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));
return 0;
}
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = mpic_irq_to_hw(d->irq);
- mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(d->irq)->status);
+ mpic_shutdown_ht_interrupt(mpic, src);
mpic_mask_irq(d);
}
* latched another edge interrupt coming in anyway
*/
- if (irq_to_desc(d->irq)->status & IRQ_LEVEL)
+ if (irqd_is_level_type(d))
mpic_ht_end_irq(mpic, src);
mpic_eoi(mpic);
}
{
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = mpic_irq_to_hw(d->irq);
- struct irq_desc *desc = irq_to_desc(d->irq);
unsigned int vecpri, vold, vnew;
DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
if (flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_LEVEL_LOW;
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
- if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
- desc->status |= IRQ_LEVEL;
+ irqd_set_trigger_type(d, flow_type);
if (mpic_is_ht_interrupt(mpic, src))
vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
if (vold != vnew)
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
void mpic_set_vector(unsigned int virq, unsigned int vector)
WARN_ON(!(mpic->flags & MPIC_PRIMARY));
DBG("mpic: mapping as IPI\n");
- set_irq_chip_data(virq, mpic);
- set_irq_chip_and_handler(virq, &mpic->hc_ipi,
+ irq_set_chip_data(virq, mpic);
+ irq_set_chip_and_handler(virq, &mpic->hc_ipi,
handle_percpu_irq);
return 0;
}
DBG("mpic: mapping to irq chip @%p\n", chip);
- set_irq_chip_data(virq, mpic);
- set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq);
+ irq_set_chip_data(virq, mpic);
+ irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
/* Set default irq type */
- set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
/* If the MPIC was reset, then all vectors have already been
* initialized. Otherwise, a per source lazy initialization
if (entry->irq == NO_IRQ)
continue;
- set_irq_msi(entry->irq, NULL);
+ irq_set_msi_desc(entry->irq, NULL);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
*/
mpic_set_vector(virq, 0);
- set_irq_msi(virq, entry);
- set_irq_chip(virq, &mpic_pasemi_msi_chip);
- set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
+ irq_set_msi_desc(virq, entry);
+ irq_set_chip(virq, &mpic_pasemi_msi_chip);
+ irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \
"addr 0x%x\n", virq, hwirq, msg.address_lo);
if (entry->irq == NO_IRQ)
continue;
- set_irq_msi(entry->irq, NULL);
+ irq_set_msi_desc(entry->irq, NULL);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
return -ENOSPC;
}
- set_irq_msi(virq, entry);
- set_irq_chip(virq, &mpic_u3msi_chip);
- set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
+ irq_set_msi_desc(virq, entry);
+ irq_set_chip(virq, &mpic_u3msi_chip);
+ irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
virq, hwirq, (unsigned long)addr);
{
int level1;
- irq_to_desc(virq)->status |= IRQ_LEVEL;
+ irq_set_status_flags(virq, IRQ_LEVEL);
level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
BUG_ON(level1 > MV64x60_LEVEL1_GPP);
- set_irq_chip_and_handler(virq, mv64x60_chips[level1], handle_level_irq);
+ irq_set_chip_and_handler(virq, mv64x60_chips[level1],
+ handle_level_irq);
return 0;
}
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
- return get_irq_chip_data(virq);
+ return irq_get_chip_data(virq);
}
static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
/* Default chip */
chip = &qe_ic->hc_irq;
- set_irq_chip_data(virq, qe_ic);
- irq_to_desc(virq)->status |= IRQ_LEVEL;
+ irq_set_chip_data(virq, qe_ic);
+ irq_set_status_flags(virq, IRQ_LEVEL);
- set_irq_chip_and_handler(virq, chip, handle_level_irq);
+ irq_set_chip_and_handler(virq, chip, handle_level_irq);
return 0;
}
qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
- set_irq_data(qe_ic->virq_low, qe_ic);
- set_irq_chained_handler(qe_ic->virq_low, low_handler);
+ irq_set_handler_data(qe_ic->virq_low, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_low, low_handler);
if (qe_ic->virq_high != NO_IRQ &&
qe_ic->virq_high != qe_ic->virq_low) {
- set_irq_data(qe_ic->virq_high, qe_ic);
- set_irq_chained_handler(qe_ic->virq_high, high_handler);
+ irq_set_handler_data(qe_ic->virq_high, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_high, high_handler);
}
}
DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
if ((virq >= 1) && (virq <= 4)){
irq = virq + IRQ_PCI_INTAD_BASE - 1;
- irq_to_desc(irq)->status |= IRQ_LEVEL;
- set_irq_chip(irq, &tsi108_pci_irq);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ irq_set_chip(irq, &tsi108_pci_irq);
}
return 0;
}
void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = get_pci_source();
if (cascade_irq != NO_IRQ)
static void uic_unmask_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
struct uic *uic = irq_data_get_irq_chip_data(d);
unsigned int src = uic_irq_to_hw(d->irq);
unsigned long flags;
sr = 1 << (31-src);
spin_lock_irqsave(&uic->lock, flags);
/* ack level-triggered interrupts here */
- if (desc->status & IRQ_LEVEL)
+ if (irqd_is_level_type(d))
mtdcr(uic->dcrbase + UIC_SR, sr);
er = mfdcr(uic->dcrbase + UIC_ER);
er |= sr;
static void uic_mask_ack_irq(struct irq_data *d)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
struct uic *uic = irq_data_get_irq_chip_data(d);
unsigned int src = uic_irq_to_hw(d->irq);
unsigned long flags;
* level interrupts are ack'ed after the actual
* isr call in the uic_unmask_irq()
*/
- if (!(desc->status & IRQ_LEVEL))
+ if (!irqd_is_level_type(d))
mtdcr(uic->dcrbase + UIC_SR, sr);
spin_unlock_irqrestore(&uic->lock, flags);
}
{
struct uic *uic = irq_data_get_irq_chip_data(d);
unsigned int src = uic_irq_to_hw(d->irq);
- struct irq_desc *desc = irq_to_desc(d->irq);
unsigned long flags;
int trigger, polarity;
u32 tr, pr, mask;
mtdcr(uic->dcrbase + UIC_PR, pr);
mtdcr(uic->dcrbase + UIC_TR, tr);
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
- if (!trigger)
- desc->status |= IRQ_LEVEL;
-
spin_unlock_irqrestore(&uic->lock, flags);
return 0;
{
struct uic *uic = h->host_data;
- set_irq_chip_data(virq, uic);
+ irq_set_chip_data(virq, uic);
/* Despite the name, handle_level_irq() works for both level
* and edge irqs on UIC. FIXME: check this is correct */
- set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
/* Set default irq type */
- set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
- struct uic *uic = get_irq_data(virq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_data *idata = irq_desc_get_irq_data(desc);
+ struct uic *uic = irq_get_handler_data(virq);
u32 msr;
int src;
int subvirq;
raw_spin_lock(&desc->lock);
- if (desc->status & IRQ_LEVEL)
- chip->irq_mask(&desc->irq_data);
+ if (irqd_is_level_type(idata))
+ chip->irq_mask(idata);
else
- chip->irq_mask_ack(&desc->irq_data);
+ chip->irq_mask_ack(idata);
raw_spin_unlock(&desc->lock);
msr = mfdcr(uic->dcrbase + UIC_MSR);
uic_irq_ret:
raw_spin_lock(&desc->lock);
- if (desc->status & IRQ_LEVEL)
- chip->irq_ack(&desc->irq_data);
- if (!(desc->status & IRQ_DISABLED) && chip->irq_unmask)
- chip->irq_unmask(&desc->irq_data);
+ if (irqd_is_level_type(idata))
+ chip->irq_ack(idata);
+ if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+ chip->irq_unmask(idata);
raw_spin_unlock(&desc->lock);
}
cascade_virq = irq_of_parse_and_map(np, 0);
- set_irq_data(cascade_virq, uic);
- set_irq_chained_handler(cascade_virq, uic_irq_cascade);
+ irq_set_handler_data(cascade_virq, uic);
+ irq_set_chained_handler(cascade_virq, uic_irq_cascade);
/* FIXME: setup critical cascade?? */
}
static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)
{
- struct irq_desc *desc = irq_to_desc(d->irq);
-
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
- if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
- desc->status |= IRQ_LEVEL;
return 0;
}
static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t irq)
{
- set_irq_chip_data(virq, h->host_data);
+ irq_set_chip_data(virq, h->host_data);
if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH ||
xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) {
- set_irq_chip_and_handler(virq, &xilinx_intc_level_irqchip,
- handle_level_irq);
+ irq_set_chip_and_handler(virq, &xilinx_intc_level_irqchip,
+ handle_level_irq);
} else {
- set_irq_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
- handle_edge_irq);
+ irq_set_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
+ handle_edge_irq);
}
return 0;
}
*/
static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- struct irq_chip *chip = get_irq_desc_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq)
}
i8259_init(cascade_node, 0);
- set_irq_chained_handler(cascade_irq, xilinx_i8259_cascade);
+ irq_set_chained_handler(cascade_irq, xilinx_i8259_cascade);
/* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */
/* This looks like a dirty hack to me --gcl */
config SCORE
def_bool y
select HAVE_GENERIC_HARDIRQS
- select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IRQ_SHOW
choice
select HAVE_SPARSE_IRQ
select RTC_LIB
select GENERIC_ATOMIC64
- select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IRQ_SHOW
help
The SuperH is a RISC processor targeted for use in embedded systems
{
plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-5 */
- set_irq_type(32, IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */
- set_irq_type(33, IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */
- set_irq_type(34, IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */
- set_irq_type(35, IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */
- set_irq_type(36, IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */
- set_irq_type(37, IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */
+ irq_set_irq_type(32, IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */
+ irq_set_irq_type(33, IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */
+ irq_set_irq_type(34, IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */
+ irq_set_irq_type(35, IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */
+ irq_set_irq_type(36, IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */
+ irq_set_irq_type(37, IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */
intc_set_priority(32, 13); /* IRQ0 CAN1 */
intc_set_priority(33, 13); /* IRQ0 CAN2 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/sh_flctl.h>
#include <linux/delay.h>
[0] = {
.name = "SDHI0",
.start = 0x04ce0000,
- .end = 0x04ce01ff,
+ .end = 0x04ce00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
[0] = {
.name = "SDHI1",
.start = 0x04cf0000,
- .end = 0x04cf01ff,
+ .end = 0x04cf00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
}
for (i = 0; i < NR_EXT_IRQS; i++) {
- set_irq_chip_and_handler(START_EXT_IRQS + i, &cayman_irq_type,
- handle_level_irq);
+ irq_set_chip_and_handler(START_EXT_IRQS + i,
+ &cayman_irq_type, handle_level_irq);
}
/* Setup the SMSC interrupt */
return;
}
- set_irq_chip_and_handler(i, &systemasic_int,
- handle_level_irq);
+ irq_set_chip_and_handler(i, &systemasic_int, handle_level_irq);
}
}
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
.irq = IRQ0,
};
-#ifdef CONFIG_MFD_SH_MOBILE_SDHI
+#if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
/* SDHI0 */
static void sdhi0_set_pwr(struct platform_device *pdev, int state)
{
[0] = {
.name = "SDHI0",
.start = 0x04ce0000,
- .end = 0x04ce01ff,
+ .end = 0x04ce00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
[0] = {
.name = "SDHI1",
.start = 0x04cf0000,
- .end = 0x04cf01ff,
+ .end = 0x04cf00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
&ceu0_device,
&ceu1_device,
&keysc_device,
-#ifdef CONFIG_MFD_SH_MOBILE_SDHI
+#if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
&sdhi0_device,
#if !defined(CONFIG_MMC_SH_MMCIF)
&sdhi1_device,
/* enable TouchScreen */
i2c_register_board_info(0, &ts_i2c_clients, 1);
- set_irq_type(IRQ0, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(IRQ0, IRQ_TYPE_LEVEL_LOW);
}
/* enable CEU0 */
gpio_direction_input(GPIO_PTR5);
gpio_direction_input(GPIO_PTR6);
-#ifdef CONFIG_MFD_SH_MOBILE_SDHI
+#if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
/* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
gpio_request(GPIO_FN_SDHI0CD, NULL);
gpio_request(GPIO_FN_SDHI0WP, NULL);
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/onenand.h>
[0] = {
.name = "SDHI0",
.start = 0x04ce0000,
- .end = 0x04ce01ff,
+ .end = 0x04ce00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
static void __init make_microdev_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- set_irq_chip_and_handler(irq, &microdev_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(irq, &microdev_irq_type, handle_level_irq);
disable_microdev_irq(irq_get_irq_data(irq));
}
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
-#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
[0] = {
.name = "SDHI",
.start = 0x04ce0000,
- .end = 0x04ce01ff,
+ .end = 0x04ce00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
{
unsigned short sts0,sts1;
unsigned int irq = data->irq;
- struct irq_desc *desc = irq_to_desc(irq);
- if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ if (!irqd_irq_disabled(data) && !irqd_irq_inprogress(data))
enable_se7206_irq(data);
/* FPGA isr clear */
sts0 = __raw_readw(INTSTS0);
static void make_se7206_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- set_irq_chip_and_handler_name(irq, &se7206_irq_chip,
+ irq_set_chip_and_handler_name(irq, &se7206_irq_chip,
handle_level_irq, "level");
disable_se7206_irq(irq_get_irq_data(irq));
}
return;
se7343_fpga_irq[i] = irq;
- set_irq_chip_and_handler_name(se7343_fpga_irq[i],
+ irq_set_chip_and_handler_name(se7343_fpga_irq[i],
&se7343_irq_chip,
- handle_level_irq, "level");
+ handle_level_irq,
+ "level");
- set_irq_chip_data(se7343_fpga_irq[i], (void *)i);
+ irq_set_chip_data(se7343_fpga_irq[i], (void *)i);
}
- set_irq_chained_handler(IRQ0_IRQ, se7343_irq_demux);
- set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
- set_irq_chained_handler(IRQ1_IRQ, se7343_irq_demux);
- set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
- set_irq_chained_handler(IRQ4_IRQ, se7343_irq_demux);
- set_irq_type(IRQ4_IRQ, IRQ_TYPE_LEVEL_LOW);
- set_irq_chained_handler(IRQ5_IRQ, se7343_irq_demux);
- set_irq_type(IRQ5_IRQ, IRQ_TYPE_LEVEL_LOW);
+ irq_set_chained_handler(IRQ0_IRQ, se7343_irq_demux);
+ irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
+ irq_set_chained_handler(IRQ1_IRQ, se7343_irq_demux);
+ irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
+ irq_set_chained_handler(IRQ4_IRQ, se7343_irq_demux);
+ irq_set_irq_type(IRQ4_IRQ, IRQ_TYPE_LEVEL_LOW);
+ irq_set_chained_handler(IRQ5_IRQ, se7343_irq_demux);
+ irq_set_irq_type(IRQ5_IRQ, IRQ_TYPE_LEVEL_LOW);
}
return;
se7722_fpga_irq[i] = irq;
- set_irq_chip_and_handler_name(se7722_fpga_irq[i],
+ irq_set_chip_and_handler_name(se7722_fpga_irq[i],
&se7722_irq_chip,
- handle_level_irq, "level");
+ handle_level_irq,
+ "level");
- set_irq_chip_data(se7722_fpga_irq[i], (void *)i);
+ irq_set_chip_data(se7722_fpga_irq[i], (void *)i);
}
- set_irq_chained_handler(IRQ0_IRQ, se7722_irq_demux);
- set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
+ irq_set_chained_handler(IRQ0_IRQ, se7722_irq_demux);
+ irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
- set_irq_chained_handler(IRQ1_IRQ, se7722_irq_demux);
- set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
+ irq_set_chained_handler(IRQ1_IRQ, se7722_irq_demux);
+ irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
}
return;
}
- set_irq_chip_and_handler_name(irq,
- &se7724_irq_chip,
+ irq_set_chip_and_handler_name(irq, &se7724_irq_chip,
handle_level_irq, "level");
}
- set_irq_chained_handler(IRQ0_IRQ, se7724_irq_demux);
- set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
+ irq_set_chained_handler(IRQ0_IRQ, se7724_irq_demux);
+ irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
- set_irq_chained_handler(IRQ1_IRQ, se7724_irq_demux);
- set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
+ irq_set_chained_handler(IRQ1_IRQ, se7724_irq_demux);
+ irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
- set_irq_chained_handler(IRQ2_IRQ, se7724_irq_demux);
- set_irq_type(IRQ2_IRQ, IRQ_TYPE_LEVEL_LOW);
+ irq_set_chained_handler(IRQ2_IRQ, se7724_irq_demux);
+ irq_set_irq_type(IRQ2_IRQ, IRQ_TYPE_LEVEL_LOW);
}
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
#include <linux/delay.h>
#include <linux/smc91x.h>
[0] = {
.name = "SDHI0",
.start = 0x04ce0000,
- .end = 0x04ce01ff,
+ .end = 0x04ce00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
[0] = {
.name = "SDHI1",
.start = 0x04cf0000,
- .end = 0x04cf01ff,
+ .end = 0x04cf00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
spin_lock_irqsave(&x3proto_gpio_lock, flags);
x3proto_gpio_irq_map[i] = irq;
- set_irq_chip_and_handler_name(irq, &dummy_irq_chip,
- handle_simple_irq, "gpio");
+ irq_set_chip_and_handler_name(irq, &dummy_irq_chip,
+ handle_simple_irq, "gpio");
spin_unlock_irqrestore(&x3proto_gpio_lock, flags);
}
x3proto_gpio_chip.base + x3proto_gpio_chip.ngpio,
ilsel);
- set_irq_chained_handler(ilsel, x3proto_gpio_irq_handler);
- set_irq_wake(ilsel, 1);
+ irq_set_chained_handler(ilsel, x3proto_gpio_irq_handler);
+ irq_set_irq_wake(ilsel, 1);
return 0;
return -EINVAL;
}
- set_irq_chip_and_handler(i, &hd64461_irq_chip,
+ irq_set_chip_and_handler(i, &hd64461_irq_chip,
handle_level_irq);
}
- set_irq_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux);
- set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW);
+ irq_set_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux);
+ irq_set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW);
#ifdef CONFIG_HD64461_ENABLER
printk(KERN_INFO "HD64461: enabling PCMCIA devices\n");
void make_imask_irq(unsigned int irq)
{
- set_irq_chip_and_handler_name(irq, &imask_irq_chip,
- handle_level_irq, "level");
+ irq_set_chip_and_handler_name(irq, &imask_irq_chip, handle_level_irq,
+ "level");
}
/* Set default: per-line enable/disable, priority driven ack/eoi */
for (i = 0; i < NR_INTC_IRQS; i++)
- set_irq_chip_and_handler(i, &intc_irq_type, handle_level_irq);
+ irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
/* Disable all interrupts and set all priorities to 0 to avoid trouble */
}
disable_irq_nosync(p->irq);
- set_irq_chip_and_handler_name(p->irq, &desc->chip,
- handle_level_irq, "level");
- set_irq_chip_data(p->irq, p);
+ irq_set_chip_and_handler_name(p->irq, &desc->chip,
+ handle_level_irq, "level");
+ irq_set_chip_data(p->irq, p);
disable_ipr_irq(irq_get_irq_data(p->irq));
}
}
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_GENERIC_HARDIRQS
- select GENERIC_HARDIRQS_NO_DEPRECATED
+ select GENERIC_IRQ_SHOW
+ select IRQ_PREFLOW_FASTEOI
config ARCH_DEFCONFIG
string
#define __NR_name_to_handle_at 332
#define __NR_open_by_handle_at 333
#define __NR_clock_adjtime 334
+#define __NR_syncfs 335
-#define NR_syscalls 335
+#define NR_syscalls 336
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
}
EXPORT_SYMBOL(auxio_set_lte);
-static struct of_device_id __initdata auxio_match[] = {
+static const struct of_device_id auxio_match[] = {
{
.name = "auxio",
},
goto out;
}
-static struct of_device_id __initdata clock_board_match[] = {
+static const struct of_device_id clock_board_match[] = {
{
.name = "clock-board",
},
goto out;
}
-static struct of_device_id __initdata fhc_match[] = {
+static const struct of_device_id fhc_match[] = {
{
.name = "fhc",
},
return 0;
}
-static struct vio_device_id __initdata ds_match[] = {
+static const struct vio_device_id ds_match[] = {
{
.type = "domain-services-port",
},
.globl ret_from_fork
ret_from_fork:
call schedule_tail
- mov %g3, %o0
+ ld [%g3 + TI_TASK], %o0
b ret_sys_call
ld [%sp + STACKFRAME_SZ + PT_I0], %o0
/*
* /proc/interrupts printing:
*/
-
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
- unsigned long flags;
+ int j;
- if (i == 0) {
- seq_printf(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
- seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if (!action)
- goto skip;
- seq_printf(p, "%3d: ",i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#endif
- seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-skip:
- raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS) {
- seq_printf(p, "NMI: ");
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
- seq_printf(p, " Non-maskable interrupts\n");
- }
+ seq_printf(p, "NMI: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
+ seq_printf(p, " Non-maskable interrupts\n");
return 0;
}
static void sun4u_irq_eoi(struct irq_data *data)
{
struct irq_handler_data *handler_data = data->handler_data;
- struct irq_desc *desc = irq_desc + data->irq;
-
- if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- return;
if (likely(handler_data))
upa_writeq(ICLR_IDLE, handler_data->iclr);
static void sun4v_irq_eoi(struct irq_data *data)
{
unsigned int ino = irq_table[data->irq].dev_ino;
- struct irq_desc *desc = irq_desc + data->irq;
int err;
- if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- return;
-
err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_setstate(%x): "
static void sun4v_virq_eoi(struct irq_data *data)
{
- struct irq_desc *desc = irq_desc + data->irq;
unsigned long dev_handle, dev_ino;
int err;
- if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- return;
-
dev_handle = irq_table[data->irq].dev_handle;
dev_ino = irq_table[data->irq].dev_ino;
.irq_disable = sun4u_irq_disable,
.irq_eoi = sun4u_irq_eoi,
.irq_set_affinity = sun4u_set_affinity,
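+ /* The core now skips irq_eoi() for unhandled interrupts, which is why the IRQ_DISABLED/IRQ_INPROGRESS checks were dropped from the *_eoi() handlers above. */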
+ .flags = IRQCHIP_EOI_IF_HANDLED,
};
static struct irq_chip sun4v_irq = {
.irq_disable = sun4v_irq_disable,
.irq_eoi = sun4v_irq_eoi,
.irq_set_affinity = sun4v_set_affinity,
+ .flags = IRQCHIP_EOI_IF_HANDLED,
};
static struct irq_chip sun4v_virq = {
.irq_disable = sun4v_virq_disable,
.irq_eoi = sun4v_virq_eoi,
.irq_set_affinity = sun4v_virt_set_affinity,
+ .flags = IRQCHIP_EOI_IF_HANDLED,
};
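+/* Preflow handler: with IRQ_PREFLOW_FASTEOI selected (see the sparc Kconfig hunk above), handle_fasteoi_irq() invokes this before the action handlers, so the explicit handle_fasteoi_irq() call is no longer needed here. */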
-static void pre_flow_handler(unsigned int irq, struct irq_desc *desc)
+static void pre_flow_handler(struct irq_data *d)
{
- struct irq_handler_data *handler_data = get_irq_data(irq);
- unsigned int ino = irq_table[irq].dev_ino;
+ struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d);
+ unsigned int ino = irq_table[d->irq].dev_ino;
handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
-
- handle_fasteoi_irq(irq, desc);
}
void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2)
{
- struct irq_handler_data *handler_data = get_irq_data(irq);
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_handler_data *handler_data = irq_get_handler_data(irq);
handler_data->pre_handler = func;
handler_data->arg1 = arg1;
handler_data->arg2 = arg2;
- desc->handle_irq = pre_flow_handler;
+ __irq_set_preflow_handler(irq, pre_flow_handler);
}
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
if (!irq) {
irq = irq_alloc(0, ino);
bucket_set_irq(__pa(bucket), irq);
- set_irq_chip_and_handler_name(irq,
- &sun4u_irq,
- handle_fasteoi_irq,
- "IVEC");
+ irq_set_chip_and_handler_name(irq, &sun4u_irq,
+ handle_fasteoi_irq, "IVEC");
}
- handler_data = get_irq_data(irq);
+ handler_data = irq_get_handler_data(irq);
if (unlikely(handler_data))
goto out;
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
- set_irq_data(irq, handler_data);
+ irq_set_handler_data(irq, handler_data);
handler_data->imap = imap;
handler_data->iclr = iclr;
if (!irq) {
irq = irq_alloc(0, sysino);
bucket_set_irq(__pa(bucket), irq);
- set_irq_chip_and_handler_name(irq, chip,
- handle_fasteoi_irq,
+ irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq,
"IVEC");
}
- handler_data = get_irq_data(irq);
+ handler_data = irq_get_handler_data(irq);
if (unlikely(handler_data))
goto out;
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
- set_irq_data(irq, handler_data);
+ irq_set_handler_data(irq, handler_data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
struct irq_handler_data *handler_data;
unsigned long hv_err, cookie;
struct ino_bucket *bucket;
- struct irq_desc *desc;
unsigned int irq;
bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
irq = irq_alloc(devhandle, devino);
bucket_set_irq(__pa(bucket), irq);
- set_irq_chip_and_handler_name(irq, &sun4v_virq,
- handle_fasteoi_irq,
+ irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq,
"IVEC");
handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
* especially wrt. locking, we do not let request_irq() enable
* the interrupt.
*/
- desc = irq_desc + irq;
- desc->status |= IRQ_NOAUTOEN;
-
- set_irq_data(irq, handler_data);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ irq_set_handler_data(irq, handler_data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
orig_sp = set_hardirq_stack();
while (bucket_pa) {
- struct irq_desc *desc;
unsigned long next_pa;
unsigned int irq;
irq = bucket_get_irq(bucket_pa);
bucket_clear_chain_pa(bucket_pa);
- desc = irq_desc + irq;
-
- if (!(desc->status & IRQ_DISABLED))
- desc->handle_irq(irq, desc);
+ generic_handle_irq(irq);
bucket_pa = next_pa;
}
unsigned int irq;
for (irq = 0; irq < NR_IRQS; irq++) {
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_data *data = irq_desc_get_irq_data(desc);
unsigned long flags;
- raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
- if (irq_desc[irq].action &&
- !(irq_desc[irq].status & IRQ_PER_CPU)) {
- struct irq_data *data = irq_get_irq_data(irq);
-
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (desc->action && !irqd_is_per_cpu(data)) {
if (data->chip->irq_set_affinity)
data->chip->irq_set_affinity(data,
- data->affinity,
- false);
+ data->affinity,
+ false);
}
- raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
tick_ops->disable_irq();
: "i" (PSTATE_IE)
: "g1");
- irq_desc[0].action = &timer_irq_action;
+ irq_to_desc(0)->action = &timer_irq_action;
}
return hp;
}
-static void mdesc_memblock_free(struct mdesc_handle *hp)
+static void __init mdesc_memblock_free(struct mdesc_handle *hp)
{
unsigned int alloc_size;
unsigned long start;
void arch_teardown_msi_irq(unsigned int irq)
{
- struct msi_desc *entry = get_irq_msi(irq);
+ struct msi_desc *entry = irq_get_msi_desc(irq);
struct pci_dev *pdev = entry->dev;
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
return err;
}
-static struct of_device_id __initdata fire_match[] = {
+static const struct of_device_id fire_match[] = {
{
.name = "pci",
.compatible = "pciex108e,80f0",
err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
if (likely(err > 0)) {
- struct irq_desc *desc;
unsigned int irq;
irq = pbm->msi_irq_table[msi - pbm->msi_first];
- desc = irq_desc + irq;
-
- desc->handle_irq(irq, desc);
+ generic_handle_irq(irq);
}
if (unlikely(err < 0))
if (!*irq_p)
goto out_err;
- set_irq_chip_and_handler_name(*irq_p, &msi_irq,
- handle_simple_irq, "MSI");
+ irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq,
+ "MSI");
err = alloc_msi(pbm);
if (unlikely(err < 0))
}
msg.data = msi;
- set_irq_msi(*irq_p, entry);
+ irq_set_msi_desc(*irq_p, entry);
write_msi_msg(*irq_p, &msg);
return 0;
free_msi(pbm, msi);
out_irq_free:
- set_irq_chip(*irq_p, NULL);
+ irq_set_chip(*irq_p, NULL);
irq_free(*irq_p);
*irq_p = 0;
free_msi(pbm, msi_num);
- set_irq_chip(irq, NULL);
+ irq_set_chip(irq, NULL);
irq_free(irq);
}
return err;
}
-static struct of_device_id __initdata psycho_match[] = {
+static const struct of_device_id psycho_match[] = {
{
.name = "pci",
.compatible = "pci108e,8000",
return err;
}
-static struct of_device_id __initdata sabre_match[] = {
+static const struct of_device_id sabre_match[] = {
{
.name = "pci",
.compatible = "pci108e,a001",
* and pci108e,8001. So list the chips in reverse chronological
* order.
*/
-static struct of_device_id __initdata schizo_match[] = {
+static const struct of_device_id schizo_match[] = {
{
.name = "pci",
.compatible = "pci108e,a801",
return err;
}
-static struct of_device_id __initdata pci_sun4v_match[] = {
+static const struct of_device_id pci_sun4v_match[] = {
{
.name = "pci",
.compatible = "SUNW,sun4v-pci",
return 0;
}
-static struct of_device_id __initdata power_match[] = {
+static const struct of_device_id power_match[] = {
{
.name = "power",
},
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
-
+/*335*/ .long sys_syncfs
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
+ .word sys_syncfs
#endif /* CONFIG_COMPAT */
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
+ .word sys_syncfs
return platform_device_register(&rtc_cmos_device);
}
-static struct of_device_id __initdata rtc_match[] = {
+static const struct of_device_id rtc_match[] = {
{
.name = "rtc",
.compatible = "m5819",
return platform_device_register(&rtc_bq4802_device);
}
-static struct of_device_id __initdata bq4802_match[] = {
+static const struct of_device_id bq4802_match[] = {
{
.name = "rtc",
.compatible = "bq4802",
return platform_device_register(&m48t59_rtc);
}
-static struct of_device_id __initdata mostek_match[] = {
+static const struct of_device_id mostek_match[] = {
{
.name = "eeprom",
},
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
- select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IRQ_SHOW
# FIXME: investigate whether we need/want these options.
bool
default y
select HAVE_GENERIC_HARDIRQS
- select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IRQ_SHOW
config MMU
select HAVE_KERNEL_LZMA
select GENERIC_FIND_FIRST_BIT
select GENERIC_IRQ_PROBE
- select GENERIC_HARDIRQS_NO_DEPRECATED
+ select GENERIC_IRQ_SHOW
select ARCH_WANT_FRAME_POINTERS
help
UniCore-32 is 32-bit Instruction Set Architecture,
writel(1, INTC_ICCR);
for (irq = 0; irq < IRQ_GPIOHIGH; irq++) {
- set_irq_chip(irq, &puv3_low_gpio_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip(irq, &puv3_low_gpio_chip);
+ irq_set_handler(irq, handle_edge_irq);
irq_modify_status(irq,
IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN,
0);
}
for (irq = IRQ_GPIOHIGH + 1; irq < IRQ_GPIO0; irq++) {
- set_irq_chip(irq, &puv3_normal_chip);
- set_irq_handler(irq, handle_level_irq);
+ irq_set_chip(irq, &puv3_normal_chip);
+ irq_set_handler(irq, handle_level_irq);
irq_modify_status(irq,
IRQ_NOREQUEST | IRQ_NOAUTOEN,
IRQ_NOPROBE);
}
for (irq = IRQ_GPIO0; irq <= IRQ_GPIO27; irq++) {
- set_irq_chip(irq, &puv3_high_gpio_chip);
- set_irq_handler(irq, handle_edge_irq);
+ irq_set_chip(irq, &puv3_high_gpio_chip);
+ irq_set_handler(irq, handle_edge_irq);
irq_modify_status(irq,
IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN,
0);
/*
* Install handler for GPIO 0-27 edge detect interrupts
*/
- set_irq_chip(IRQ_GPIOHIGH, &puv3_normal_chip);
- set_irq_chained_handler(IRQ_GPIOHIGH, puv3_gpio_handler);
+ irq_set_chip(IRQ_GPIOHIGH, &puv3_normal_chip);
+ irq_set_chained_handler(IRQ_GPIOHIGH, puv3_gpio_handler);
#ifdef CONFIG_PUV3_GPIO
puv3_init_gpio();
#endif
}
-int show_interrupts(struct seq_file *p, void *v)
-{
- int i = *(loff_t *) v, cpu;
- struct irq_desc *desc;
- struct irqaction *action;
- unsigned long flags;
-
- if (i == 0) {
- char cpuname[12];
-
- seq_printf(p, " ");
- for_each_present_cpu(cpu) {
- sprintf(cpuname, "CPU%d", cpu);
- seq_printf(p, " %10s", cpuname);
- }
- seq_putc(p, '\n');
- }
-
- if (i < nr_irqs) {
- desc = irq_to_desc(i);
- raw_spin_lock_irqsave(&desc->lock, flags);
- action = desc->action;
- if (!action)
- goto unlock;
-
- seq_printf(p, "%3d: ", i);
- for_each_present_cpu(cpu)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
- seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-");
- seq_printf(p, " %s", action->name);
- for (action = action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-unlock:
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- } else if (i == nr_irqs) {
- seq_printf(p, "Error in interrupt!\n");
- }
- return 0;
-}
-
/*
* do_IRQ handles all hardware IRQ's. Decoded IRQs should not
* come via this function. Instead, they should provide their
irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
/* APB timer irqs are set up as mp_irqs, timer is edge type */
- __set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+ __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
if (system_state == SYSTEM_BOOTING) {
if (request_irq(adev->irq, apbt_interrupt_handler,
return true;
}
-bool __early_alloc_p2m(unsigned long pfn)
+static bool __init __early_alloc_p2m(unsigned long pfn)
{
unsigned topidx, mididx, idx;
}
return idx != 0;
}
-unsigned long set_phys_range_identity(unsigned long pfn_s,
+unsigned long __init set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e)
{
unsigned long pfn;
page->private = mfn;
page->index = pfn_to_mfn(pfn);
- __set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
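+ /* Unlike __set_phys_to_machine(), this variant may have to allocate a missing p2m page and can therefore fail. */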
+ if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
+ return -ENOMEM;
+
if (!PageHighMem(page))
/* Just zap old mapping for now */
pte_clear(&init_mm, address, ptep);
spin_lock_irqsave(&m2p_override_lock, flags);
list_del(&page->lru);
spin_unlock_irqrestore(&m2p_override_lock, flags);
- __set_phys_to_machine(pfn, page->index);
+ set_phys_to_machine(pfn, page->index);
if (!PageHighMem(page))
set_pte_at(&init_mm, address, ptep,
select HAVE_IDE
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
- select GENERIC_HARDIRQS_NO_DEPRECATED
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both
irq = platform_get_irq(pdev, 0);
if (irq)
- set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
/* Setup expansion bus chip selects */
*data->cs0_cfg = data->cs0_bits;
#define DRV_NAME "pata_palmld"
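+/* HDD power-enable and reset lines, requested and configured together via gpio_request_array(). */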
+static struct gpio palmld_hdd_gpios[] = {
+ { GPIO_NR_PALMLD_IDE_PWEN, GPIOF_OUT_INIT_HIGH, "HDD Power" },
+ { GPIO_NR_PALMLD_IDE_RESET, GPIOF_OUT_INIT_LOW, "HDD Reset" },
+};
+
static struct scsi_host_template palmld_sht = {
ATA_PIO_SHT(DRV_NAME),
};
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
- if (!host)
- return -ENOMEM;
+ if (!host) {
+ ret = -ENOMEM;
+ goto err1;
+ }
/* remap drive's physical memory address */
mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000);
- if (!mem)
- return -ENOMEM;
+ if (!mem) {
+ ret = -ENOMEM;
+ goto err1;
+ }
/* request and activate power GPIO, IRQ GPIO */
- ret = gpio_request(GPIO_NR_PALMLD_IDE_PWEN, "HDD PWR");
+ ret = gpio_request_array(palmld_hdd_gpios,
+ ARRAY_SIZE(palmld_hdd_gpios));
if (ret)
goto err1;
- ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_PWEN, 1);
- if (ret)
- goto err2;
-
- ret = gpio_request(GPIO_NR_PALMLD_IDE_RESET, "HDD RST");
- if (ret)
- goto err2;
- ret = gpio_direction_output(GPIO_NR_PALMLD_IDE_RESET, 0);
- if (ret)
- goto err3;
/* reset the drive */
gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0);
ata_sff_std_ports(&ap->ioaddr);
/* activate host */
- return ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING,
+ ret = ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING,
&palmld_sht);
+ if (ret)
+ goto err2;
+
+ return ret;
-err3:
- gpio_free(GPIO_NR_PALMLD_IDE_RESET);
err2:
- gpio_free(GPIO_NR_PALMLD_IDE_PWEN);
+ gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
err1:
return ret;
}
/* power down the HDD */
gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
- gpio_free(GPIO_NR_PALMLD_IDE_RESET);
- gpio_free(GPIO_NR_PALMLD_IDE_PWEN);
+ gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
return 0;
}
struct rb532_cf_info *info = ah->private_data;
if (gpio_get_value(info->gpio_line)) {
- set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
ata_sff_interrupt(info->irq, dev_instance);
} else {
- set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
}
return IRQ_HANDLED;
static irqreturn_t solos_irq(int irq, void *dev_id);
static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci);
static int list_vccs(int vci);
-static void release_vccs(struct atm_dev *dev);
static int atm_init(struct solos_card *, struct device *);
static void atm_remove(struct solos_card *);
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size);
/* Anything but 'Showtime' is down */
if (strcmp(state_str, "Showtime")) {
atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_LOST);
- release_vccs(card->atmdev[port]);
dev_info(&card->dev->dev, "Port %d: %s\n", port, state_str);
return 0;
}
size);
}
if (atmdebug) {
- dev_info(&card->dev->dev, "Received: device %d\n", port);
+ dev_info(&card->dev->dev, "Received: port %d\n", port);
dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n",
size, le16_to_cpu(header->vpi),
le16_to_cpu(header->vci));
le16_to_cpu(header->vci));
if (!vcc) {
if (net_ratelimit())
- dev_warn(&card->dev->dev, "Received packet for unknown VCI.VPI %d.%d on port %d\n",
- le16_to_cpu(header->vci), le16_to_cpu(header->vpi),
+ dev_warn(&card->dev->dev, "Received packet for unknown VPI.VCI %d.%d on port %d\n",
+ le16_to_cpu(header->vpi), le16_to_cpu(header->vci),
port);
continue;
}
return num_found;
}
-static void release_vccs(struct atm_dev *dev)
-{
- int i;
-
- write_lock_irq(&vcc_sklist_lock);
- for (i = 0; i < VCC_HTABLE_SIZE; i++) {
- struct hlist_head *head = &vcc_hash[i];
- struct hlist_node *node, *tmp;
- struct sock *s;
- struct atm_vcc *vcc;
-
- sk_for_each_safe(s, node, tmp, head) {
- vcc = atm_sk(s);
- if (vcc->dev == dev) {
- vcc_release_async(vcc, -EPIPE);
- sk_del_node_init(s);
- }
- }
- }
- write_unlock_irq(&vcc_sklist_lock);
-}
-
static int popen(struct atm_vcc *vcc)
{
/* Clean up and free oldskb now it's gone */
if (atmdebug) {
+ struct pkt_hdr *header = (void *)oldskb->data;
+ int size = le16_to_cpu(header->size);
+
+ skb_pull(oldskb, sizeof(*header));
dev_info(&card->dev->dev, "Transmitted: port %d\n",
port);
+ dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n",
+ size, le16_to_cpu(header->vpi),
+ le16_to_cpu(header->vci));
print_buffer(oldskb);
}
card->atmdev[i]->ci_range.vci_bits = 16;
card->atmdev[i]->dev_data = card;
card->atmdev[i]->phy_data = (void *)(unsigned long)i;
- atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_UNKNOWN);
+ atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND);
skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
if (!skb) {
return -EBUSY;
}
- chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
+ chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL);
if (chip->data_buffer == NULL) {
clear_bit(0, &chip->is_open);
put_device(chip->dev);
#include <linux/connector.h>
#include <linux/delay.h>
-void cn_queue_wrapper(struct work_struct *work)
-{
- struct cn_callback_entry *cbq =
- container_of(work, struct cn_callback_entry, work);
- struct cn_callback_data *d = &cbq->data;
- struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
- struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
-
- d->callback(msg, nsp);
-
- kfree_skb(d->skb);
- d->skb = NULL;
-
- kfree(d->free);
-}
-
static struct cn_callback_entry *
-cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
+cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
+ struct cb_id *id,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
struct cn_callback_entry *cbq;
return NULL;
}
+ atomic_set(&cbq->refcnt, 1);
+
+ atomic_inc(&dev->refcnt);
+ cbq->pdev = dev;
+
snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
memcpy(&cbq->id.id, id, sizeof(struct cb_id));
- cbq->data.callback = callback;
-
- INIT_WORK(&cbq->work, &cn_queue_wrapper);
+ cbq->callback = callback;
return cbq;
}
-static void cn_queue_free_callback(struct cn_callback_entry *cbq)
+void cn_queue_release_callback(struct cn_callback_entry *cbq)
{
- flush_workqueue(cbq->pdev->cn_queue);
+ if (!atomic_dec_and_test(&cbq->refcnt))
+ return;
+
+ atomic_dec(&cbq->pdev->refcnt);
kfree(cbq);
}
struct cn_callback_entry *cbq, *__cbq;
int found = 0;
- cbq = cn_queue_alloc_callback_entry(name, id, callback);
+ cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
if (!cbq)
return -ENOMEM;
- atomic_inc(&dev->refcnt);
- cbq->pdev = dev;
-
spin_lock_bh(&dev->queue_lock);
list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
if (cn_cb_equal(&__cbq->id.id, id)) {
spin_unlock_bh(&dev->queue_lock);
if (found) {
- cn_queue_free_callback(cbq);
- atomic_dec(&dev->refcnt);
+ cn_queue_release_callback(cbq);
return -EINVAL;
}
}
spin_unlock_bh(&dev->queue_lock);
- if (found) {
- cn_queue_free_callback(cbq);
- atomic_dec(&dev->refcnt);
- }
+ if (found)
+ cn_queue_release_callback(cbq);
}
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
dev->nls = nls;
- dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
- if (!dev->cn_queue) {
- kfree(dev);
- return NULL;
- }
-
return dev;
}
{
struct cn_callback_entry *cbq, *n;
- flush_workqueue(dev->cn_queue);
- destroy_workqueue(dev->cn_queue);
-
spin_lock_bh(&dev->queue_lock);
list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
list_del(&cbq->callback_entry);
*/
static int cn_call_callback(struct sk_buff *skb)
{
- struct cn_callback_entry *__cbq, *__new_cbq;
+ struct cn_callback_entry *i, *cbq = NULL;
struct cn_dev *dev = &cdev;
struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
+ struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
int err = -ENODEV;
spin_lock_bh(&dev->cbdev->queue_lock);
- list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
- if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
- if (likely(!work_pending(&__cbq->work) &&
- __cbq->data.skb == NULL)) {
- __cbq->data.skb = skb;
-
- if (queue_work(dev->cbdev->cn_queue,
- &__cbq->work))
- err = 0;
- else
- err = -EINVAL;
- } else {
- struct cn_callback_data *d;
-
- err = -ENOMEM;
- __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
- if (__new_cbq) {
- d = &__new_cbq->data;
- d->skb = skb;
- d->callback = __cbq->data.callback;
- d->free = __new_cbq;
-
- INIT_WORK(&__new_cbq->work,
- &cn_queue_wrapper);
-
- if (queue_work(dev->cbdev->cn_queue,
- &__new_cbq->work))
- err = 0;
- else {
- kfree(__new_cbq);
- err = -EINVAL;
- }
- }
- }
+ list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
+ if (cn_cb_equal(&i->id.id, &msg->id)) {
+ atomic_inc(&i->refcnt);
+ cbq = i;
break;
}
}
spin_unlock_bh(&dev->cbdev->queue_lock);
+ if (cbq != NULL) {
+ cbq->callback(msg, nsp);
+ kfree_skb(skb);
+ cn_queue_release_callback(cbq);
+ }
+
return err;
}
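
The refcounting introduced above follows a common pattern: take a reference while holding the lock, invoke the callback with the lock dropped, then put the reference. A minimal illustrative sketch of that idiom follows; the entry type and names are stand-ins, not the connector API.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_entry {
	struct list_head node;
	atomic_t refcnt;
	void (*callback)(void *arg);
};

static void example_entry_put(struct example_entry *e)
{
	/* Free only when the last reference is dropped. */
	if (atomic_dec_and_test(&e->refcnt))
		kfree(e);
}

static void example_dispatch(struct list_head *list, spinlock_t *lock, void *arg)
{
	struct example_entry *e, *found = NULL;

	spin_lock_bh(lock);
	list_for_each_entry(e, list, node) {
		atomic_inc(&e->refcnt);	/* pin it before dropping the lock */
		found = e;
		break;
	}
	spin_unlock_bh(lock);

	if (found) {
		found->callback(arg);	/* entry cannot be freed under us */
		example_entry_put(found);
	}
}
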
mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
if (!(mcis && ecc_stngs))
- goto err_ret;
+ goto err_free;
msrs = msrs_alloc();
if (!msrs)
config AB8500_GPIO
bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
- depends on AB8500_CORE
+ depends on AB8500_CORE && BROKEN
help
Select this to enable the AB8500 IC GPIO driver
endif
return 0;
INIT_WORK(&fan_data->alarm_work, fan_alarm_notify);
- set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
err = request_irq(alarm_irq, fan_alarm_irq_handler, IRQF_SHARED,
"GPIO fan alarm", fan_data);
if (err)
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
- set_irq_wake(client->irq, 0);
+ irq_set_irq_wake(client->irq, 0);
disable_irq(client->irq);
mutex_lock(&lm->lock);
led_classdev_resume(&lm->pwm[i].cdev);
enable_irq(client->irq);
- set_irq_wake(client->irq, 1);
+ irq_set_irq_wake(client->irq, 1);
return 0;
}
* at FIQ level, switch back from edge to simple interrupt handler
* to avoid bad interaction.
*/
- set_irq_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
+ irq_set_handler(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK),
handle_simple_irq);
serio_register_port(ams_delta_serio);
}
wm->pen_irq = gpio_to_irq(irq);
- set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
} else /* pen irq not supported */
pen_int = 0;
gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26);
wm->pen_irq = IRQ_GPIO(gpio_touch_irq);
- set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH);
wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
WM97XX_GPIO_POL_HIGH,
static struct i2c_adapter * u3_1;
static struct i2c_adapter * k2;
static struct i2c_client * fcu;
-static struct cpu_pid_state cpu_state[2];
+static struct cpu_pid_state processor_state[2];
static struct basckside_pid_params backside_params;
static struct backside_pid_state backside_state;
static struct drives_pid_state drives_state;
static void fetch_cpu_pumps_minmax(void)
{
- struct cpu_pid_state *state0 = &cpu_state[0];
- struct cpu_pid_state *state1 = &cpu_state[1];
+ struct cpu_pid_state *state0 = &processor_state[0];
+ struct cpu_pid_state *state1 = &processor_state[1];
u16 pump_min = 0, pump_max = 0xffff;
u16 tmp[4];
return sprintf(buf, "%d", data); \
}
-BUILD_SHOW_FUNC_FIX(cpu0_temperature, cpu_state[0].last_temp)
-BUILD_SHOW_FUNC_FIX(cpu0_voltage, cpu_state[0].voltage)
-BUILD_SHOW_FUNC_FIX(cpu0_current, cpu_state[0].current_a)
-BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, cpu_state[0].rpm)
-BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, cpu_state[0].intake_rpm)
+BUILD_SHOW_FUNC_FIX(cpu0_temperature, processor_state[0].last_temp)
+BUILD_SHOW_FUNC_FIX(cpu0_voltage, processor_state[0].voltage)
+BUILD_SHOW_FUNC_FIX(cpu0_current, processor_state[0].current_a)
+BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, processor_state[0].rpm)
+BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, processor_state[0].intake_rpm)
-BUILD_SHOW_FUNC_FIX(cpu1_temperature, cpu_state[1].last_temp)
-BUILD_SHOW_FUNC_FIX(cpu1_voltage, cpu_state[1].voltage)
-BUILD_SHOW_FUNC_FIX(cpu1_current, cpu_state[1].current_a)
-BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, cpu_state[1].rpm)
-BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, cpu_state[1].intake_rpm)
+BUILD_SHOW_FUNC_FIX(cpu1_temperature, processor_state[1].last_temp)
+BUILD_SHOW_FUNC_FIX(cpu1_voltage, processor_state[1].voltage)
+BUILD_SHOW_FUNC_FIX(cpu1_current, processor_state[1].current_a)
+BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, processor_state[1].rpm)
+BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, processor_state[1].intake_rpm)
BUILD_SHOW_FUNC_FIX(backside_temperature, backside_state.last_temp)
BUILD_SHOW_FUNC_INT(backside_fan_pwm, backside_state.pwm)
static void do_monitor_cpu_combined(void)
{
- struct cpu_pid_state *state0 = &cpu_state[0];
- struct cpu_pid_state *state1 = &cpu_state[1];
+ struct cpu_pid_state *state0 = &processor_state[0];
+ struct cpu_pid_state *state1 = &processor_state[1];
s32 temp0, power0, temp1, power1;
s32 temp_combi, power_combi;
int rc, intake, pump;
/*
* Initialize the state structure for one CPU control loop
*/
-static int init_cpu_state(struct cpu_pid_state *state, int index)
+static int init_processor_state(struct cpu_pid_state *state, int index)
{
int err;
/*
* Dispose of the state data for one CPU control loop
*/
-static void dispose_cpu_state(struct cpu_pid_state *state)
+static void dispose_processor_state(struct cpu_pid_state *state)
{
if (state->monitor == NULL)
return;
set_pwm_fan(SLOTS_FAN_PWM_INDEX, SLOTS_FAN_DEFAULT_PWM);
/* Initialize ADCs */
- initialize_adc(&cpu_state[0]);
- if (cpu_state[1].monitor != NULL)
- initialize_adc(&cpu_state[1]);
+ initialize_adc(&processor_state[0]);
+ if (processor_state[1].monitor != NULL)
+ initialize_adc(&processor_state[1]);
fcu_tickle_ticks = FCU_TICKLE_TICKS;
if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
do_monitor_cpu_combined();
else if (cpu_pid_type == CPU_PID_TYPE_RACKMAC) {
- do_monitor_cpu_rack(&cpu_state[0]);
- if (cpu_state[1].monitor != NULL)
- do_monitor_cpu_rack(&cpu_state[1]);
+ do_monitor_cpu_rack(&processor_state[0]);
+ if (processor_state[1].monitor != NULL)
+ do_monitor_cpu_rack(&processor_state[1]);
// better deal with UP
} else {
- do_monitor_cpu_split(&cpu_state[0]);
- if (cpu_state[1].monitor != NULL)
- do_monitor_cpu_split(&cpu_state[1]);
+ do_monitor_cpu_split(&processor_state[0]);
+ if (processor_state[1].monitor != NULL)
+ do_monitor_cpu_split(&processor_state[1]);
// better deal with UP
}
/* Then, the rest */
*/
static void dispose_control_loops(void)
{
- dispose_cpu_state(&cpu_state[0]);
- dispose_cpu_state(&cpu_state[1]);
+ dispose_processor_state(&processor_state[0]);
+ dispose_processor_state(&processor_state[1]);
dispose_backside_state(&backside_state);
dispose_drives_state(&drives_state);
dispose_slots_state(&slots_state);
/* Create control loops for everything. If any fail, everything
* fails
*/
- if (init_cpu_state(&cpu_state[0], 0))
+ if (init_processor_state(&processor_state[0], 0))
goto fail;
if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
fetch_cpu_pumps_minmax();
- if (cpu_count > 1 && init_cpu_state(&cpu_state[1], 1))
+ if (cpu_count > 1 && init_processor_state(&processor_state[1], 1))
goto fail;
if (init_backside_state(&backside_state))
goto fail;
This driver supports the ASIC3 multifunction chip found on many
PDAs (mainly iPAQ and HTC based ones)
-config MFD_SH_MOBILE_SDHI
- bool "Support for SuperH Mobile SDHI"
- depends on SUPERH || ARCH_SHMOBILE
- select MFD_CORE
- select TMIO_MMC_DMA
- ---help---
- This driver supports the SDHI hardware block found in many
- SuperH Mobile SoCs.
-
config MFD_DAVINCI_VOICECODEC
tristate
select MFD_CORE
bool
default n
-config TMIO_MMC_DMA
- bool
- select DMA_ENGINE
- select DMADEVICES
-
config MFD_T7L66XB
bool "Support Toshiba T7L66XB"
depends on ARM && HAVE_CLK
obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
-obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o
obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
+++ /dev/null
-/*
- * SuperH Mobile SDHI
- *
- * Copyright (C) 2009 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Based on "Compaq ASIC3 support":
- *
- * Copyright 2001 Compaq Computer Corporation.
- * Copyright 2004-2005 Phil Blundell
- * Copyright 2007-2008 OpenedHand Ltd.
- *
- * Authors: Phil Blundell <pb@handhelds.org>,
- * Samuel Ortiz <sameo@openedhand.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/mmc/host.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mfd/sh_mobile_sdhi.h>
-#include <linux/sh_dma.h>
-
-struct sh_mobile_sdhi {
- struct clk *clk;
- struct tmio_mmc_data mmc_data;
- struct mfd_cell cell_mmc;
- struct sh_dmae_slave param_tx;
- struct sh_dmae_slave param_rx;
- struct tmio_mmc_dma dma_priv;
-};
-
-static struct resource sh_mobile_sdhi_resources[] = {
- {
- .start = 0x000,
- .end = 0x1ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct mfd_cell sh_mobile_sdhi_cell = {
- .name = "tmio-mmc",
- .num_resources = ARRAY_SIZE(sh_mobile_sdhi_resources),
- .resources = sh_mobile_sdhi_resources,
-};
-
-static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state)
-{
- struct platform_device *pdev = to_platform_device(tmio->dev.parent);
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-
- if (p && p->set_pwr)
- p->set_pwr(pdev, state);
-}
-
-static int sh_mobile_sdhi_get_cd(struct platform_device *tmio)
-{
- struct platform_device *pdev = to_platform_device(tmio->dev.parent);
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
-
- if (p && p->get_cd)
- return p->get_cd(pdev);
- else
- return -ENOSYS;
-}
-
-static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
-{
- struct sh_mobile_sdhi *priv;
- struct tmio_mmc_data *mmc_data;
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
- struct resource *mem;
- char clk_name[8];
- int ret, irq;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- dev_err(&pdev->dev, "missing MEM resource\n");
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- dev_err(&pdev->dev, "missing IRQ resource\n");
-
- if (!mem || (irq < 0))
- return -EINVAL;
-
- priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
- if (priv == NULL) {
- dev_err(&pdev->dev, "kzalloc failed\n");
- return -ENOMEM;
- }
-
- mmc_data = &priv->mmc_data;
-
- snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
- priv->clk = clk_get(&pdev->dev, clk_name);
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
- ret = PTR_ERR(priv->clk);
- kfree(priv);
- return ret;
- }
-
- clk_enable(priv->clk);
-
- mmc_data->hclk = clk_get_rate(priv->clk);
- mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
- mmc_data->get_cd = sh_mobile_sdhi_get_cd;
- mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
- if (p) {
- mmc_data->flags = p->tmio_flags;
- mmc_data->ocr_mask = p->tmio_ocr_mask;
- mmc_data->capabilities |= p->tmio_caps;
- }
-
- /*
- * All SDHI blocks support 2-byte and larger block sizes in 4-bit
- * bus width mode.
- */
- mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
-
- /*
- * All SDHI blocks support SDIO IRQ signalling.
- */
- mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
-
- if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
- priv->param_tx.slave_id = p->dma_slave_tx;
- priv->param_rx.slave_id = p->dma_slave_rx;
- priv->dma_priv.chan_priv_tx = &priv->param_tx;
- priv->dma_priv.chan_priv_rx = &priv->param_rx;
- priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
- mmc_data->dma = &priv->dma_priv;
- }
-
- memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc));
- priv->cell_mmc.mfd_data = mmc_data;
-
- platform_set_drvdata(pdev, priv);
-
- ret = mfd_add_devices(&pdev->dev, pdev->id,
- &priv->cell_mmc, 1, mem, irq);
- if (ret) {
- clk_disable(priv->clk);
- clk_put(priv->clk);
- kfree(priv);
- }
-
- return ret;
-}
-
-static int sh_mobile_sdhi_remove(struct platform_device *pdev)
-{
- struct sh_mobile_sdhi *priv = platform_get_drvdata(pdev);
-
- mfd_remove_devices(&pdev->dev);
- clk_disable(priv->clk);
- clk_put(priv->clk);
- kfree(priv);
-
- return 0;
-}
-
-static struct platform_driver sh_mobile_sdhi_driver = {
- .driver = {
- .name = "sh_mobile_sdhi",
- .owner = THIS_MODULE,
- },
- .probe = sh_mobile_sdhi_probe,
- .remove = __devexit_p(sh_mobile_sdhi_remove),
-};
-
-static int __init sh_mobile_sdhi_init(void)
-{
- return platform_driver_register(&sh_mobile_sdhi_driver);
-}
-
-static void __exit sh_mobile_sdhi_exit(void)
-{
- platform_driver_unregister(&sh_mobile_sdhi_driver);
-}
-
-module_init(sh_mobile_sdhi_init);
-module_exit(sh_mobile_sdhi_exit);
-
-MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
-MODULE_AUTHOR("Magnus Damm");
-MODULE_LICENSE("GPL v2");
if (gru_irq_count[chiplet] == 0) {
gru_chip[chiplet].name = irq_name;
- ret = set_irq_chip(irq, &gru_chip[chiplet]);
+ ret = irq_set_chip(irq, &gru_chip[chiplet]);
if (ret) {
printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
GRU_DRIVER_ID_STR, -ret);
unsigned int tot_sz, int max_scatter)
{
unsigned int dev_addr, i, cnt, sz, ssz;
- struct timespec ts1, ts2, ts;
+ struct timespec ts1, ts2;
int ret;
sz = test->area.max_tfr;
}
getnstimeofday(&ts2);
- ts = timespec_sub(ts2, ts1);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
* your option) any later version.
*/
+#include <linux/slab.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
struct mmc_command cmd;
struct mmc_data data;
struct scatterlist sg;
+ void *data_buf;
BUG_ON(!card);
BUG_ON(!card->host);
if (err)
return err;
+ /* dma onto stack is unsafe/nonportable, but callers to this
+ * routine normally provide temporary on-stack buffers ...
+ */
+ data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
+ if (data_buf == NULL)
+ return -ENOMEM;
+
memset(&mrq, 0, sizeof(struct mmc_request));
memset(&cmd, 0, sizeof(struct mmc_command));
memset(&data, 0, sizeof(struct mmc_data));
data.sg = &sg;
data.sg_len = 1;
- sg_init_one(&sg, scr, 8);
+ sg_init_one(&sg, data_buf, 8);
mmc_set_data_timeout(&data, card);
mmc_wait_for_req(card->host, &mrq);
+ memcpy(scr, data_buf, sizeof(card->raw_scr));
+ kfree(data_buf);
+
if (cmd.error)
return cmd.error;
if (data.error)
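
The kmalloc'd bounce buffer above is the general cure for the same problem elsewhere: a caller-supplied buffer may live on the stack, and stack memory must never end up in a DMA scatterlist. A minimal sketch of the pattern, assuming a hypothetical do_dma_read() transfer helper:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

int do_dma_read(void *buf, size_t len);	/* hypothetical helper, assumed elsewhere */

static int read_block_dma_safe(void *caller_buf, size_t len)
{
	void *bounce;
	int err;

	/* kmalloc memory is physically contiguous and safe to map for DMA. */
	bounce = kmalloc(len, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	err = do_dma_read(bounce, len);
	if (!err)
		memcpy(caller_buf, bounce, len);	/* copy back to the caller */

	kfree(bounce);
	return err;
}
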
To compile this driver as a module, choose M here: the
module will be called sdricoh_cs.
+config MMC_TMIO_CORE
+ tristate
+
config MMC_TMIO
tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
- depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI
+ depends on MFD_TMIO || MFD_ASIC3
+ select MMC_TMIO_CORE
help
This provides support for the SD/MMC cell found in TC6393XB,
T7L66XB and also HTC ASIC3
+config MMC_SDHI
+ tristate "SH-Mobile SDHI SD/SDIO controller support"
+ depends on SUPERH || ARCH_SHMOBILE
+ select MMC_TMIO_CORE
+ help
+ This provides support for the SDHI SD/SDIO controller found in
+ SuperH and ARM SH-Mobile SoCs.
+
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
depends on PCI
obj-$(CONFIG_MMC_S3C) += s3cmci.o
obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
-obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
+obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
+tmio_mmc_core-y := tmio_mmc_pio.o
+ifneq ($(CONFIG_MMC_SDHI),n)
+tmio_mmc_core-y += tmio_mmc_dma.o
+endif
+obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
+obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
obj-$(CONFIG_MMC_DW) += dw_mmc.o
/* Stop the IDMAC running */
temp = mci_readl(host, BMOD);
- temp &= ~SDMMC_IDMAC_ENABLE;
+ temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
mci_writel(host, BMOD, temp);
}
/* Enable the IDMAC */
temp = mci_readl(host, BMOD);
- temp |= SDMMC_IDMAC_ENABLE;
+ temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
mci_writel(host, BMOD, temp);
/* Start it running */
.datalength_bits = 16,
};
+static struct variant_data variant_arm_extended_fifo = {
+ .fifosize = 128 * 4,
+ .fifohalfsize = 64 * 4,
+ .datalength_bits = 16,
+};
+
static struct variant_data variant_u300 = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
static struct amba_id mmci_ids[] = {
{
.id = 0x00041180,
- .mask = 0x000fffff,
+ .mask = 0xff0fffff,
.data = &variant_arm,
},
+ {
+ .id = 0x01041180,
+ .mask = 0xff0fffff,
+ .data = &variant_arm_extended_fifo,
+ },
{
.id = 0x00041181,
.mask = 0x000fffff,
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>
#include <linux/mmc/core.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdhci-pltfm.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
#include <mach/hardware.h>
#include <mach/esdhc.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
+/* VENDOR SPEC register */
+#define SDHCI_VENDOR_SPEC 0xC0
+#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
+
+#define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0)
+/*
+ * The CMDTYPE of the CMD register (offset 0xE) should be set to
+ * "11" when the STOP CMD12 is issued on imx53 to abort one
+ * open-ended multi-blk IO. Otherwise the TC INT wouldn't
+ * be generated.
+ * In exact block transfer, the controller doesn't complete the
+ * operations automatically as required at the end of the
+ * transfer and remains on hold if the abort command is not sent.
+ * As a result, the TC flag is not asserted and SW receives a timeout
+ * exception. Bit 1 of the Vendor Spec register is used to fix it.
+ */
+#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1)
+
+struct pltfm_imx_data {
+ int flags;
+ u32 scratchpad;
+};
+
static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
{
void __iomem *base = host->ioaddr + (reg & ~0x3);
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
/* fake CARD_PRESENT flag on mx25/35 */
u32 val = readl(host->ioaddr + reg);
- if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+ if (unlikely((reg == SDHCI_PRESENT_STATE)
+ && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) {
struct esdhc_platform_data *boarddata =
host->mmc->parent->platform_data;
static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
{
- if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE))
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+
+ if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)
+ && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP)))
/*
* these interrupts won't work with a custom card_detect gpio
* (only applied to mx25/35)
*/
val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+ if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+ && (reg == SDHCI_INT_STATUS)
+ && (val & SDHCI_INT_DATA_END))) {
+ u32 v;
+ v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
+ v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK;
+ writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
+ }
+
writel(val, host->ioaddr + reg);
}
static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
switch (reg) {
case SDHCI_TRANSFER_MODE:
* Postpone this write, we must do it together with a
* command write that is down below.
*/
- pltfm_host->scratchpad = val;
+ if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
+ && (host->cmd->opcode == SD_IO_RW_EXTENDED)
+ && (host->cmd->data->blocks > 1)
+ && (host->cmd->data->flags & MMC_DATA_READ)) {
+ u32 v;
+ v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
+ v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK;
+ writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
+ }
+ imx_data->scratchpad = val;
return;
case SDHCI_COMMAND:
- writel(val << 16 | pltfm_host->scratchpad,
+ if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
+ && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+ val |= SDHCI_CMD_ABORTCMD;
+ writel(val << 16 | imx_data->scratchpad,
host->ioaddr + SDHCI_TRANSFER_MODE);
return;
case SDHCI_BLOCK_SIZE:
}
static struct sdhci_ops sdhci_esdhc_ops = {
+ .read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
+ .write_l = esdhc_writel_le,
.write_w = esdhc_writew_le,
.write_b = esdhc_writeb_le,
.set_clock = esdhc_set_clock,
struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
struct clk *clk;
int err;
+ struct pltfm_imx_data *imx_data;
clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
clk_enable(clk);
pltfm_host->clk = clk;
- if (cpu_is_mx35() || cpu_is_mx51())
+ imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
+ if (!imx_data) {
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+ return -ENOMEM;
+ }
+ pltfm_host->priv = imx_data;
+
+ if (!cpu_is_mx25())
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
if (cpu_is_mx25() || cpu_is_mx35()) {
sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
}
+ if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51()))
+ imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
+
if (boarddata) {
err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
if (err) {
goto no_card_detect_irq;
}
- sdhci_esdhc_ops.write_l = esdhc_writel_le;
- sdhci_esdhc_ops.read_l = esdhc_readl_le;
+ imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP;
/* Now we have a working card_detect again */
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
no_card_detect_pin:
boarddata->cd_gpio = err;
not_supported:
+ kfree(imx_data);
return 0;
}
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
if (boarddata && gpio_is_valid(boarddata->wp_gpio))
gpio_free(boarddata->wp_gpio);
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
+ kfree(imx_data);
}
struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
SDHCI_QUIRK_NONSTANDARD_CLOCK | \
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
SDHCI_QUIRK_PIO_NEEDS_DELAY | \
- SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \
- SDHCI_QUIRK_NO_CARD_NO_RESET)
+ SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
#define ESDHC_SYSTEM_CONTROL 0x2c
#define ESDHC_CLOCK_MASK 0x0000fff0
struct sdhci_of_data sdhci_esdhc = {
/* card detection could be handled via GPIO */
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
+ | SDHCI_QUIRK_NO_CARD_NO_RESET,
.ops = {
.read_l = sdhci_be32bs_readl,
.read_w = esdhc_readw,
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
- u8 slots, rev, first_bar;
+ u8 slots, first_bar;
int ret, i;
BUG_ON(pdev == NULL);
BUG_ON(ent == NULL);
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
-
dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
- (int)pdev->vendor, (int)pdev->device, (int)rev);
+ (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
if (ret)
struct sdhci_pltfm_host {
struct clk *clk;
- u32 scratchpad; /* to handle quirks across io-accessor calls */
+ void *priv; /* to handle quirks across io-accessor calls */
};
extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
/* val == 1 -> card removed, val == 0 -> card inserted */
/* if card removed - set irq for low level, else vice versa */
gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
- set_irq_type(irq, gpio_irq_type);
+ irq_set_irq_type(irq, gpio_irq_type);
if (sdhci->data->card_power_gpio >= 0) {
if (!sdhci->data->power_always_enb) {
#define SDHCI_CMD_CRC 0x08
#define SDHCI_CMD_INDEX 0x10
#define SDHCI_CMD_DATA 0x20
+#define SDHCI_CMD_ABORTCMD 0xC0
#define SDHCI_CMD_RESP_NONE 0x00
#define SDHCI_CMD_RESP_LONG 0x01
--- /dev/null
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on "Compaq ASIC3 support":
+ *
+ * Copyright 2001 Compaq Computer Corporation.
+ * Copyright 2004-2005 Phil Blundell
+ * Copyright 2007-2008 OpenedHand Ltd.
+ *
+ * Authors: Phil Blundell <pb@handhelds.org>,
+ * Samuel Ortiz <sameo@openedhand.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
+#include <linux/sh_dma.h>
+
+#include "tmio_mmc.h"
+
+struct sh_mobile_sdhi {
+ struct clk *clk;
+ struct tmio_mmc_data mmc_data;
+ struct sh_dmae_slave param_tx;
+ struct sh_dmae_slave param_rx;
+ struct tmio_mmc_dma dma_priv;
+};
+
+static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
+{
+ struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
+
+ if (p && p->set_pwr)
+ p->set_pwr(pdev, state);
+}
+
+static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
+{
+ struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
+
+ if (p && p->get_cd)
+ return p->get_cd(pdev);
+ else
+ return -ENOSYS;
+}
+
+static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
+{
+ struct sh_mobile_sdhi *priv;
+ struct tmio_mmc_data *mmc_data;
+ struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
+ struct tmio_mmc_host *host;
+ char clk_name[8];
+ int ret;
+
+ priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
+ if (priv == NULL) {
+ dev_err(&pdev->dev, "kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ mmc_data = &priv->mmc_data;
+
+ snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
+ priv->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ ret = PTR_ERR(priv->clk);
+ goto eclkget;
+ }
+
+ clk_enable(priv->clk);
+
+ mmc_data->hclk = clk_get_rate(priv->clk);
+ mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
+ mmc_data->get_cd = sh_mobile_sdhi_get_cd;
+ mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
+ if (p) {
+ mmc_data->flags = p->tmio_flags;
+ mmc_data->ocr_mask = p->tmio_ocr_mask;
+ mmc_data->capabilities |= p->tmio_caps;
+
+ if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
+ priv->param_tx.slave_id = p->dma_slave_tx;
+ priv->param_rx.slave_id = p->dma_slave_rx;
+ priv->dma_priv.chan_priv_tx = &priv->param_tx;
+ priv->dma_priv.chan_priv_rx = &priv->param_rx;
+ priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
+ mmc_data->dma = &priv->dma_priv;
+ }
+ }
+
+ /*
+ * All SDHI blocks support 2-byte and larger block sizes in 4-bit
+ * bus width mode.
+ */
+ mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
+
+ /*
+ * All SDHI blocks support SDIO IRQ signalling.
+ */
+ mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+
+ ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
+ if (ret < 0)
+ goto eprobe;
+
+ pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
+ (unsigned long)host->ctl, host->irq);
+
+ return ret;
+
+eprobe:
+ clk_disable(priv->clk);
+ clk_put(priv->clk);
+eclkget:
+ kfree(priv);
+ return ret;
+}
+
+static int sh_mobile_sdhi_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
+
+ tmio_mmc_host_remove(host);
+ clk_disable(priv->clk);
+ clk_put(priv->clk);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct platform_driver sh_mobile_sdhi_driver = {
+ .driver = {
+ .name = "sh_mobile_sdhi",
+ .owner = THIS_MODULE,
+ },
+ .probe = sh_mobile_sdhi_probe,
+ .remove = __devexit_p(sh_mobile_sdhi_remove),
+};
+
+static int __init sh_mobile_sdhi_init(void)
+{
+ return platform_driver_register(&sh_mobile_sdhi_driver);
+}
+
+static void __exit sh_mobile_sdhi_exit(void)
+{
+ platform_driver_unregister(&sh_mobile_sdhi_driver);
+}
+
+module_init(sh_mobile_sdhi_init);
+module_exit(sh_mobile_sdhi_exit);
+
+MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sh_mobile_sdhi");
/*
- * linux/drivers/mmc/tmio_mmc.c
+ * linux/drivers/mmc/host/tmio_mmc.c
*
- * Copyright (C) 2004 Ian Molton
- * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2004 Ian Molton
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* Driver for the MMC / SD / SDIO cell found in:
*
* TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
- *
- * This driver draws mainly on scattered spec sheets, Reverse engineering
- * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
- * support). (Further 4 bit support from a later datasheet).
- *
- * TODO:
- * Investigate using a workqueue for PIO transfers
- * Eliminate FIXMEs
- * SDIO support
- * Better Power management
- * Handle MMC errors better
- * double buffer support
- *
*/
-#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-
-#define CTL_SD_CMD 0x00
-#define CTL_ARG_REG 0x04
-#define CTL_STOP_INTERNAL_ACTION 0x08
-#define CTL_XFER_BLK_COUNT 0xa
-#define CTL_RESPONSE 0x0c
-#define CTL_STATUS 0x1c
-#define CTL_IRQ_MASK 0x20
-#define CTL_SD_CARD_CLK_CTL 0x24
-#define CTL_SD_XFER_LEN 0x26
-#define CTL_SD_MEM_CARD_OPT 0x28
-#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
-#define CTL_SD_DATA_PORT 0x30
-#define CTL_TRANSACTION_CTL 0x34
-#define CTL_SDIO_STATUS 0x36
-#define CTL_SDIO_IRQ_MASK 0x38
-#define CTL_RESET_SD 0xe0
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-/* Definitions for values the CTRL_STATUS register can take. */
-#define TMIO_STAT_CMDRESPEND 0x00000001
-#define TMIO_STAT_DATAEND 0x00000004
-#define TMIO_STAT_CARD_REMOVE 0x00000008
-#define TMIO_STAT_CARD_INSERT 0x00000010
-#define TMIO_STAT_SIGSTATE 0x00000020
-#define TMIO_STAT_WRPROTECT 0x00000080
-#define TMIO_STAT_CARD_REMOVE_A 0x00000100
-#define TMIO_STAT_CARD_INSERT_A 0x00000200
-#define TMIO_STAT_SIGSTATE_A 0x00000400
-#define TMIO_STAT_CMD_IDX_ERR 0x00010000
-#define TMIO_STAT_CRCFAIL 0x00020000
-#define TMIO_STAT_STOPBIT_ERR 0x00040000
-#define TMIO_STAT_DATATIMEOUT 0x00080000
-#define TMIO_STAT_RXOVERFLOW 0x00100000
-#define TMIO_STAT_TXUNDERRUN 0x00200000
-#define TMIO_STAT_CMDTIMEOUT 0x00400000
-#define TMIO_STAT_RXRDY 0x01000000
-#define TMIO_STAT_TXRQ 0x02000000
-#define TMIO_STAT_ILL_FUNC 0x20000000
-#define TMIO_STAT_CMD_BUSY 0x40000000
-#define TMIO_STAT_ILL_ACCESS 0x80000000
-
-/* Definitions for values the CTRL_SDIO_STATUS register can take. */
-#define TMIO_SDIO_STAT_IOIRQ 0x0001
-#define TMIO_SDIO_STAT_EXPUB52 0x4000
-#define TMIO_SDIO_STAT_EXWT 0x8000
-#define TMIO_SDIO_MASK_ALL 0xc007
-
-/* Define some IRQ masks */
-/* This is the mask used at reset by the chip */
-#define TMIO_MASK_ALL 0x837f031d
-#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
-#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
-#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
- TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
-#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
-
-#define enable_mmc_irqs(host, i) \
- do { \
- u32 mask;\
- mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
- mask &= ~((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
- } while (0)
-
-#define disable_mmc_irqs(host, i) \
- do { \
- u32 mask;\
- mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
- mask |= ((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
- } while (0)
-
-#define ack_mmc_irqs(host, i) \
- do { \
- sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
- } while (0)
-
-/* This is arbitrary, just noone needed any higher alignment yet */
-#define MAX_ALIGN 4
-
-struct tmio_mmc_host {
- void __iomem *ctl;
- unsigned long bus_shift;
- struct mmc_command *cmd;
- struct mmc_request *mrq;
- struct mmc_data *data;
- struct mmc_host *mmc;
- int irq;
- unsigned int sdio_irq_enabled;
-
- /* Callbacks for clock / power control */
- void (*set_pwr)(struct platform_device *host, int state);
- void (*set_clk_div)(struct platform_device *host, int state);
-
- /* pio related stuff */
- struct scatterlist *sg_ptr;
- struct scatterlist *sg_orig;
- unsigned int sg_len;
- unsigned int sg_off;
-
- struct platform_device *pdev;
-
- /* DMA support */
- struct dma_chan *chan_rx;
- struct dma_chan *chan_tx;
- struct tasklet_struct dma_complete;
- struct tasklet_struct dma_issue;
-#ifdef CONFIG_TMIO_MMC_DMA
- u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
- struct scatterlist bounce_sg;
-#endif
-
- /* Track lost interrupts */
- struct delayed_work delayed_reset_work;
- spinlock_t lock;
- unsigned long last_req_ts;
-};
-
-static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
-
-static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
-{
- return readw(host->ctl + (addr << host->bus_shift));
-}
-
-static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
- u16 *buf, int count)
-{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
-{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
-}
-
-static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
-{
- writew(val, host->ctl + (addr << host->bus_shift));
-}
-
-static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
- u16 *buf, int count)
-{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
-{
- writew(val, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
-}
-
-static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
-{
- host->sg_len = data->sg_len;
- host->sg_ptr = data->sg;
- host->sg_orig = data->sg;
- host->sg_off = 0;
-}
-
-static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
-{
- host->sg_ptr = sg_next(host->sg_ptr);
- host->sg_off = 0;
- return --host->sg_len;
-}
-
-static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
-{
- local_irq_save(*flags);
- return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
-}
-
-static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt)
-{
- kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
- local_irq_restore(*flags);
-}
-
-#ifdef CONFIG_MMC_DEBUG
-
-#define STATUS_TO_TEXT(a, status, i) \
- do { \
- if (status & TMIO_STAT_##a) { \
- if (i++) \
- printk(" | "); \
- printk(#a); \
- } \
- } while (0)
-
-void pr_debug_status(u32 status)
-{
- int i = 0;
- printk(KERN_DEBUG "status: %08x = ", status);
- STATUS_TO_TEXT(CARD_REMOVE, status, i);
- STATUS_TO_TEXT(CARD_INSERT, status, i);
- STATUS_TO_TEXT(SIGSTATE, status, i);
- STATUS_TO_TEXT(WRPROTECT, status, i);
- STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
- STATUS_TO_TEXT(CARD_INSERT_A, status, i);
- STATUS_TO_TEXT(SIGSTATE_A, status, i);
- STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
- STATUS_TO_TEXT(STOPBIT_ERR, status, i);
- STATUS_TO_TEXT(ILL_FUNC, status, i);
- STATUS_TO_TEXT(CMD_BUSY, status, i);
- STATUS_TO_TEXT(CMDRESPEND, status, i);
- STATUS_TO_TEXT(DATAEND, status, i);
- STATUS_TO_TEXT(CRCFAIL, status, i);
- STATUS_TO_TEXT(DATATIMEOUT, status, i);
- STATUS_TO_TEXT(CMDTIMEOUT, status, i);
- STATUS_TO_TEXT(RXOVERFLOW, status, i);
- STATUS_TO_TEXT(TXUNDERRUN, status, i);
- STATUS_TO_TEXT(RXRDY, status, i);
- STATUS_TO_TEXT(TXRQ, status, i);
- STATUS_TO_TEXT(ILL_ACCESS, status, i);
- printk("\n");
-}
-
-#else
-#define pr_debug_status(s) do { } while (0)
-#endif
-
-static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- if (enable) {
- host->sdio_irq_enabled = 1;
- sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
- sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
- (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
- } else {
- sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
- sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
- host->sdio_irq_enabled = 0;
- }
-}
-
-static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
-{
- u32 clk = 0, clock;
-
- if (new_clock) {
- for (clock = host->mmc->f_min, clk = 0x80000080;
- new_clock >= (clock<<1); clk >>= 1)
- clock <<= 1;
- clk |= 0x100;
- }
-
- if (host->set_clk_div)
- host->set_clk_div(host->pdev, (clk>>22) & 1);
-
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
-}
-
-static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
-{
- struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-
- /*
- * Testing on sh-mobile showed that SDIO IRQs are unmasked when
- * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
- * device IRQ here and restore the SDIO IRQ mask before
- * re-enabling the device IRQ.
- */
- if (pdata->flags & TMIO_MMC_SDIO_IRQ)
- disable_irq(host->irq);
- sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
- msleep(10);
- if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
- tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
- enable_irq(host->irq);
- }
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- msleep(10);
-}
-
-static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
-{
- struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- msleep(10);
- /* see comment in tmio_mmc_clk_stop above */
- if (pdata->flags & TMIO_MMC_SDIO_IRQ)
- disable_irq(host->irq);
- sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- msleep(10);
- if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
- tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
- enable_irq(host->irq);
- }
-}
-
-static void reset(struct tmio_mmc_host *host)
-{
- /* FIXME - should we set stop clock reg here */
- sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
- sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
- msleep(10);
- sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
- sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
- msleep(10);
-}
-
-static void tmio_mmc_reset_work(struct work_struct *work)
-{
- struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
- delayed_reset_work.work);
- struct mmc_request *mrq;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
- mrq = host->mrq;
-
- /* request already finished */
- if (!mrq
- || time_is_after_jiffies(host->last_req_ts +
- msecs_to_jiffies(2000))) {
- spin_unlock_irqrestore(&host->lock, flags);
- return;
- }
-
- dev_warn(&host->pdev->dev,
- "timeout waiting for hardware interrupt (CMD%u)\n",
- mrq->cmd->opcode);
-
- if (host->data)
- host->data->error = -ETIMEDOUT;
- else if (host->cmd)
- host->cmd->error = -ETIMEDOUT;
- else
- mrq->cmd->error = -ETIMEDOUT;
-
- host->cmd = NULL;
- host->data = NULL;
- host->mrq = NULL;
-
- spin_unlock_irqrestore(&host->lock, flags);
-
- reset(host);
-
- mmc_request_done(host->mmc, mrq);
-}
-
-static void
-tmio_mmc_finish_request(struct tmio_mmc_host *host)
-{
- struct mmc_request *mrq = host->mrq;
-
- if (!mrq)
- return;
-
- host->mrq = NULL;
- host->cmd = NULL;
- host->data = NULL;
-
- cancel_delayed_work(&host->delayed_reset_work);
-
- mmc_request_done(host->mmc, mrq);
-}
-
-/* These are the bitmasks the tmio chip requires to implement the MMC response
- * types. Note that R1 and R6 are the same in this scheme. */
-#define APP_CMD 0x0040
-#define RESP_NONE 0x0300
-#define RESP_R1 0x0400
-#define RESP_R1B 0x0500
-#define RESP_R2 0x0600
-#define RESP_R3 0x0700
-#define DATA_PRESENT 0x0800
-#define TRANSFER_READ 0x1000
-#define TRANSFER_MULTI 0x2000
-#define SECURITY_CMD 0x4000
-
-static int
-tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
-{
- struct mmc_data *data = host->data;
- int c = cmd->opcode;
-
- /* Command 12 is handled by hardware */
- if (cmd->opcode == 12 && !cmd->arg) {
- sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
- return 0;
- }
-
- switch (mmc_resp_type(cmd)) {
- case MMC_RSP_NONE: c |= RESP_NONE; break;
- case MMC_RSP_R1: c |= RESP_R1; break;
- case MMC_RSP_R1B: c |= RESP_R1B; break;
- case MMC_RSP_R2: c |= RESP_R2; break;
- case MMC_RSP_R3: c |= RESP_R3; break;
- default:
- pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
- return -EINVAL;
- }
-
- host->cmd = cmd;
-
-/* FIXME - this seems to be ok commented out but the spec suggest this bit
- * should be set when issuing app commands.
- * if(cmd->flags & MMC_FLAG_ACMD)
- * c |= APP_CMD;
- */
- if (data) {
- c |= DATA_PRESENT;
- if (data->blocks > 1) {
- sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
- c |= TRANSFER_MULTI;
- }
- if (data->flags & MMC_DATA_READ)
- c |= TRANSFER_READ;
- }
-
- enable_mmc_irqs(host, TMIO_MASK_CMD);
-
- /* Fire off the command */
- sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
- sd_ctrl_write16(host, CTL_SD_CMD, c);
-
- return 0;
-}
-
-/*
- * This chip always returns (at least?) as much data as you ask for.
- * I'm unsure what happens if you ask for less than a block. This should be
- * looked into to ensure that a funny length read doesnt hose the controller.
- */
-static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
-{
- struct mmc_data *data = host->data;
- void *sg_virt;
- unsigned short *buf;
- unsigned int count;
- unsigned long flags;
-
- if (!data) {
- pr_debug("Spurious PIO IRQ\n");
- return;
- }
-
- sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
- buf = (unsigned short *)(sg_virt + host->sg_off);
-
- count = host->sg_ptr->length - host->sg_off;
- if (count > data->blksz)
- count = data->blksz;
-
- pr_debug("count: %08x offset: %08x flags %08x\n",
- count, host->sg_off, data->flags);
-
- /* Transfer the data */
- if (data->flags & MMC_DATA_READ)
- sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
- else
- sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
-
- host->sg_off += count;
-
- tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
-
- if (host->sg_off == host->sg_ptr->length)
- tmio_mmc_next_sg(host);
-
- return;
-}
-
-/* needs to be called with host->lock held */
-static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
-{
- struct mmc_data *data = host->data;
- struct mmc_command *stop;
-
- host->data = NULL;
-
- if (!data) {
- dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
- return;
- }
- stop = data->stop;
-
- /* FIXME - return correct transfer count on errors */
- if (!data->error)
- data->bytes_xfered = data->blocks * data->blksz;
- else
- data->bytes_xfered = 0;
-
- pr_debug("Completed data request\n");
-
- /*
- * FIXME: other drivers allow an optional stop command of any given type
- * which we dont do, as the chip can auto generate them.
- * Perhaps we can be smarter about when to use auto CMD12 and
- * only issue the auto request when we know this is the desired
- * stop command, allowing fallback to the stop command the
- * upper layers expect. For now, we do what works.
- */
-
- if (data->flags & MMC_DATA_READ) {
- if (!host->chan_rx)
- disable_mmc_irqs(host, TMIO_MASK_READOP);
- else
- tmio_check_bounce_buffer(host);
- dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
- host->mrq);
- } else {
- if (!host->chan_tx)
- disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
- dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
- host->mrq);
- }
-
- if (stop) {
- if (stop->opcode == 12 && !stop->arg)
- sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
- else
- BUG();
- }
-
- tmio_mmc_finish_request(host);
-}
-
-static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
-{
- struct mmc_data *data;
- spin_lock(&host->lock);
- data = host->data;
-
- if (!data)
- goto out;
-
- if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
- /*
- * Has all data been written out yet? Testing on SuperH showed,
- * that in most cases the first interrupt comes already with the
- * BUSY status bit clear, but on some operations, like mount or
- * in the beginning of a write / sync / umount, there is one
- * DATAEND interrupt with the BUSY bit set, in this cases
- * waiting for one more interrupt fixes the problem.
- */
- if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
- disable_mmc_irqs(host, TMIO_STAT_DATAEND);
- tasklet_schedule(&host->dma_complete);
- }
- } else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
- disable_mmc_irqs(host, TMIO_STAT_DATAEND);
- tasklet_schedule(&host->dma_complete);
- } else {
- tmio_mmc_do_data_irq(host);
- }
-out:
- spin_unlock(&host->lock);
-}
-
-static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
- unsigned int stat)
-{
- struct mmc_command *cmd = host->cmd;
- int i, addr;
-
- spin_lock(&host->lock);
-
- if (!host->cmd) {
- pr_debug("Spurious CMD irq\n");
- goto out;
- }
-
- host->cmd = NULL;
-
- /* This controller is sicker than the PXA one. Not only do we need to
- * drop the top 8 bits of the first response word, we also need to
- * modify the order of the response for short response command types.
- */
-
- for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
- cmd->resp[i] = sd_ctrl_read32(host, addr);
-
- if (cmd->flags & MMC_RSP_136) {
- cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
- cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
- cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
- cmd->resp[3] <<= 8;
- } else if (cmd->flags & MMC_RSP_R3) {
- cmd->resp[0] = cmd->resp[3];
- }
-
- if (stat & TMIO_STAT_CMDTIMEOUT)
- cmd->error = -ETIMEDOUT;
- else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
- cmd->error = -EILSEQ;
-
- /* If there is data to handle we enable data IRQs here, and
- * we will ultimatley finish the request in the data_end handler.
- * If theres no data or we encountered an error, finish now.
- */
- if (host->data && !cmd->error) {
- if (host->data->flags & MMC_DATA_READ) {
- if (!host->chan_rx)
- enable_mmc_irqs(host, TMIO_MASK_READOP);
- } else {
- if (!host->chan_tx)
- enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
- else
- tasklet_schedule(&host->dma_issue);
- }
- } else {
- tmio_mmc_finish_request(host);
- }
-
-out:
- spin_unlock(&host->lock);
-
- return;
-}
-
-static irqreturn_t tmio_mmc_irq(int irq, void *devid)
-{
- struct tmio_mmc_host *host = devid;
- struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
- unsigned int ireg, irq_mask, status;
- unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
-
- pr_debug("MMC IRQ begin\n");
-
- status = sd_ctrl_read32(host, CTL_STATUS);
- irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
- ireg = status & TMIO_MASK_IRQ & ~irq_mask;
-
- sdio_ireg = 0;
- if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
- sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
- sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
- sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
-
- sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
-
- if (sdio_ireg && !host->sdio_irq_enabled) {
- pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
- sdio_status, sdio_irq_mask, sdio_ireg);
- tmio_mmc_enable_sdio_irq(host->mmc, 0);
- goto out;
- }
- if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
- sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
- mmc_signal_sdio_irq(host->mmc);
-
- if (sdio_ireg)
- goto out;
- }
-
- pr_debug_status(status);
- pr_debug_status(ireg);
-
- if (!ireg) {
- disable_mmc_irqs(host, status & ~irq_mask);
-
- pr_warning("tmio_mmc: Spurious irq, disabling! "
- "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
- pr_debug_status(status);
-
- goto out;
- }
-
- while (ireg) {
- /* Card insert / remove attempts */
- if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
- ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
- TMIO_STAT_CARD_REMOVE);
- mmc_detect_change(host->mmc, msecs_to_jiffies(100));
- }
-
- /* CRC and other errors */
-/* if (ireg & TMIO_STAT_ERR_IRQ)
- * handled |= tmio_error_irq(host, irq, stat);
- */
-
- /* Command completion */
- if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
- ack_mmc_irqs(host,
- TMIO_STAT_CMDRESPEND |
- TMIO_STAT_CMDTIMEOUT);
- tmio_mmc_cmd_irq(host, status);
- }
-
- /* Data transfer */
- if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
- ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
- tmio_mmc_pio_irq(host);
- }
-
- /* Data transfer completion */
- if (ireg & TMIO_STAT_DATAEND) {
- ack_mmc_irqs(host, TMIO_STAT_DATAEND);
- tmio_mmc_data_irq(host);
- }
-
- /* Check status - keep going until we've handled it all */
- status = sd_ctrl_read32(host, CTL_STATUS);
- irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
- ireg = status & TMIO_MASK_IRQ & ~irq_mask;
-
- pr_debug("Status at end of loop: %08x\n", status);
- pr_debug_status(status);
- }
- pr_debug("MMC IRQ end\n");
-
-out:
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_TMIO_MMC_DMA
-static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
-{
- if (host->sg_ptr == &host->bounce_sg) {
- unsigned long flags;
- void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
- memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
- tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
- }
-}
-
-static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
-{
-#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
- /* Switch DMA mode on or off - SuperH specific? */
- sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
-#endif
-}
-
-static void tmio_dma_complete(void *arg)
-{
- struct tmio_mmc_host *host = arg;
-
- dev_dbg(&host->pdev->dev, "Command completed\n");
-
- if (!host->data)
- dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
- else
- enable_mmc_irqs(host, TMIO_STAT_DATAEND);
-}
-
-static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
-{
- struct scatterlist *sg = host->sg_ptr, *sg_tmp;
- struct dma_async_tx_descriptor *desc = NULL;
- struct dma_chan *chan = host->chan_rx;
- struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
- dma_cookie_t cookie;
- int ret, i;
- bool aligned = true, multiple = true;
- unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
-
- for_each_sg(sg, sg_tmp, host->sg_len, i) {
- if (sg_tmp->offset & align)
- aligned = false;
- if (sg_tmp->length & align) {
- multiple = false;
- break;
- }
- }
-
- if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
- align >= MAX_ALIGN)) || !multiple) {
- ret = -EINVAL;
- goto pio;
- }
-
- /* The only sg element can be unaligned, use our bounce buffer then */
- if (!aligned) {
- sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
- host->sg_ptr = &host->bounce_sg;
- sg = host->sg_ptr;
- }
-
- ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
- if (ret > 0)
- desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
- if (desc) {
- desc->callback = tmio_dma_complete;
- desc->callback_param = host;
- cookie = dmaengine_submit(desc);
- dma_async_issue_pending(chan);
- }
- dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
- __func__, host->sg_len, ret, cookie, host->mrq);
-
-pio:
- if (!desc) {
- /* DMA failed, fall back to PIO */
- if (ret >= 0)
- ret = -EIO;
- host->chan_rx = NULL;
- dma_release_channel(chan);
- /* Free the Tx channel too */
- chan = host->chan_tx;
- if (chan) {
- host->chan_tx = NULL;
- dma_release_channel(chan);
- }
- dev_warn(&host->pdev->dev,
- "DMA failed: %d, falling back to PIO\n", ret);
- tmio_mmc_enable_dma(host, false);
- }
-
- dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
- desc, cookie, host->sg_len);
-}
-
-static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
-{
- struct scatterlist *sg = host->sg_ptr, *sg_tmp;
- struct dma_async_tx_descriptor *desc = NULL;
- struct dma_chan *chan = host->chan_tx;
- struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
- dma_cookie_t cookie;
- int ret, i;
- bool aligned = true, multiple = true;
- unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
-
- for_each_sg(sg, sg_tmp, host->sg_len, i) {
- if (sg_tmp->offset & align)
- aligned = false;
- if (sg_tmp->length & align) {
- multiple = false;
- break;
- }
- }
-
- if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
- align >= MAX_ALIGN)) || !multiple) {
- ret = -EINVAL;
- goto pio;
- }
-
- /* The only sg element can be unaligned, use our bounce buffer then */
- if (!aligned) {
- unsigned long flags;
- void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
- sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
- memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
- tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
- host->sg_ptr = &host->bounce_sg;
- sg = host->sg_ptr;
- }
-
- ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
- if (ret > 0)
- desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
- if (desc) {
- desc->callback = tmio_dma_complete;
- desc->callback_param = host;
- cookie = dmaengine_submit(desc);
- }
- dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
- __func__, host->sg_len, ret, cookie, host->mrq);
-
-pio:
- if (!desc) {
- /* DMA failed, fall back to PIO */
- if (ret >= 0)
- ret = -EIO;
- host->chan_tx = NULL;
- dma_release_channel(chan);
- /* Free the Rx channel too */
- chan = host->chan_rx;
- if (chan) {
- host->chan_rx = NULL;
- dma_release_channel(chan);
- }
- dev_warn(&host->pdev->dev,
- "DMA failed: %d, falling back to PIO\n", ret);
- tmio_mmc_enable_dma(host, false);
- }
-
- dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
- desc, cookie);
-}
-
-static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
- struct mmc_data *data)
-{
- if (data->flags & MMC_DATA_READ) {
- if (host->chan_rx)
- tmio_mmc_start_dma_rx(host);
- } else {
- if (host->chan_tx)
- tmio_mmc_start_dma_tx(host);
- }
-}
-
-static void tmio_issue_tasklet_fn(unsigned long priv)
-{
- struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
- struct dma_chan *chan = host->chan_tx;
-
- dma_async_issue_pending(chan);
-}
-
-static void tmio_tasklet_fn(unsigned long arg)
-{
- struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
-
- if (!host->data)
- goto out;
-
- if (host->data->flags & MMC_DATA_READ)
- dma_unmap_sg(host->chan_rx->device->dev,
- host->sg_ptr, host->sg_len,
- DMA_FROM_DEVICE);
- else
- dma_unmap_sg(host->chan_tx->device->dev,
- host->sg_ptr, host->sg_len,
- DMA_TO_DEVICE);
-
- tmio_mmc_do_data_irq(host);
-out:
- spin_unlock_irqrestore(&host->lock, flags);
-}
-
-/* It might be necessary to make filter MFD specific */
-static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
-{
- dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
- chan->private = arg;
- return true;
-}
-
-static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
- struct tmio_mmc_data *pdata)
-{
- /* We can only either use DMA for both Tx and Rx or not use it at all */
- if (pdata->dma) {
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
- pdata->dma->chan_priv_tx);
- dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
- host->chan_tx);
-
- if (!host->chan_tx)
- return;
-
- host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
- pdata->dma->chan_priv_rx);
- dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
- host->chan_rx);
-
- if (!host->chan_rx) {
- dma_release_channel(host->chan_tx);
- host->chan_tx = NULL;
- return;
- }
-
- tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
- tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);
-
- tmio_mmc_enable_dma(host, true);
- }
-}
-
-static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
-{
- if (host->chan_tx) {
- struct dma_chan *chan = host->chan_tx;
- host->chan_tx = NULL;
- dma_release_channel(chan);
- }
- if (host->chan_rx) {
- struct dma_chan *chan = host->chan_rx;
- host->chan_rx = NULL;
- dma_release_channel(chan);
- }
-}
-#else
-static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
-{
-}
-
-static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
- struct mmc_data *data)
-{
-}
-
-static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
- struct tmio_mmc_data *pdata)
-{
- host->chan_tx = NULL;
- host->chan_rx = NULL;
-}
-
-static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
-{
-}
-#endif
-
-static int tmio_mmc_start_data(struct tmio_mmc_host *host,
- struct mmc_data *data)
-{
- struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-
- pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
- data->blksz, data->blocks);
-
- /* Some hardware cannot perform 2 byte requests in 4 bit mode */
- if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
- int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
-
- if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
- pr_err("%s: %d byte block unsupported in 4 bit mode\n",
- mmc_hostname(host->mmc), data->blksz);
- return -EINVAL;
- }
- }
-
- tmio_mmc_init_sg(host, data);
- host->data = data;
-
- /* Set transfer length / blocksize */
- sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
- sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
-
- tmio_mmc_start_dma(host, data);
-
- return 0;
-}
-
-/* Process requests from the MMC layer */
-static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
- int ret;
-
- if (host->mrq)
- pr_debug("request not null\n");
-
- host->last_req_ts = jiffies;
- wmb();
- host->mrq = mrq;
-
- if (mrq->data) {
- ret = tmio_mmc_start_data(host, mrq->data);
- if (ret)
- goto fail;
- }
-
- ret = tmio_mmc_start_command(host, mrq->cmd);
- if (!ret) {
- schedule_delayed_work(&host->delayed_reset_work,
- msecs_to_jiffies(2000));
- return;
- }
-
-fail:
- host->mrq = NULL;
- mrq->cmd->error = ret;
- mmc_request_done(mmc, mrq);
-}
-
-/* Set MMC clock / power.
- * Note: This controller uses a simple divider scheme therefore it cannot
- * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
- * MMC wont run that fast, it has to be clocked at 12MHz which is the next
- * slowest setting.
- */
-static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- if (ios->clock)
- tmio_mmc_set_clock(host, ios->clock);
-
- /* Power sequence - OFF -> ON -> UP */
- switch (ios->power_mode) {
- case MMC_POWER_OFF: /* power down SD bus */
- if (host->set_pwr)
- host->set_pwr(host->pdev, 0);
- tmio_mmc_clk_stop(host);
- break;
- case MMC_POWER_ON: /* power up SD bus */
- if (host->set_pwr)
- host->set_pwr(host->pdev, 1);
- break;
- case MMC_POWER_UP: /* start bus clock */
- tmio_mmc_clk_start(host);
- break;
- }
-
- switch (ios->bus_width) {
- case MMC_BUS_WIDTH_1:
- sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
- break;
- case MMC_BUS_WIDTH_4:
- sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
- break;
- }
-
- /* Let things settle. delay taken from winCE driver */
- udelay(140);
-}
-
-static int tmio_mmc_get_ro(struct mmc_host *mmc)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
- struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-
- return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
-}
-
-static int tmio_mmc_get_cd(struct mmc_host *mmc)
-{
- struct tmio_mmc_host *host = mmc_priv(mmc);
- struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
-
- if (!pdata->get_cd)
- return -ENOSYS;
- else
- return pdata->get_cd(host->pdev);
-}
-
-static const struct mmc_host_ops tmio_mmc_ops = {
- .request = tmio_mmc_request,
- .set_ios = tmio_mmc_set_ios,
- .get_ro = tmio_mmc_get_ro,
- .get_cd = tmio_mmc_get_cd,
- .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
-};
+#include "tmio_mmc.h"
#ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
#define tmio_mmc_resume NULL
#endif
-static int __devinit tmio_mmc_probe(struct platform_device *dev)
+static int __devinit tmio_mmc_probe(struct platform_device *pdev)
{
- const struct mfd_cell *cell = mfd_get_cell(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
struct tmio_mmc_data *pdata;
- struct resource *res_ctl;
struct tmio_mmc_host *host;
- struct mmc_host *mmc;
int ret = -EINVAL;
- u32 irq_mask = TMIO_MASK_CMD;
- if (dev->num_resources != 2)
+ if (pdev->num_resources != 2)
goto out;
- res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res_ctl)
- goto out;
-
- pdata = mfd_get_data(dev);
+ pdata = mfd_get_data(pdev);
if (!pdata || !pdata->hclk)
goto out;
- ret = -ENOMEM;
-
- mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
- if (!mmc)
- goto out;
-
- host = mmc_priv(mmc);
- host->mmc = mmc;
- host->pdev = dev;
- platform_set_drvdata(dev, mmc);
-
- host->set_pwr = pdata->set_pwr;
- host->set_clk_div = pdata->set_clk_div;
-
- /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
- host->bus_shift = resource_size(res_ctl) >> 10;
-
- host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
- if (!host->ctl)
- goto host_free;
-
- mmc->ops = &tmio_mmc_ops;
- mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
- mmc->f_max = pdata->hclk;
- mmc->f_min = mmc->f_max / 512;
- mmc->max_segs = 32;
- mmc->max_blk_size = 512;
- mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
- mmc->max_segs;
- mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
- mmc->max_seg_size = mmc->max_req_size;
- if (pdata->ocr_mask)
- mmc->ocr_avail = pdata->ocr_mask;
- else
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
-
/* Tell the MFD core we are ready to be enabled */
if (cell->enable) {
- ret = cell->enable(dev);
+ ret = cell->enable(pdev);
if (ret)
- goto unmap_ctl;
+ goto out;
}
- tmio_mmc_clk_stop(host);
- reset(host);
-
- ret = platform_get_irq(dev, 0);
- if (ret >= 0)
- host->irq = ret;
- else
- goto cell_disable;
-
- disable_mmc_irqs(host, TMIO_MASK_ALL);
- if (pdata->flags & TMIO_MMC_SDIO_IRQ)
- tmio_mmc_enable_sdio_irq(mmc, 0);
-
- ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
- IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
+ ret = tmio_mmc_host_probe(&host, pdev, pdata);
if (ret)
goto cell_disable;
- spin_lock_init(&host->lock);
-
- /* Init delayed work for request timeouts */
- INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
-
- /* See if we also get DMA */
- tmio_mmc_request_dma(host, pdata);
-
- mmc_add_host(mmc);
-
pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
(unsigned long)host->ctl, host->irq);
- /* Unmask the IRQs we want to know about */
- if (!host->chan_rx)
- irq_mask |= TMIO_MASK_READOP;
- if (!host->chan_tx)
- irq_mask |= TMIO_MASK_WRITEOP;
- enable_mmc_irqs(host, irq_mask);
-
return 0;
cell_disable:
if (cell->disable)
- cell->disable(dev);
-unmap_ctl:
- iounmap(host->ctl);
-host_free:
- mmc_free_host(mmc);
+ cell->disable(pdev);
out:
return ret;
}
-static int __devexit tmio_mmc_remove(struct platform_device *dev)
+static int __devexit tmio_mmc_remove(struct platform_device *pdev)
{
- const struct mfd_cell *cell = mfd_get_cell(dev);
- struct mmc_host *mmc = platform_get_drvdata(dev);
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
- platform_set_drvdata(dev, NULL);
+ platform_set_drvdata(pdev, NULL);
if (mmc) {
- struct tmio_mmc_host *host = mmc_priv(mmc);
- mmc_remove_host(mmc);
- cancel_delayed_work_sync(&host->delayed_reset_work);
- tmio_mmc_release_dma(host);
- free_irq(host->irq, host);
+ tmio_mmc_host_remove(mmc_priv(mmc));
if (cell->disable)
- cell->disable(dev);
- iounmap(host->ctl);
- mmc_free_host(mmc);
+ cell->disable(pdev);
}
return 0;
--- /dev/null
+/*
+ * linux/drivers/mmc/host/tmio_mmc.h
+ *
+ * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2004 Ian Molton
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for the MMC / SD / SDIO cell found in:
+ *
+ * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
+ */
+
+#ifndef TMIO_MMC_H
+#define TMIO_MMC_H
+
+#include <linux/highmem.h>
+#include <linux/mmc/tmio.h>
+#include <linux/pagemap.h>
+
+/* Definitions for values the CTRL_SDIO_STATUS register can take. */
+#define TMIO_SDIO_STAT_IOIRQ 0x0001
+#define TMIO_SDIO_STAT_EXPUB52 0x4000
+#define TMIO_SDIO_STAT_EXWT 0x8000
+#define TMIO_SDIO_MASK_ALL 0xc007
+
+/* Define some IRQ masks */
+/* This is the mask used at reset by the chip */
+#define TMIO_MASK_ALL 0x837f031d
+#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
+#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
+#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
+ TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
+#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
+
+struct tmio_mmc_data;
+
+struct tmio_mmc_host {
+ void __iomem *ctl;
+ unsigned long bus_shift;
+ struct mmc_command *cmd;
+ struct mmc_request *mrq;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ int irq;
+ unsigned int sdio_irq_enabled;
+
+ /* Callbacks for clock / power control */
+ void (*set_pwr)(struct platform_device *host, int state);
+ void (*set_clk_div)(struct platform_device *host, int state);
+
+ /* pio related stuff */
+ struct scatterlist *sg_ptr;
+ struct scatterlist *sg_orig;
+ unsigned int sg_len;
+ unsigned int sg_off;
+
+ struct platform_device *pdev;
+ struct tmio_mmc_data *pdata;
+
+ /* DMA support */
+ bool force_pio;
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct tasklet_struct dma_complete;
+ struct tasklet_struct dma_issue;
+ struct scatterlist bounce_sg;
+ u8 *bounce_buf;
+
+ /* Track lost interrupts */
+ struct delayed_work delayed_reset_work;
+ spinlock_t lock;
+ unsigned long last_req_ts;
+};
+
+int tmio_mmc_host_probe(struct tmio_mmc_host **host,
+ struct platform_device *pdev,
+ struct tmio_mmc_data *pdata);
+void tmio_mmc_host_remove(struct tmio_mmc_host *host);
+void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
+
+void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
+void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
+
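+/*
+ * Map an sg element for CPU access.  Interrupts stay disabled while the
+ * mapping is held because the KM_BIO_SRC_IRQ kmap slot can also be used
+ * from interrupt context.
+ */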
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
+ unsigned long *flags)
+{
+ local_irq_save(*flags);
+ return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
+
+static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
+ unsigned long *flags, void *virt)
+{
+ kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
+ local_irq_restore(*flags);
+}
+
+#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
+void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
+void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
+void tmio_mmc_release_dma(struct tmio_mmc_host *host);
+#else
+static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+}
+
+static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata)
+{
+ host->chan_tx = NULL;
+ host->chan_rx = NULL;
+}
+
+static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+}
+#endif
+
+#endif
--- /dev/null
+/*
+ * linux/drivers/mmc/host/tmio_mmc_dma.c
+ *
+ * Copyright (C) 2010-2011 Guennadi Liakhovetski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * DMA function for TMIO MMC implementations
+ */
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/tmio.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+
+#include "tmio_mmc.h"
+
+#define TMIO_MMC_MIN_DMA_LEN 8
+
+static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
+ /* Switch DMA mode on or off - SuperH specific? */
+ writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
+#endif
+}
+
+static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+{
+ struct scatterlist *sg = host->sg_ptr, *sg_tmp;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan = host->chan_rx;
+ struct tmio_mmc_data *pdata = host->pdata;
+ dma_cookie_t cookie;
+ int ret, i;
+ bool aligned = true, multiple = true;
+ unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
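+	/*
+	 * DMA needs every sg element length to be a multiple of the alignment;
+	 * a single unaligned element (no larger than a page) can still be
+	 * handled via the bounce buffer, anything else falls back to PIO.
+	 */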
+ for_each_sg(sg, sg_tmp, host->sg_len, i) {
+ if (sg_tmp->offset & align)
+ aligned = false;
+ if (sg_tmp->length & align) {
+ multiple = false;
+ break;
+ }
+ }
+
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ (align & PAGE_MASK))) || !multiple) {
+ ret = -EINVAL;
+ goto pio;
+ }
+
+ if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
+ host->force_pio = true;
+ return;
+ }
+
+ tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);
+
+	/* The sole sg element may be unaligned - use our bounce buffer then */
+ if (!aligned) {
+ sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+ host->sg_ptr = &host->bounce_sg;
+ sg = host->sg_ptr;
+ }
+
+ ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+ if (ret > 0)
+ desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+ DMA_FROM_DEVICE, DMA_CTRL_ACK);
+
+ if (desc) {
+ cookie = dmaengine_submit(desc);
+ if (cookie < 0) {
+ desc = NULL;
+ ret = cookie;
+ }
+ }
+ dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+ __func__, host->sg_len, ret, cookie, host->mrq);
+
+pio:
+ if (!desc) {
+ /* DMA failed, fall back to PIO */
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ /* Free the Tx channel too */
+ chan = host->chan_tx;
+ if (chan) {
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+ tmio_mmc_enable_dma(host, false);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+ desc, cookie, host->sg_len);
+}
+
+static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+{
+ struct scatterlist *sg = host->sg_ptr, *sg_tmp;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_chan *chan = host->chan_tx;
+ struct tmio_mmc_data *pdata = host->pdata;
+ dma_cookie_t cookie;
+ int ret, i;
+ bool aligned = true, multiple = true;
+ unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+ for_each_sg(sg, sg_tmp, host->sg_len, i) {
+ if (sg_tmp->offset & align)
+ aligned = false;
+ if (sg_tmp->length & align) {
+ multiple = false;
+ break;
+ }
+ }
+
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ (align & PAGE_MASK))) || !multiple) {
+ ret = -EINVAL;
+ goto pio;
+ }
+
+ if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
+ host->force_pio = true;
+ return;
+ }
+
+ tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);
+
+	/* The sole sg element may be unaligned - use our bounce buffer then */
+ if (!aligned) {
+ unsigned long flags;
+ void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+ sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+ memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
+ tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
+ host->sg_ptr = &host->bounce_sg;
+ sg = host->sg_ptr;
+ }
+
+ ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
+ if (ret > 0)
+ desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+ DMA_TO_DEVICE, DMA_CTRL_ACK);
+
+ if (desc) {
+ cookie = dmaengine_submit(desc);
+ if (cookie < 0) {
+ desc = NULL;
+ ret = cookie;
+ }
+ }
+ dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+ __func__, host->sg_len, ret, cookie, host->mrq);
+
+pio:
+ if (!desc) {
+ /* DMA failed, fall back to PIO */
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ /* Free the Rx channel too */
+ chan = host->chan_rx;
+ if (chan) {
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+ tmio_mmc_enable_dma(host, false);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
+ desc, cookie);
+}
+
+void tmio_mmc_start_dma(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (data->flags & MMC_DATA_READ) {
+ if (host->chan_rx)
+ tmio_mmc_start_dma_rx(host);
+ } else {
+ if (host->chan_tx)
+ tmio_mmc_start_dma_tx(host);
+ }
+}
+
+static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
+{
+ struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
+ struct dma_chan *chan = NULL;
+
+ spin_lock_irq(&host->lock);
+
+ if (host && host->data) {
+ if (host->data->flags & MMC_DATA_READ)
+ chan = host->chan_rx;
+ else
+ chan = host->chan_tx;
+ }
+
+ spin_unlock_irq(&host->lock);
+
+ tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+
+ if (chan)
+ dma_async_issue_pending(chan);
+}
+
+static void tmio_mmc_tasklet_fn(unsigned long arg)
+{
+ struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+
+ spin_lock_irq(&host->lock);
+
+ if (!host->data)
+ goto out;
+
+ if (host->data->flags & MMC_DATA_READ)
+ dma_unmap_sg(host->chan_rx->device->dev,
+ host->sg_ptr, host->sg_len,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_sg(host->chan_tx->device->dev,
+ host->sg_ptr, host->sg_len,
+ DMA_TO_DEVICE);
+
+ tmio_mmc_do_data_irq(host);
+out:
+ spin_unlock_irq(&host->lock);
+}
+
+/* It might be necessary to make this filter MFD-specific */
+static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
+{
+ dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
+ chan->private = arg;
+ return true;
+}
+
+void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
+{
+	/* We can either use DMA for both Tx and Rx, or not use it at all */
+ if (pdata->dma) {
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
+ pdata->dma->chan_priv_tx);
+ dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
+ host->chan_tx);
+
+ if (!host->chan_tx)
+ return;
+
+ host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
+ pdata->dma->chan_priv_rx);
+ dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
+ host->chan_rx);
+
+ if (!host->chan_rx)
+ goto ereqrx;
+
+ host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!host->bounce_buf)
+ goto ebouncebuf;
+
+ tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
+ tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
+
+ tmio_mmc_enable_dma(host, true);
+
+ return;
+ebouncebuf:
+ dma_release_channel(host->chan_rx);
+ host->chan_rx = NULL;
+ereqrx:
+ dma_release_channel(host->chan_tx);
+ host->chan_tx = NULL;
+ return;
+ }
+}
+
+void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+ if (host->chan_tx) {
+ struct dma_chan *chan = host->chan_tx;
+ host->chan_tx = NULL;
+ dma_release_channel(chan);
+ }
+ if (host->chan_rx) {
+ struct dma_chan *chan = host->chan_rx;
+ host->chan_rx = NULL;
+ dma_release_channel(chan);
+ }
+ if (host->bounce_buf) {
+ free_pages((unsigned long)host->bounce_buf, 0);
+ host->bounce_buf = NULL;
+ }
+}
--- /dev/null
+/*
+ * linux/drivers/mmc/host/tmio_mmc_pio.c
+ *
+ * Copyright (C) 2011 Guennadi Liakhovetski
+ * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2004 Ian Molton
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for the MMC / SD / SDIO IP found in:
+ *
+ * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
+ *
+ * This driver draws mainly on scattered spec sheets, reverse engineering
+ * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
+ * support). (Further 4 bit support from a later datasheet).
+ *
+ * TODO:
+ * Investigate using a workqueue for PIO transfers
+ * Eliminate FIXMEs
+ * SDIO support
+ * Better Power management
+ * Handle MMC errors better
+ * double buffer support
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/tmio.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+
+#include "tmio_mmc.h"
+
+static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ readsw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift)) |
+ readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+}
+
+static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ writesw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+}
+
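+/*
+ * In CTL_IRQ_MASK a set bit masks the corresponding interrupt, so enabling
+ * IRQs clears bits and disabling IRQs sets them.
+ */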
+void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
+{
+ u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
+ sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
+}
+
+void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
+{
+ u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
+ sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
+}
+
+static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
+{
+ sd_ctrl_write32(host, CTL_STATUS, ~i);
+}
+
+static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
+{
+ host->sg_len = data->sg_len;
+ host->sg_ptr = data->sg;
+ host->sg_orig = data->sg;
+ host->sg_off = 0;
+}
+
+static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
+{
+ host->sg_ptr = sg_next(host->sg_ptr);
+ host->sg_off = 0;
+ return --host->sg_len;
+}
+
+#ifdef CONFIG_MMC_DEBUG
+
+#define STATUS_TO_TEXT(a, status, i) \
+ do { \
+ if (status & TMIO_STAT_##a) { \
+ if (i++) \
+ printk(" | "); \
+ printk(#a); \
+ } \
+ } while (0)
+
+static void pr_debug_status(u32 status)
+{
+ int i = 0;
+ printk(KERN_DEBUG "status: %08x = ", status);
+ STATUS_TO_TEXT(CARD_REMOVE, status, i);
+ STATUS_TO_TEXT(CARD_INSERT, status, i);
+ STATUS_TO_TEXT(SIGSTATE, status, i);
+ STATUS_TO_TEXT(WRPROTECT, status, i);
+ STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
+ STATUS_TO_TEXT(CARD_INSERT_A, status, i);
+ STATUS_TO_TEXT(SIGSTATE_A, status, i);
+ STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
+ STATUS_TO_TEXT(STOPBIT_ERR, status, i);
+ STATUS_TO_TEXT(ILL_FUNC, status, i);
+ STATUS_TO_TEXT(CMD_BUSY, status, i);
+ STATUS_TO_TEXT(CMDRESPEND, status, i);
+ STATUS_TO_TEXT(DATAEND, status, i);
+ STATUS_TO_TEXT(CRCFAIL, status, i);
+ STATUS_TO_TEXT(DATATIMEOUT, status, i);
+ STATUS_TO_TEXT(CMDTIMEOUT, status, i);
+ STATUS_TO_TEXT(RXOVERFLOW, status, i);
+ STATUS_TO_TEXT(TXUNDERRUN, status, i);
+ STATUS_TO_TEXT(RXRDY, status, i);
+ STATUS_TO_TEXT(TXRQ, status, i);
+ STATUS_TO_TEXT(ILL_ACCESS, status, i);
+ printk("\n");
+}
+
+#else
+#define pr_debug_status(s) do { } while (0)
+#endif
+
+static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ if (enable) {
+ host->sdio_irq_enabled = 1;
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
+ (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
+ } else {
+ sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+ host->sdio_irq_enabled = 0;
+ }
+}
+
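+/*
+ * Pick the largest divided clock that does not exceed new_clock: start at
+ * f_min with the coarsest divider and halve the divider while the doubled
+ * clock still fits.  Bit 0x100 enables the SD clock output (see
+ * tmio_mmc_clk_start/stop).
+ */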
+static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
+{
+ u32 clk = 0, clock;
+
+ if (new_clock) {
+ for (clock = host->mmc->f_min, clk = 0x80000080;
+ new_clock >= (clock<<1); clk >>= 1)
+ clock <<= 1;
+ clk |= 0x100;
+ }
+
+ if (host->set_clk_div)
+ host->set_clk_div(host->pdev, (clk>>22) & 1);
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
+}
+
+static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
+{
+ struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
+
+ /* implicit BUG_ON(!res) */
+ if (resource_size(res) > 0x100) {
+ sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
+ msleep(10);
+ }
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ msleep(10);
+}
+
+static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
+{
+ struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ msleep(10);
+
+ /* implicit BUG_ON(!res) */
+ if (resource_size(res) > 0x100) {
+ sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
+ msleep(10);
+ }
+}
+
+static void tmio_mmc_reset(struct tmio_mmc_host *host)
+{
+ struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
+
+ /* FIXME - should we set stop clock reg here */
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
+ /* implicit BUG_ON(!res) */
+ if (resource_size(res) > 0x100)
+ sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
+ msleep(10);
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
+ if (resource_size(res) > 0x100)
+ sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
+ msleep(10);
+}
+
+static void tmio_mmc_reset_work(struct work_struct *work)
+{
+ struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+ delayed_reset_work.work);
+ struct mmc_request *mrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ mrq = host->mrq;
+
+ /* request already finished */
+ if (!mrq
+ || time_is_after_jiffies(host->last_req_ts +
+ msecs_to_jiffies(2000))) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ dev_warn(&host->pdev->dev,
+ "timeout waiting for hardware interrupt (CMD%u)\n",
+ mrq->cmd->opcode);
+
+ if (host->data)
+ host->data->error = -ETIMEDOUT;
+ else if (host->cmd)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ mrq->cmd->error = -ETIMEDOUT;
+
+ host->cmd = NULL;
+ host->data = NULL;
+ host->mrq = NULL;
+ host->force_pio = false;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ tmio_mmc_reset(host);
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
+{
+ struct mmc_request *mrq = host->mrq;
+
+ if (!mrq)
+ return;
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+ host->force_pio = false;
+
+ cancel_delayed_work(&host->delayed_reset_work);
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+/* These are the bitmasks the tmio chip requires to implement the MMC response
+ * types. Note that R1 and R6 are the same in this scheme. */
+#define APP_CMD 0x0040
+#define RESP_NONE 0x0300
+#define RESP_R1 0x0400
+#define RESP_R1B 0x0500
+#define RESP_R2 0x0600
+#define RESP_R3 0x0700
+#define DATA_PRESENT 0x0800
+#define TRANSFER_READ 0x1000
+#define TRANSFER_MULTI 0x2000
+#define SECURITY_CMD 0x4000
+
+static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
+{
+ struct mmc_data *data = host->data;
+ int c = cmd->opcode;
+
+ /* Command 12 is handled by hardware */
+ if (cmd->opcode == 12 && !cmd->arg) {
+ sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
+ return 0;
+ }
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE: c |= RESP_NONE; break;
+ case MMC_RSP_R1: c |= RESP_R1; break;
+ case MMC_RSP_R1B: c |= RESP_R1B; break;
+ case MMC_RSP_R2: c |= RESP_R2; break;
+ case MMC_RSP_R3: c |= RESP_R3; break;
+ default:
+ pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
+ return -EINVAL;
+ }
+
+ host->cmd = cmd;
+
+/* FIXME - this seems to be OK left commented out, but the spec suggests this bit
+ * should be set when issuing app commands.
+ * if(cmd->flags & MMC_FLAG_ACMD)
+ * c |= APP_CMD;
+ */
+ if (data) {
+ c |= DATA_PRESENT;
+ if (data->blocks > 1) {
+ sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
+ c |= TRANSFER_MULTI;
+ }
+ if (data->flags & MMC_DATA_READ)
+ c |= TRANSFER_READ;
+ }
+
+ tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);
+
+ /* Fire off the command */
+ sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
+ sd_ctrl_write16(host, CTL_SD_CMD, c);
+
+ return 0;
+}
+
+/*
+ * This chip always returns (at least?) as much data as you ask for.
+ * I'm unsure what happens if you ask for less than a block. This should be
+ * looked into to ensure that a funny length read doesn't hose the controller.
+ */
+static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
+{
+ struct mmc_data *data = host->data;
+ void *sg_virt;
+ unsigned short *buf;
+ unsigned int count;
+ unsigned long flags;
+
+ if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
+ pr_err("PIO IRQ in DMA mode!\n");
+ return;
+ } else if (!data) {
+ pr_debug("Spurious PIO IRQ\n");
+ return;
+ }
+
+ sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+ buf = (unsigned short *)(sg_virt + host->sg_off);
+
+ count = host->sg_ptr->length - host->sg_off;
+ if (count > data->blksz)
+ count = data->blksz;
+
+ pr_debug("count: %08x offset: %08x flags %08x\n",
+ count, host->sg_off, data->flags);
+
+ /* Transfer the data */
+ if (data->flags & MMC_DATA_READ)
+ sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
+ else
+ sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
+
+ host->sg_off += count;
+
+ tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
+
+ if (host->sg_off == host->sg_ptr->length)
+ tmio_mmc_next_sg(host);
+
+ return;
+}
+
+static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+ if (host->sg_ptr == &host->bounce_sg) {
+ unsigned long flags;
+ void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+ memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
+ tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
+ }
+}
+
+/* needs to be called with host->lock held */
+void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
+{
+ struct mmc_data *data = host->data;
+ struct mmc_command *stop;
+
+ host->data = NULL;
+
+ if (!data) {
+ dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
+ return;
+ }
+ stop = data->stop;
+
+ /* FIXME - return correct transfer count on errors */
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ pr_debug("Completed data request\n");
+
+ /*
+ * FIXME: other drivers allow an optional stop command of any given type
+	 * which we don't do, as the chip can auto-generate them.
+ * Perhaps we can be smarter about when to use auto CMD12 and
+ * only issue the auto request when we know this is the desired
+ * stop command, allowing fallback to the stop command the
+ * upper layers expect. For now, we do what works.
+ */
+
+ if (data->flags & MMC_DATA_READ) {
+ if (host->chan_rx && !host->force_pio)
+ tmio_mmc_check_bounce_buffer(host);
+ dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
+ host->mrq);
+ } else {
+ dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
+ host->mrq);
+ }
+
+ if (stop) {
+ if (stop->opcode == 12 && !stop->arg)
+ sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
+ else
+ BUG();
+ }
+
+ tmio_mmc_finish_request(host);
+}
+
+static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+{
+ struct mmc_data *data;
+ spin_lock(&host->lock);
+ data = host->data;
+
+ if (!data)
+ goto out;
+
+ if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
+ /*
+		 * Has all data been written out yet? Testing on SuperH showed
+		 * that in most cases the first interrupt already comes with the
+		 * BUSY status bit clear, but on some operations, like mount or
+		 * at the beginning of a write / sync / umount, there is one
+		 * DATAEND interrupt with the BUSY bit set; in these cases
+		 * waiting for one more interrupt fixes the problem.
+ */
+ if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
+ tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tasklet_schedule(&host->dma_complete);
+ }
+ } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
+ tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tasklet_schedule(&host->dma_complete);
+ } else {
+ tmio_mmc_do_data_irq(host);
+ tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
+ }
+out:
+ spin_unlock(&host->lock);
+}
+
+static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
+ unsigned int stat)
+{
+ struct mmc_command *cmd = host->cmd;
+ int i, addr;
+
+ spin_lock(&host->lock);
+
+ if (!host->cmd) {
+ pr_debug("Spurious CMD irq\n");
+ goto out;
+ }
+
+ host->cmd = NULL;
+
+ /* This controller is sicker than the PXA one. Not only do we need to
+ * drop the top 8 bits of the first response word, we also need to
+ * modify the order of the response for short response command types.
+ */
+
+ for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
+ cmd->resp[i] = sd_ctrl_read32(host, addr);
+
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
+ cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
+ cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
+ cmd->resp[3] <<= 8;
+ } else if (cmd->flags & MMC_RSP_R3) {
+ cmd->resp[0] = cmd->resp[3];
+ }
+
+ if (stat & TMIO_STAT_CMDTIMEOUT)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
+ cmd->error = -EILSEQ;
+
+ /* If there is data to handle we enable data IRQs here, and
+	 * we will ultimately finish the request in the data_end handler.
+	 * If there's no data or we encountered an error, finish now.
+ */
+ if (host->data && !cmd->error) {
+ if (host->data->flags & MMC_DATA_READ) {
+ if (host->force_pio || !host->chan_rx)
+ tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
+ else
+ tasklet_schedule(&host->dma_issue);
+ } else {
+ if (host->force_pio || !host->chan_tx)
+ tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+ else
+ tasklet_schedule(&host->dma_issue);
+ }
+ } else {
+ tmio_mmc_finish_request(host);
+ }
+
+out:
+ spin_unlock(&host->lock);
+}
+
+static irqreturn_t tmio_mmc_irq(int irq, void *devid)
+{
+ struct tmio_mmc_host *host = devid;
+ struct tmio_mmc_data *pdata = host->pdata;
+ unsigned int ireg, irq_mask, status;
+ unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
+
+ pr_debug("MMC IRQ begin\n");
+
+ status = sd_ctrl_read32(host, CTL_STATUS);
+ irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
+ ireg = status & TMIO_MASK_IRQ & ~irq_mask;
+
+ sdio_ireg = 0;
+ if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
+ sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
+ sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
+ sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
+
+ sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
+
+ if (sdio_ireg && !host->sdio_irq_enabled) {
+ pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
+ sdio_status, sdio_irq_mask, sdio_ireg);
+ tmio_mmc_enable_sdio_irq(host->mmc, 0);
+ goto out;
+ }
+
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
+ sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
+ mmc_signal_sdio_irq(host->mmc);
+
+ if (sdio_ireg)
+ goto out;
+ }
+
+ pr_debug_status(status);
+ pr_debug_status(ireg);
+
+ if (!ireg) {
+ tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);
+
+ pr_warning("tmio_mmc: Spurious irq, disabling! "
+ "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
+ pr_debug_status(status);
+
+ goto out;
+ }
+
+ while (ireg) {
+ /* Card insert / remove attempts */
+ if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
+ TMIO_STAT_CARD_REMOVE);
+ mmc_detect_change(host->mmc, msecs_to_jiffies(100));
+ }
+
+ /* CRC and other errors */
+/* if (ireg & TMIO_STAT_ERR_IRQ)
+ * handled |= tmio_error_irq(host, irq, stat);
+ */
+
+ /* Command completion */
+ if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
+ tmio_mmc_ack_mmc_irqs(host,
+ TMIO_STAT_CMDRESPEND |
+ TMIO_STAT_CMDTIMEOUT);
+ tmio_mmc_cmd_irq(host, status);
+ }
+
+ /* Data transfer */
+ if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
+ tmio_mmc_pio_irq(host);
+ }
+
+ /* Data transfer completion */
+ if (ireg & TMIO_STAT_DATAEND) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tmio_mmc_data_irq(host);
+ }
+
+ /* Check status - keep going until we've handled it all */
+ status = sd_ctrl_read32(host, CTL_STATUS);
+ irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
+ ireg = status & TMIO_MASK_IRQ & ~irq_mask;
+
+ pr_debug("Status at end of loop: %08x\n", status);
+ pr_debug_status(status);
+ }
+ pr_debug("MMC IRQ end\n");
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int tmio_mmc_start_data(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ struct tmio_mmc_data *pdata = host->pdata;
+
+ pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
+ data->blksz, data->blocks);
+
+ /* Some hardware cannot perform 2 byte requests in 4 bit mode */
+ if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
+ int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
+
+ if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
+ pr_err("%s: %d byte block unsupported in 4 bit mode\n",
+ mmc_hostname(host->mmc), data->blksz);
+ return -EINVAL;
+ }
+ }
+
+ tmio_mmc_init_sg(host, data);
+ host->data = data;
+
+ /* Set transfer length / blocksize */
+ sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
+ sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+
+ tmio_mmc_start_dma(host, data);
+
+ return 0;
+}
+
+/* Process requests from the MMC layer */
+static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ int ret;
+
+ if (host->mrq)
+ pr_debug("request not null\n");
+
+ host->last_req_ts = jiffies;
+ wmb();
+ host->mrq = mrq;
+
+ if (mrq->data) {
+ ret = tmio_mmc_start_data(host, mrq->data);
+ if (ret)
+ goto fail;
+ }
+
+ ret = tmio_mmc_start_command(host, mrq->cmd);
+ if (!ret) {
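+		/* Arm the lost-interrupt watchdog; tmio_mmc_finish_request() cancels it */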
+ schedule_delayed_work(&host->delayed_reset_work,
+ msecs_to_jiffies(2000));
+ return;
+ }
+
+fail:
+ host->mrq = NULL;
+ host->force_pio = false;
+ mrq->cmd->error = ret;
+ mmc_request_done(mmc, mrq);
+}
+
+/* Set MMC clock / power.
+ * Note: This controller uses a simple divider scheme, therefore it cannot
+ * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
+ * MMC won't run that fast, it has to be clocked at 12MHz, which is the next
+ * slowest setting.
+ */
+static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ if (ios->clock)
+ tmio_mmc_set_clock(host, ios->clock);
+
+ /* Power sequence - OFF -> UP -> ON */
+ if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
+ /* power down SD bus */
+ if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
+ host->set_pwr(host->pdev, 0);
+ tmio_mmc_clk_stop(host);
+ } else if (ios->power_mode == MMC_POWER_UP) {
+ /* power up SD bus */
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 1);
+ } else {
+ /* start bus clock */
+ tmio_mmc_clk_start(host);
+ }
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
+ break;
+ case MMC_BUS_WIDTH_4:
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
+ break;
+ }
+
+	/* Let things settle. Delay taken from the WinCE driver */
+ udelay(140);
+}
+
+static int tmio_mmc_get_ro(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_data *pdata = host->pdata;
+
+ return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
+}
+
+static int tmio_mmc_get_cd(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_data *pdata = host->pdata;
+
+ if (!pdata->get_cd)
+ return -ENOSYS;
+ else
+ return pdata->get_cd(host->pdev);
+}
+
+static const struct mmc_host_ops tmio_mmc_ops = {
+ .request = tmio_mmc_request,
+ .set_ios = tmio_mmc_set_ios,
+ .get_ro = tmio_mmc_get_ro,
+ .get_cd = tmio_mmc_get_cd,
+ .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
+};
+
+int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
+ struct platform_device *pdev,
+ struct tmio_mmc_data *pdata)
+{
+ struct tmio_mmc_host *_host;
+ struct mmc_host *mmc;
+ struct resource *res_ctl;
+ int ret;
+ u32 irq_mask = TMIO_MASK_CMD;
+
+ res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_ctl)
+ return -EINVAL;
+
+ mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
+ if (!mmc)
+ return -ENOMEM;
+
+ _host = mmc_priv(mmc);
+ _host->pdata = pdata;
+ _host->mmc = mmc;
+ _host->pdev = pdev;
+ platform_set_drvdata(pdev, mmc);
+
+ _host->set_pwr = pdata->set_pwr;
+ _host->set_clk_div = pdata->set_clk_div;
+
+ /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
+ _host->bus_shift = resource_size(res_ctl) >> 10;
+
+ _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
+ if (!_host->ctl) {
+ ret = -ENOMEM;
+ goto host_free;
+ }
+
+ mmc->ops = &tmio_mmc_ops;
+ mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->f_max = pdata->hclk;
+ mmc->f_min = mmc->f_max / 512;
+ mmc->max_segs = 32;
+ mmc->max_blk_size = 512;
+ mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+ mmc->max_segs;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+ if (pdata->ocr_mask)
+ mmc->ocr_avail = pdata->ocr_mask;
+ else
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ tmio_mmc_clk_stop(_host);
+ tmio_mmc_reset(_host);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto unmap_ctl;
+
+ _host->irq = ret;
+
+ tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
+ if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+ tmio_mmc_enable_sdio_irq(mmc, 0);
+
+ ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED |
+ IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host);
+ if (ret)
+ goto unmap_ctl;
+
+ spin_lock_init(&_host->lock);
+
+ /* Init delayed work for request timeouts */
+ INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
+
+ /* See if we also get DMA */
+ tmio_mmc_request_dma(_host, pdata);
+
+ mmc_add_host(mmc);
+
+ /* Unmask the IRQs we want to know about */
+ if (!_host->chan_rx)
+ irq_mask |= TMIO_MASK_READOP;
+ if (!_host->chan_tx)
+ irq_mask |= TMIO_MASK_WRITEOP;
+
+ tmio_mmc_enable_mmc_irqs(_host, irq_mask);
+
+ *host = _host;
+
+ return 0;
+
+unmap_ctl:
+ iounmap(_host->ctl);
+host_free:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+EXPORT_SYMBOL(tmio_mmc_host_probe);
+
+void tmio_mmc_host_remove(struct tmio_mmc_host *host)
+{
+ mmc_remove_host(host->mmc);
+ cancel_delayed_work_sync(&host->delayed_reset_work);
+ tmio_mmc_release_dma(host);
+ free_irq(host->irq, host);
+ iounmap(host->ctl);
+ mmc_free_host(host->mmc);
+}
+EXPORT_SYMBOL(tmio_mmc_host_remove);
+
+MODULE_LICENSE("GPL v2");
struct mmc_host *mmc;
struct via_crdr_mmc_host *sdhost;
u32 base, len;
- u8 rev, gatt;
+ u8 gatt;
int ret;
- pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev);
pr_info(DRV_NAME
": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
- (int)rev);
+ (int)pcidev->revision);
ret = pci_enable_device(pcidev);
if (ret)
if (!eeprom_buff)
return -ENOMEM;
- ptr = (u32 *)eeprom_buff;
+ ptr = eeprom_buff;
if (eeprom->offset & 3) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
- if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0])))
- return -EIO;
+ if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) {
+ ret_val = -EIO;
+ goto out;
+ }
ptr++;
}
if (((eeprom->offset + eeprom->len) & 3)) {
* only the first byte of the word is being modified
*/
if (!atl2_read_eeprom(hw, last_dword * 4,
- &(eeprom_buff[last_dword - first_dword])))
- return -EIO;
+ &(eeprom_buff[last_dword - first_dword]))) {
+ ret_val = -EIO;
+ goto out;
+ }
}
/* Device's eeprom is always little-endian, word addressable */
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_dword - first_dword + 1; i++) {
- if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i]))
- return -EIO;
+ if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) {
+ ret_val = -EIO;
+ goto out;
+ }
}
-
+ out:
kfree(eeprom_buff);
return ret_val;
}
if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
opmode |= RMII; /* For Now only 100MBit are supported */
-#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
- opmode |= TE;
+#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
+ if (__SILICON_REVISION__ < 3) {
+ /*
+ * This isn't publicly documented (fun times!), but in
+ * silicon <=0.2, the RX and TX pins are clocked together.
+ * So in order to recv, we must enable the transmit side
+ * as well. This will cause a spurious TX interrupt too,
+ * but we can easily consume that.
+ */
+ opmode |= TE;
+ }
#endif
}
#endif
};
-static void inline vlan_features_add(struct net_device *dev, u32 flags)
+static inline void vlan_features_add(struct net_device *dev, u32 flags)
{
dev->vlan_features |= flags;
}
* packets to a Client that the Hash function
* gave this entry index.
*/
- u32 tx_bytes; /* Each Client acumulates the BytesTx that
+ u32 tx_bytes; /* Each Client accumulates the BytesTx that
* were tranmitted to it, and after each
* CallBack the LoadHistory is devided
* by the balance interval
{
struct c_can_priv *priv = netdev_priv(dev);
- if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
- /* disable automatic retransmission */
- priv->write_reg(priv, &priv->regs->control,
- CONTROL_DISABLE_AR);
- else
- /* enable automatic retransmission */
- priv->write_reg(priv, &priv->regs->control,
- CONTROL_ENABLE_AR);
+ /* enable automatic retransmission */
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_ENABLE_AR);
if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
CAN_CTRLMODE_LOOPBACK)) {
for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
msg_obj_no = get_tx_echo_msg_obj(priv);
- c_can_inval_msg_object(dev, 0, msg_obj_no);
val = c_can_read_reg32(priv, &priv->regs->txrqst1);
if (!(val & (1 << msg_obj_no))) {
can_get_echo_skb(dev,
&priv->regs->ifregs[0].msg_cntrl)
& IF_MCONT_DLC_MASK;
stats->tx_packets++;
+ c_can_inval_msg_object(dev, 0, msg_obj_no);
}
}
priv->can.bittiming_const = &c_can_bittiming_const;
priv->can.do_set_mode = c_can_set_mode;
priv->can.do_get_berr_counter = c_can_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT |
- CAN_CTRLMODE_LOOPBACK |
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING;
void __iomem *addr;
struct net_device *dev;
struct c_can_priv *priv;
- struct resource *mem, *irq;
+ struct resource *mem;
+ int irq;
#ifdef CONFIG_HAVE_CLK
struct clk *clk;
/* get the platform data */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!mem || (irq <= 0)) {
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq <= 0) {
ret = -ENODEV;
goto exit_free_clk;
}
priv = netdev_priv(dev);
- dev->irq = irq->start;
+ dev->irq = irq;
priv->regs = addr;
#ifdef CONFIG_HAVE_CLK
priv->can.clock.freq = clk_get_rate(clk);
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- struct qset_params *qsp = &adapter->params.sge.qset[0];
- struct sge_qset *qs = &adapter->sge.qs[0];
+ struct qset_params *qsp;
+ struct sge_qset *qs;
+ int i;
if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
return -EINVAL;
- qsp->coalesce_usecs = c->rx_coalesce_usecs;
- t3_update_qset_coalesce(qs, qsp);
+ for (i = 0; i < pi->nqsets; i++) {
+ qsp = &adapter->params.sge.qset[i];
+ qs = &adapter->sge.qs[i];
+ qsp->coalesce_usecs = c->rx_coalesce_usecs;
+ t3_update_qset_coalesce(qs, qsp);
+ }
+
return 0;
}
/* change in wol state, update IRQ state */
if (!dm->wake_state)
- set_irq_wake(dm->irq_wake, 1);
+ irq_set_irq_wake(dm->irq_wake, 1);
else if (dm->wake_state & !opts)
- set_irq_wake(dm->irq_wake, 0);
+ irq_set_irq_wake(dm->irq_wake, 0);
}
dm->wake_state = opts;
} else {
/* test to see if irq is really wakeup capable */
- ret = set_irq_wake(db->irq_wake, 1);
+ ret = irq_set_irq_wake(db->irq_wake, 1);
if (ret) {
dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
db->irq_wake, ret);
ret = 0;
} else {
- set_irq_wake(db->irq_wake, 0);
+ irq_set_irq_wake(db->irq_wake, 0);
db->wake_supported = 1;
}
}
/* We can't guess the type of connected dongle, user *must* supply it. */
module_param(dongle_id, int, 0);
-/* FIXME : we should not need this, because instances should be automatically
- * managed by the PCI layer. Especially that we seem to only be using the
- * first entry. Jean II */
-/* Max 4 instances for now */
-static struct via_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
-
/* Some prototypes */
-static int via_ircc_open(int i, chipio_t * info, unsigned int id);
-static int via_ircc_close(struct via_ircc_cb *self);
+static int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
+ unsigned int id);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
int iobase);
pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
pci_write_config_byte(pcidev,0x5a,0xc0);
WriteLPCReg(0x28, 0x70 );
- if (via_ircc_open(0, &info,0x3076) == 0)
+ if (via_ircc_open(pcidev, &info, 0x3076) == 0)
rc=0;
} else
rc = -ENODEV; //IR not turn on
info.irq=FirIRQ;
info.dma=FirDRQ1;
info.dma2=FirDRQ0;
- if (via_ircc_open(0, &info,0x3096) == 0)
+ if (via_ircc_open(pcidev, &info, 0x3096) == 0)
rc=0;
} else
rc = -ENODEV; //IR not turn on !!!!!
return rc;
}
-/*
- * Function via_ircc_clean ()
- *
- * Close all configured chips
- *
- */
-static void via_ircc_clean(void)
-{
- int i;
-
- IRDA_DEBUG(3, "%s()\n", __func__);
-
- for (i=0; i < ARRAY_SIZE(dev_self); i++) {
- if (dev_self[i])
- via_ircc_close(dev_self[i]);
- }
-}
-
-static void __devexit via_remove_one (struct pci_dev *pdev)
-{
- IRDA_DEBUG(3, "%s()\n", __func__);
-
- /* FIXME : This is ugly. We should use pci_get_drvdata(pdev);
- * to get our driver instance and call directly via_ircc_close().
- * See vlsi_ir for details...
- * Jean II */
- via_ircc_clean();
-
- /* FIXME : This should be in via_ircc_close(), because here we may
- * theoritically disable still configured devices :-( - Jean II */
- pci_disable_device(pdev);
-}
-
static void __exit via_ircc_cleanup(void)
{
IRDA_DEBUG(3, "%s()\n", __func__);
- /* FIXME : This should be redundant, as pci_unregister_driver()
- * should call via_remove_one() on each device.
- * Jean II */
- via_ircc_clean();
-
/* Cleanup all instances of the driver */
pci_unregister_driver (&via_driver);
}
};
/*
- * Function via_ircc_open (iobase, irq)
+ * Function via_ircc_open(pdev, info, id)
*
* Open driver instance
*
*/
-static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
+static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
+ unsigned int id)
{
struct net_device *dev;
struct via_ircc_cb *self;
IRDA_DEBUG(3, "%s()\n", __func__);
- if (i >= ARRAY_SIZE(dev_self))
- return -ENOMEM;
-
/* Allocate new instance of the driver */
dev = alloc_irdadev(sizeof(struct via_ircc_cb));
if (dev == NULL)
self->netdev = dev;
spin_lock_init(&self->lock);
- /* FIXME : We should store our driver instance in the PCI layer,
- * using pci_set_drvdata(), not in this array.
- * See vlsi_ir for details... - Jean II */
- /* FIXME : 'i' is always 0 (see via_init_one()) :-( - Jean II */
- /* Need to store self somewhere */
- dev_self[i] = self;
- self->index = i;
+ pci_set_drvdata(pdev, self);
+
/* Initialize Resource */
self->io.cfg_base = info->cfg_base;
self->io.fir_base = info->fir_base;
/* Allocate memory if needed */
self->rx_buff.head =
- dma_alloc_coherent(NULL, self->rx_buff.truesize,
+ dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
- dma_alloc_coherent(NULL, self->tx_buff.truesize,
+ dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
via_hw_init(self);
return 0;
err_out4:
- dma_free_coherent(NULL, self->tx_buff.truesize,
+ dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
err_out3:
- dma_free_coherent(NULL, self->rx_buff.truesize,
+ dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
err_out2:
release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
+ pci_set_drvdata(pdev, NULL);
free_netdev(dev);
- dev_self[i] = NULL;
return err;
}
/*
- * Function via_ircc_close (self)
+ * Function via_remove_one(pdev)
*
* Close driver instance
*
*/
-static int via_ircc_close(struct via_ircc_cb *self)
+static void __devexit via_remove_one(struct pci_dev *pdev)
{
+ struct via_ircc_cb *self = pci_get_drvdata(pdev);
int iobase;
IRDA_DEBUG(3, "%s()\n", __func__);
- IRDA_ASSERT(self != NULL, return -1;);
-
iobase = self->io.fir_base;
ResetChip(iobase, 5); //hardware reset.
__func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
- dma_free_coherent(NULL, self->tx_buff.truesize,
+ dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
if (self->rx_buff.head)
- dma_free_coherent(NULL, self->rx_buff.truesize,
+ dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
- dev_self[self->index] = NULL;
+ pci_set_drvdata(pdev, NULL);
free_netdev(self->netdev);
- return 0;
+ pci_disable_device(pdev);
}
/*
{
jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
pci_set_power_state(jme->pdev, PCI_D0);
- pci_enable_wake(jme->pdev, PCI_D0, false);
+ device_set_wakeup_enable(&jme->pdev->dev, false);
}
static int
jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+ device_set_wakeup_enable(&jme->pdev->dev, jme->reg_pmcs);
+
return 0;
}
}
#ifdef CONFIG_PM
-static int
-jme_suspend(struct pci_dev *pdev, pm_message_t state)
+static int jme_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
tasklet_hi_enable(&jme->rxclean_task);
tasklet_hi_enable(&jme->rxempty_task);
- pci_save_state(pdev);
jme_powersave_phy(jme);
- pci_enable_wake(jme->pdev, PCI_D3hot, true);
- pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-static int
-jme_resume(struct pci_dev *pdev)
+static int jme_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
- jme_clear_pm(jme);
- pci_restore_state(pdev);
+ jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
+#define JME_PM_OPS (&jme_pm_ops)
+
+#else
+
+#define JME_PM_OPS NULL
#endif
static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
.id_table = jme_pci_tbl,
.probe = jme_init_one,
.remove = __devexit_p(jme_remove_one),
-#ifdef CONFIG_PM
- .suspend = jme_suspend,
- .resume = jme_resume,
-#endif /* CONFIG_PM */
.shutdown = jme_shutdown,
+ .driver.pm = JME_PM_OPS,
};
static int __init
goto unlock;
}
skb_copy_and_csum_dev(org_skb, skb->data);
- org_skb->ip_summed = 0;
+ org_skb->ip_summed = CHECKSUM_NONE;
skb->len = org_skb->len;
copy_old_skb(org_skb, skb);
}
0, MLX4_PROT_ETH))
mlx4_warn(mdev, "Failed Attaching Broadcast\n");
+ /* Must redo promiscuous mode setup. */
+ priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
+
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->mcast_task);
}
for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
- err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
+ err = mlx4_create_eq(dev, dev->caps.num_cqs -
+ dev->caps.reserved_cqs +
+ MLX4_NUM_SPARE_EQE,
(dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
&priv->eq_table.eq[i]);
if (err) {
/*remove from list of promisc qps */
list_del(&pqp->list);
- kfree(pqp);
/* set the default entry not to include the removed one */
mailbox = mlx4_alloc_cmd_mailbox(dev);
out_list:
if (back_to_list)
list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
+ else
+ kfree(pqp);
out_mutex:
mutex_unlock(&priv->mcg_table.mutex);
return err;
* page into an skb */
static inline int
-myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
- int bytes, int len, __wsum csum)
+myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
+ int lro_enabled)
{
struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
- int i, idx, hlen, remainder;
+ struct myri10ge_rx_buf *rx;
+ int i, idx, hlen, remainder, bytes;
struct pci_dev *pdev = mgp->pdev;
struct net_device *dev = mgp->dev;
u8 *va;
+ if (len <= mgp->small_bytes) {
+ rx = &ss->rx_small;
+ bytes = mgp->small_bytes;
+ } else {
+ rx = &ss->rx_big;
+ bytes = mgp->big_bytes;
+ }
+
len += MXGEFW_PAD;
idx = rx->cnt & rx->mask;
va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
remainder -= MYRI10GE_ALLOC_SIZE;
}
- if (dev->features & NETIF_F_LRO) {
+ if (lro_enabled) {
rx_frags[0].page_offset += MXGEFW_PAD;
rx_frags[0].size -= MXGEFW_PAD;
len -= MXGEFW_PAD;
{
struct myri10ge_rx_done *rx_done = &ss->rx_done;
struct myri10ge_priv *mgp = ss->mgp;
- struct net_device *netdev = mgp->dev;
+
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long rx_ok;
u16 length;
__wsum checksum;
+ /*
+ * Prevent compiler from generating more than one ->features memory
+ * access to avoid theoretical race condition with functions that
+ * change NETIF_F_LRO flag at runtime.
+ */
+ bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+
while (rx_done->entry[idx].length != 0 && work_done < budget) {
length = ntohs(rx_done->entry[idx].length);
rx_done->entry[idx].length = 0;
checksum = csum_unfold(rx_done->entry[idx].checksum);
- if (length <= mgp->small_bytes)
- rx_ok = myri10ge_rx_done(ss, &ss->rx_small,
- mgp->small_bytes,
- length, checksum);
- else
- rx_ok = myri10ge_rx_done(ss, &ss->rx_big,
- mgp->big_bytes,
- length, checksum);
+ rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
rx_packets += rx_ok;
rx_bytes += rx_ok * (unsigned long)length;
cnt++;
ss->stats.rx_packets += rx_packets;
ss->stats.rx_bytes += rx_bytes;
- if (netdev->features & NETIF_F_LRO)
+ if (lro_enabled)
lro_flush_all(&rx_done->lro_mgr);
/* restock receive rings if needed */
struct netxen_adapter *adapter = netdev_priv(netdev);
int hw_lro;
- if (data & ~ETH_FLAG_LRO)
+ if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
return -EINVAL;
if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
+ int err;
/* Assume that if there is no driver, that it doesn't
* exist, and we should use the genphy driver. */
if (NULL == d->driver) {
- int err;
d->driver = &genphy_driver.driver;
err = d->driver->probe(d);
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
- return phy_init_hw(phydev);
+ err = phy_init_hw(phydev);
+ if (err)
+ phy_detach(phydev);
+
+ return err;
}
/**
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int hw_lro;
- if (data & ~ETH_FLAG_LRO)
+ if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
return -EINVAL;
if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
int rc = 0;
int changed = 0;
- if (data & ~ETH_FLAG_LRO)
+ if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO))
return -EINVAL;
if (data & ETH_FLAG_LRO) {
#include <net/ip.h>
#include <asm/system.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#ifdef CONFIG_SPARC
#include <asm/idprom.h>
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
dev->vlan_features |= flags;
}
To compile this driver as a module, choose M here: the
module will be called sierra_net.
+config USB_VL600
+ tristate "LG VL600 modem dongle"
+ depends on USB_NET_CDCETHER
+ select USB_ACM
+ help
+	  Select this if you want to use an LG Electronics 4G/LTE USB modem
+ called VL600. This driver only handles the ethernet
+ interface exposed by the modem firmware. To establish a connection
+ you will first need a userspace program that sends the right
+ command to the modem through its CDC ACM port, and most
+ likely also a DHCP client. See this thread about using the
+ 4G modem from Verizon:
+
+ http://ubuntuforums.org/showpost.php?p=10589647&postcount=17
+
endmenu
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
+obj-$(CONFIG_USB_VL600) += lg-vl600.o
__le32_to_cpu(speeds[1]) / 1000);
}
-static void cdc_status(struct usbnet *dev, struct urb *urb)
+void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
{
struct usb_cdc_notification *event;
break;
}
}
+EXPORT_SYMBOL_GPL(usbnet_cdc_status);
-static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
+int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status;
struct cdc_state *info = (void *) &dev->data;
*/
return 0;
}
+EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
static int cdc_manage_power(struct usbnet *dev, int on)
{
.description = "CDC Ethernet Device",
.flags = FLAG_ETHER,
// .check_connect = cdc_check_connect,
- .bind = cdc_bind,
+ .bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
- .status = cdc_status,
+ .status = usbnet_cdc_status,
.manage_power = cdc_manage_power,
};
static const struct driver_info mbm_info = {
.description = "Mobile Broadband Network Device",
.flags = FLAG_WWAN,
- .bind = cdc_bind,
+ .bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
- .status = cdc_status,
+ .status = usbnet_cdc_status,
.manage_power = cdc_manage_power,
};
.driver_info = 0,
},
+/* LG Electronics VL600 wants additional headers on every frame */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/*
* WHITELIST!!!
*
--- /dev/null
+/*
+ * Ethernet interface part of the LG VL600 LTE modem (4G dongle)
+ *
+ * Copyright (C) 2011 Intel Corporation
+ * Author: Andrzej Zaborowski <balrogg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+
+/*
+ * The device has a CDC ACM port for modem control (it claims to be
+ * CDC ACM anyway) and a CDC Ethernet port for actual network data.
+ * It will however ignore data on both ports that is not encapsulated
+ * in a specific way, any data returned is also encapsulated the same
+ * way. The headers don't seem to follow any popular standard.
+ *
+ * This driver adds and strips these headers from the ethernet frames
+ * sent/received from the CDC Ethernet port. The proprietary header
+ * replaces the standard ethernet header in a packet so only actual
+ * ethernet frames are allowed. The headers allow some form of
+ * multiplexing by using non standard values of the .h_proto field.
+ * Windows/Mac drivers do send a couple of such frames to the device
+ * during initialisation, with protocol set to 0x0906 or 0x0b06 and (what
+ * seems to be) a flag in the .dummy_flags. This doesn't seem necessary
+ * for modem operation but can possibly be used for GPS or other functions.
+ */
+
+struct vl600_frame_hdr {
+ __le32 len;
+ __le32 serial;
+ __le32 pkt_cnt;
+ __le32 dummy_flags;
+ __le32 dummy;
+ __le32 magic;
+} __attribute__((packed));
+
+struct vl600_pkt_hdr {
+ __le32 dummy[2];
+ __le32 len;
+ __be16 h_proto;
+} __attribute__((packed));
+
+struct vl600_state {
+ struct sk_buff *current_rx_buf;
+};
+
+static int vl600_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int ret;
+ struct vl600_state *s = kzalloc(sizeof(struct vl600_state), GFP_KERNEL);
+
+ if (!s)
+ return -ENOMEM;
+
+ ret = usbnet_cdc_bind(dev, intf);
+ if (ret) {
+ kfree(s);
+ return ret;
+ }
+
+ dev->driver_priv = s;
+
+ /* ARP packets don't go through, but they're also of no use. The
+ * subnet has only two hosts anyway: us and the gateway / DHCP
+ * server (probably simulated by modem firmware or network operator)
+	 * whose address changes every time we connect to the intarwebz and
+	 * who doesn't bother answering ARP requests either.  So hardware
+	 * addresses have no meaning; the destination and the source of every
+ * packet depend only on whether it is on the IN or OUT endpoint. */
+ dev->net->flags |= IFF_NOARP;
+
+ return ret;
+}
+
+static void vl600_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct vl600_state *s = dev->driver_priv;
+
+ if (s->current_rx_buf)
+ dev_kfree_skb(s->current_rx_buf);
+
+ kfree(s);
+
+ return usbnet_cdc_unbind(dev, intf);
+}
+
+static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct vl600_frame_hdr *frame;
+ struct vl600_pkt_hdr *packet;
+ struct ethhdr *ethhdr;
+ int packet_len, count;
+ struct sk_buff *buf = skb;
+ struct sk_buff *clone;
+ struct vl600_state *s = dev->driver_priv;
+
+	/* Frame lengths are generally multiples of 4 bytes, but every couple
+	 * of hours an odd-sized yet otherwise correct frame shows up, so
+	 * don't require this. */
+
+	/* Allow a packet (or multiple packets batched together) to be
+	 * split across many frames.  We don't, however, allow a new batch
+	 * to begin in the same frame in which another one ends, nor any
+	 * leading or trailing pad bytes. */
+ if (s->current_rx_buf) {
+ frame = (struct vl600_frame_hdr *) s->current_rx_buf->data;
+ if (skb->len + s->current_rx_buf->len >
+ le32_to_cpup(&frame->len)) {
+ netif_err(dev, ifup, dev->net, "Fragment too long\n");
+ dev->net->stats.rx_length_errors++;
+ goto error;
+ }
+
+ buf = s->current_rx_buf;
+ memcpy(skb_put(buf, skb->len), skb->data, skb->len);
+ } else if (skb->len < 4) {
+ netif_err(dev, ifup, dev->net, "Frame too short\n");
+ dev->net->stats.rx_length_errors++;
+ goto error;
+ }
+
+ frame = (struct vl600_frame_hdr *) buf->data;
+ /* NOTE: Should check that frame->magic == 0x53544448?
+ * Otherwise if we receive garbage at the beginning of the frame
+ * we may end up allocating a huge buffer and saving all the
+ * future incoming data into it. */
+
+ if (buf->len < sizeof(*frame) ||
+ buf->len != le32_to_cpup(&frame->len)) {
+ /* Save this fragment for later assembly */
+ if (s->current_rx_buf)
+ return 0;
+
+ s->current_rx_buf = skb_copy_expand(skb, 0,
+ le32_to_cpup(&frame->len), GFP_ATOMIC);
+ if (!s->current_rx_buf) {
+ netif_err(dev, ifup, dev->net, "Reserving %i bytes "
+ "for packet assembly failed.\n",
+ le32_to_cpup(&frame->len));
+ dev->net->stats.rx_errors++;
+ }
+
+ return 0;
+ }
+
+ count = le32_to_cpup(&frame->pkt_cnt);
+
+ skb_pull(buf, sizeof(*frame));
+
+ while (count--) {
+ if (buf->len < sizeof(*packet)) {
+ netif_err(dev, ifup, dev->net, "Packet too short\n");
+ goto error;
+ }
+
+ packet = (struct vl600_pkt_hdr *) buf->data;
+ packet_len = sizeof(*packet) + le32_to_cpup(&packet->len);
+ if (packet_len > buf->len) {
+ netif_err(dev, ifup, dev->net,
+ "Bad packet length stored in header\n");
+ goto error;
+ }
+
+		/* The packet header is the same size as the ethernet header
+		 * (sizeof(*packet) == sizeof(*ethhdr)), and the h_proto field
+		 * is in the same place, so we just leave it alone and fill in
+		 * the remaining fields.
+		 */
+ ethhdr = (struct ethhdr *) skb->data;
+ if (be16_to_cpup(ðhdr->h_proto) == ETH_P_ARP &&
+ buf->len > 0x26) {
+ /* Copy the addresses from packet contents */
+ memcpy(ethhdr->h_source,
+ &buf->data[sizeof(*ethhdr) + 0x8],
+ ETH_ALEN);
+ memcpy(ethhdr->h_dest,
+ &buf->data[sizeof(*ethhdr) + 0x12],
+ ETH_ALEN);
+ } else {
+ memset(ethhdr->h_source, 0, ETH_ALEN);
+ memcpy(ethhdr->h_dest, dev->net->dev_addr, ETH_ALEN);
+ }
+
+ if (count) {
+ /* Not the last packet in this batch */
+ clone = skb_clone(buf, GFP_ATOMIC);
+ if (!clone)
+ goto error;
+
+ skb_trim(clone, packet_len);
+ usbnet_skb_return(dev, clone);
+
+ skb_pull(buf, (packet_len + 3) & ~3);
+ } else {
+ skb_trim(buf, packet_len);
+
+ if (s->current_rx_buf) {
+ usbnet_skb_return(dev, buf);
+ s->current_rx_buf = NULL;
+ return 0;
+ }
+
+ return 1;
+ }
+ }
+
+error:
+ if (s->current_rx_buf) {
+ dev_kfree_skb_any(s->current_rx_buf);
+ s->current_rx_buf = NULL;
+ }
+ dev->net->stats.rx_errors++;
+ return 0;
+}
+
+static struct sk_buff *vl600_tx_fixup(struct usbnet *dev,
+ struct sk_buff *skb, gfp_t flags)
+{
+ struct sk_buff *ret;
+ struct vl600_frame_hdr *frame;
+ struct vl600_pkt_hdr *packet;
+ static uint32_t serial = 1;
+ int orig_len = skb->len - sizeof(struct ethhdr);
+ int full_len = (skb->len + sizeof(struct vl600_frame_hdr) + 3) & ~3;
+
+ frame = (struct vl600_frame_hdr *) skb->data;
+ if (skb->len > sizeof(*frame) && skb->len == le32_to_cpup(&frame->len))
+ return skb; /* Already encapsulated? */
+
+ if (skb->len < sizeof(struct ethhdr))
+ /* Drop, device can only deal with ethernet packets */
+ return NULL;
+
+ if (!skb_cloned(skb)) {
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+
+ if (tailroom >= full_len - skb->len - sizeof(*frame) &&
+ headroom >= sizeof(*frame))
+ /* There's enough head and tail room */
+ goto encapsulate;
+
+ if (headroom + tailroom + skb->len >= full_len) {
+ /* There's enough total room, just readjust */
+ skb->data = memmove(skb->head + sizeof(*frame),
+ skb->data, skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ goto encapsulate;
+ }
+ }
+
+ /* Alloc a new skb with the required size */
+ ret = skb_copy_expand(skb, sizeof(struct vl600_frame_hdr), full_len -
+ skb->len - sizeof(struct vl600_frame_hdr), flags);
+ dev_kfree_skb_any(skb);
+ if (!ret)
+ return ret;
+ skb = ret;
+
+encapsulate:
+	/* The packet header is the same size as the ethernet packet header
+	 * (sizeof(*packet) == sizeof(struct ethhdr)), and the h_proto field
+	 * is in the same place, so we just leave it alone and overwrite the
+	 * remaining fields.
+	 */
+ packet = (struct vl600_pkt_hdr *) skb->data;
+ memset(&packet->dummy, 0, sizeof(packet->dummy));
+ packet->len = cpu_to_le32(orig_len);
+
+ frame = (struct vl600_frame_hdr *) skb_push(skb, sizeof(*frame));
+ memset(frame, 0, sizeof(*frame));
+ frame->len = cpu_to_le32(full_len);
+ frame->serial = cpu_to_le32(serial++);
+ frame->pkt_cnt = cpu_to_le32(1);
+
+ if (skb->len < full_len) /* Pad */
+ skb_put(skb, full_len - skb->len);
+
+ return skb;
+}
+
+static const struct driver_info vl600_info = {
+ .description = "LG VL600 modem",
+ .flags = FLAG_ETHER | FLAG_RX_ASSEMBLE,
+ .bind = vl600_bind,
+ .unbind = vl600_unbind,
+ .status = usbnet_cdc_status,
+ .rx_fixup = vl600_rx_fixup,
+ .tx_fixup = vl600_tx_fixup,
+};
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &vl600_info,
+ },
+ {}, /* End */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver lg_vl600_driver = {
+ .name = "lg-vl600",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+};
+
+static int __init vl600_init(void)
+{
+ return usb_register(&lg_vl600_driver);
+}
+module_init(vl600_init);
+
+static void __exit vl600_exit(void)
+{
+ usb_deregister(&lg_vl600_driver);
+}
+module_exit(vl600_exit);
+
+MODULE_AUTHOR("Andrzej Zaborowski");
+MODULE_DESCRIPTION("LG-VL600 modem's ethernet link");
+MODULE_LICENSE("GPL");
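
As a quick reference, the lg-vl600.c structures above imply that each Ethernet frame goes on the wire wrapped in a 24-byte vl600_frame_hdr, with its own 14-byte Ethernet header reused as the vl600_pkt_hdr (the two are the same size and keep h_proto at the same offset), and the whole frame padded to a 4-byte boundary. A minimal sketch of the resulting length arithmetic (not part of the patch; the helper name is hypothetical):

	/* Sketch only: mirrors the size calculation in vl600_tx_fixup().
	 * eth_len is the length of the original Ethernet frame, including
	 * the 14-byte header that doubles as the packet header. */
	static unsigned int vl600_wire_len(unsigned int eth_len)
	{
		return (eth_len + sizeof(struct vl600_frame_hdr) + 3) & ~3U;
	}

For example, a 60-byte Ethernet frame becomes an 84-byte frame on the wire (60 + 24, already 4-byte aligned, so no padding is needed).
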
static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
if (dev->driver_info->rx_fixup &&
- !dev->driver_info->rx_fixup (dev, skb))
- goto error;
+ !dev->driver_info->rx_fixup (dev, skb)) {
+ /* With RX_ASSEMBLE, rx_fixup() must update counters */
+ if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
+ dev->net->stats.rx_errors++;
+ goto done;
+ }
// else network stack removes extra byte if we forced a short packet
if (skb->len) {
}
netif_dbg(dev, rx_err, dev->net, "drop\n");
-error:
dev->net->stats.rx_errors++;
+done:
skb_queue_tail(&dev->done, skb);
}
u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
unsigned long flags;
- if (data & ~ETH_FLAG_LRO)
- return -EOPNOTSUPP;
+ if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
+ return -EINVAL;
if (lro_requested ^ lro_present) {
/* toggle the LRO feature*/
struct vxgedev *vdev = netdev_priv(dev);
enum vxge_hw_status status;
- if (data & ~ETH_FLAG_RXHASH)
- return -EOPNOTSUPP;
+ if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH))
+ return -EINVAL;
if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
return 0;
goto err_free_common;
}
- set_irq_type(gpio_to_irq(p54spi_gpio_irq),
- IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING);
disable_irq(gpio_to_irq(p54spi_gpio_irq));
goto disable;
}
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
disable_irq(wl->irq);
wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
goto out_free;
}
- set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
disable_irq(wl->irq);
/* Reserve IRQ2 */
setup_irq(2, &irq2_action);
for (i = 0; i < 16; i++) {
- set_irq_chip_and_handler(i, &eisa_interrupt_type,
+ irq_set_chip_and_handler(i, &eisa_interrupt_type,
handle_simple_irq);
}
if (irq > GSC_IRQ_MAX)
return NO_IRQ;
- set_irq_chip_and_handler(irq, type, handle_simple_irq);
- set_irq_chip_data(irq, data);
+ irq_set_chip_and_handler(irq, type, handle_simple_irq);
+ irq_set_chip_data(irq, data);
return irq++;
}
#endif
for (i = 0; i < 16; i++) {
- set_irq_chip_and_handler(i, &superio_interrupt_type, handle_simple_irq);
+ irq_set_chip_and_handler(i, &superio_interrupt_type,
+ handle_simple_irq);
}
/*
void dmar_msi_unmask(struct irq_data *data)
{
- struct intel_iommu *iommu = irq_data_get_irq_data(data);
+ struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
unsigned long flag;
/* unmask it */
void dmar_msi_mask(struct irq_data *data)
{
unsigned long flag;
- struct intel_iommu *iommu = irq_data_get_irq_data(data);
+ struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
/* mask it */
spin_lock_irqsave(&iommu->register_lock, flag);
void dmar_msi_write(int irq, struct msi_msg *msg)
{
- struct intel_iommu *iommu = get_irq_data(irq);
+ struct intel_iommu *iommu = irq_get_handler_data(irq);
unsigned long flag;
spin_lock_irqsave(&iommu->register_lock, flag);
void dmar_msi_read(int irq, struct msi_msg *msg)
{
- struct intel_iommu *iommu = get_irq_data(irq);
+ struct intel_iommu *iommu = irq_get_handler_data(irq);
unsigned long flag;
spin_lock_irqsave(&iommu->register_lock, flag);
return -EINVAL;
}
- set_irq_data(irq, iommu);
+ irq_set_handler_data(irq, iommu);
iommu->irq = irq;
ret = arch_setup_dmar_msi(irq);
if (ret) {
- set_irq_data(irq, NULL);
+ irq_set_handler_data(irq, NULL);
iommu->irq = 0;
destroy_irq(irq);
return ret;
void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
- struct ht_irq_cfg *cfg = get_irq_data(irq);
+ struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
unsigned long flags;
spin_lock_irqsave(&ht_irq_lock, flags);
if (cfg->msg.address_lo != msg->address_lo) {
void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
- struct ht_irq_cfg *cfg = get_irq_data(irq);
+ struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
*msg = cfg->msg;
}
void mask_ht_irq(struct irq_data *data)
{
- struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
+ struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
struct ht_irq_msg msg = cfg->msg;
msg.address_lo |= 1;
void unmask_ht_irq(struct irq_data *data)
{
- struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
+ struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
struct ht_irq_msg msg = cfg->msg;
msg.address_lo &= ~1;
kfree(cfg);
return -EBUSY;
}
- set_irq_data(irq, cfg);
+ irq_set_handler_data(irq, cfg);
if (arch_setup_ht_irq(irq, dev) < 0) {
ht_destroy_irq(irq);
{
struct ht_irq_cfg *cfg;
- cfg = get_irq_data(irq);
- set_irq_chip(irq, NULL);
- set_irq_data(irq, NULL);
+ cfg = irq_get_handler_data(irq);
+ irq_set_chip(irq, NULL);
+ irq_set_handler_data(irq, NULL);
destroy_irq(irq);
kfree(cfg);
iommu_disable_translation(iommu);
if (iommu->irq) {
- set_irq_data(iommu->irq, NULL);
+ irq_set_handler_data(iommu->irq, NULL);
/* This will mask the irq */
free_irq(iommu->irq, iommu);
destroy_irq(iommu->irq);
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
- struct irq_cfg *cfg = get_irq_chip_data(irq);
+ struct irq_cfg *cfg = irq_get_chip_data(irq);
return cfg ? &cfg->irq_2_iommu : NULL;
}
void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
- struct msi_desc *entry = get_irq_msi(irq);
+ struct msi_desc *entry = irq_get_msi_desc(irq);
__read_msi_msg(entry, msg);
}
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
- struct msi_desc *entry = get_irq_msi(irq);
+ struct msi_desc *entry = irq_get_msi_desc(irq);
__get_cached_msi_msg(entry, msg);
}
void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
- struct msi_desc *entry = get_irq_msi(irq);
+ struct msi_desc *entry = irq_get_msi_desc(irq);
__write_msi_msg(entry, msg);
}
if (!dev->msi_enabled)
return;
- entry = get_irq_msi(dev->irq);
+ entry = irq_get_msi_desc(dev->irq);
pos = entry->msi_attrib.pos;
pci_intx_for_msi(dev, 0);
PCI_MSIX_ENTRY_VECTOR_CTRL;
entries[i].vector = entry->irq;
- set_irq_msi(entry->irq, entry);
+ irq_set_msi_desc(entry->irq, entry);
entry->masked = readl(entry->mask_base + offset);
msix_mask_irq(entry, 1);
i++;
cf->irq = irq;
cf->socket.pci_irq = irq;
- set_irq_type(irq, IRQF_TRIGGER_LOW);
+ irq_set_irq_type(irq, IRQF_TRIGGER_LOW);
io_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
attr_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
/* all other (older) Db1x00 boards use a GPIO to show
* card detection status: use both-edge triggers.
*/
- set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH);
ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq,
0, "pcmcia_carddetect", sock);
#define COLIBRI320_DETECT_GPIO 81
#define COLIBRI320_READY_GPIO 29
-static struct {
- int reset_gpio;
- int ppen_gpio;
- int bvd1_gpio;
- int bvd2_gpio;
- int detect_gpio;
- int ready_gpio;
-} colibri_pcmcia_gpio;
+enum {
+ DETECT = 0,
+ READY = 1,
+ BVD1 = 2,
+ BVD2 = 3,
+ PPEN = 4,
+ RESET = 5,
+};
+
+/* Contents of this array are configured on-the-fly in init function */
+static struct gpio colibri_pcmcia_gpios[] = {
+ { 0, GPIOF_IN, "PCMCIA Detect" },
+ { 0, GPIOF_IN, "PCMCIA Ready" },
+ { 0, GPIOF_IN, "PCMCIA BVD1" },
+ { 0, GPIOF_IN, "PCMCIA BVD2" },
+ { 0, GPIOF_INIT_LOW, "PCMCIA PPEN" },
+ { 0, GPIOF_INIT_HIGH,"PCMCIA Reset" },
+};
static struct pcmcia_irqs colibri_irqs[] = {
{
{
int ret;
- ret = gpio_request(colibri_pcmcia_gpio.detect_gpio, "DETECT");
+ ret = gpio_request_array(colibri_pcmcia_gpios,
+ ARRAY_SIZE(colibri_pcmcia_gpios));
if (ret)
goto err1;
- ret = gpio_direction_input(colibri_pcmcia_gpio.detect_gpio);
- if (ret)
- goto err2;
-
- ret = gpio_request(colibri_pcmcia_gpio.ready_gpio, "READY");
- if (ret)
- goto err2;
- ret = gpio_direction_input(colibri_pcmcia_gpio.ready_gpio);
- if (ret)
- goto err3;
- ret = gpio_request(colibri_pcmcia_gpio.bvd1_gpio, "BVD1");
- if (ret)
- goto err3;
- ret = gpio_direction_input(colibri_pcmcia_gpio.bvd1_gpio);
- if (ret)
- goto err4;
+ colibri_irqs[0].irq = gpio_to_irq(colibri_pcmcia_gpios[DETECT].gpio);
+ skt->socket.pci_irq = gpio_to_irq(colibri_pcmcia_gpios[READY].gpio);
- ret = gpio_request(colibri_pcmcia_gpio.bvd2_gpio, "BVD2");
- if (ret)
- goto err4;
- ret = gpio_direction_input(colibri_pcmcia_gpio.bvd2_gpio);
- if (ret)
- goto err5;
-
- ret = gpio_request(colibri_pcmcia_gpio.ppen_gpio, "PPEN");
- if (ret)
- goto err5;
- ret = gpio_direction_output(colibri_pcmcia_gpio.ppen_gpio, 0);
- if (ret)
- goto err6;
-
- ret = gpio_request(colibri_pcmcia_gpio.reset_gpio, "RESET");
- if (ret)
- goto err6;
- ret = gpio_direction_output(colibri_pcmcia_gpio.reset_gpio, 1);
+ ret = soc_pcmcia_request_irqs(skt, colibri_irqs,
+ ARRAY_SIZE(colibri_irqs));
if (ret)
- goto err7;
-
- colibri_irqs[0].irq = gpio_to_irq(colibri_pcmcia_gpio.detect_gpio);
- skt->socket.pci_irq = gpio_to_irq(colibri_pcmcia_gpio.ready_gpio);
+ goto err2;
- return soc_pcmcia_request_irqs(skt, colibri_irqs,
- ARRAY_SIZE(colibri_irqs));
+ return ret;
-err7:
- gpio_free(colibri_pcmcia_gpio.detect_gpio);
-err6:
- gpio_free(colibri_pcmcia_gpio.ready_gpio);
-err5:
- gpio_free(colibri_pcmcia_gpio.bvd1_gpio);
-err4:
- gpio_free(colibri_pcmcia_gpio.bvd2_gpio);
-err3:
- gpio_free(colibri_pcmcia_gpio.reset_gpio);
err2:
- gpio_free(colibri_pcmcia_gpio.ppen_gpio);
+ gpio_free_array(colibri_pcmcia_gpios,
+ ARRAY_SIZE(colibri_pcmcia_gpios));
err1:
return ret;
}
static void colibri_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
- gpio_free(colibri_pcmcia_gpio.detect_gpio);
- gpio_free(colibri_pcmcia_gpio.ready_gpio);
- gpio_free(colibri_pcmcia_gpio.bvd1_gpio);
- gpio_free(colibri_pcmcia_gpio.bvd2_gpio);
- gpio_free(colibri_pcmcia_gpio.reset_gpio);
- gpio_free(colibri_pcmcia_gpio.ppen_gpio);
+ gpio_free_array(colibri_pcmcia_gpios,
+ ARRAY_SIZE(colibri_pcmcia_gpios));
}
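
The GPIO conversion above, and the Palm and Voipac hunks that follow, rely on the same property of gpio_request_array(): it claims every GPIO in the table and, if any single request fails, releases the ones it already claimed before returning the error, which is why the long goto-based error ladders and the per-GPIO gpio_free() calls collapse into one call each way. A minimal sketch of the idiom (not from the patch; GPIO numbers and labels are made up):

	#include <linux/kernel.h>
	#include <linux/gpio.h>

	static struct gpio example_gpios[] = {
		{ 10, GPIOF_IN,       "card detect" },	/* input */
		{ 11, GPIOF_INIT_LOW, "power enable" },	/* output, driven low */
	};

	static int example_hw_init(void)
	{
		/* Requests all entries; on failure, the GPIOs already
		 * claimed are released before the error is returned. */
		return gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
	}

	static void example_hw_shutdown(void)
	{
		gpio_free_array(example_gpios, ARRAY_SIZE(example_gpios));
	}
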
static void colibri_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
struct pcmcia_state *state)
{
- state->detect = !!gpio_get_value(colibri_pcmcia_gpio.detect_gpio);
- state->ready = !!gpio_get_value(colibri_pcmcia_gpio.ready_gpio);
- state->bvd1 = !!gpio_get_value(colibri_pcmcia_gpio.bvd1_gpio);
- state->bvd2 = !!gpio_get_value(colibri_pcmcia_gpio.bvd2_gpio);
+ state->detect = !!gpio_get_value(colibri_pcmcia_gpios[DETECT].gpio);
+ state->ready = !!gpio_get_value(colibri_pcmcia_gpios[READY].gpio);
+ state->bvd1 = !!gpio_get_value(colibri_pcmcia_gpios[BVD1].gpio);
+ state->bvd2 = !!gpio_get_value(colibri_pcmcia_gpios[BVD2].gpio);
state->wrprot = 0;
state->vs_3v = 1;
state->vs_Xv = 0;
colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
const socket_state_t *state)
{
- gpio_set_value(colibri_pcmcia_gpio.ppen_gpio,
+ gpio_set_value(colibri_pcmcia_gpios[PPEN].gpio,
!(state->Vcc == 33 && state->Vpp < 50));
- gpio_set_value(colibri_pcmcia_gpio.reset_gpio, state->flags & SS_RESET);
+ gpio_set_value(colibri_pcmcia_gpios[RESET].gpio,
+ state->flags & SS_RESET);
return 0;
}
/* Colibri PXA270 */
if (machine_is_colibri()) {
- colibri_pcmcia_gpio.reset_gpio = COLIBRI270_RESET_GPIO;
- colibri_pcmcia_gpio.ppen_gpio = COLIBRI270_PPEN_GPIO;
- colibri_pcmcia_gpio.bvd1_gpio = COLIBRI270_BVD1_GPIO;
- colibri_pcmcia_gpio.bvd2_gpio = COLIBRI270_BVD2_GPIO;
- colibri_pcmcia_gpio.detect_gpio = COLIBRI270_DETECT_GPIO;
- colibri_pcmcia_gpio.ready_gpio = COLIBRI270_READY_GPIO;
+ colibri_pcmcia_gpios[RESET].gpio = COLIBRI270_RESET_GPIO;
+ colibri_pcmcia_gpios[PPEN].gpio = COLIBRI270_PPEN_GPIO;
+ colibri_pcmcia_gpios[BVD1].gpio = COLIBRI270_BVD1_GPIO;
+ colibri_pcmcia_gpios[BVD2].gpio = COLIBRI270_BVD2_GPIO;
+ colibri_pcmcia_gpios[DETECT].gpio = COLIBRI270_DETECT_GPIO;
+ colibri_pcmcia_gpios[READY].gpio = COLIBRI270_READY_GPIO;
/* Colibri PXA320 */
} else if (machine_is_colibri320()) {
- colibri_pcmcia_gpio.reset_gpio = COLIBRI320_RESET_GPIO;
- colibri_pcmcia_gpio.ppen_gpio = COLIBRI320_PPEN_GPIO;
- colibri_pcmcia_gpio.bvd1_gpio = COLIBRI320_BVD1_GPIO;
- colibri_pcmcia_gpio.bvd2_gpio = COLIBRI320_BVD2_GPIO;
- colibri_pcmcia_gpio.detect_gpio = COLIBRI320_DETECT_GPIO;
- colibri_pcmcia_gpio.ready_gpio = COLIBRI320_READY_GPIO;
+ colibri_pcmcia_gpios[RESET].gpio = COLIBRI320_RESET_GPIO;
+ colibri_pcmcia_gpios[PPEN].gpio = COLIBRI320_PPEN_GPIO;
+ colibri_pcmcia_gpios[BVD1].gpio = COLIBRI320_BVD1_GPIO;
+ colibri_pcmcia_gpios[BVD2].gpio = COLIBRI320_BVD2_GPIO;
+ colibri_pcmcia_gpios[DETECT].gpio = COLIBRI320_DETECT_GPIO;
+ colibri_pcmcia_gpios[READY].gpio = COLIBRI320_READY_GPIO;
}
ret = platform_device_add_data(colibri_pcmcia_device,
* Driver for Palm LifeDrive PCMCIA
*
* Copyright (C) 2006 Alex Osborne <ato@meshy.org>
- * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com>
+ * Copyright (C) 2007-2011 Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <mach/palmld.h>
#include "soc_common.h"
+static struct gpio palmld_pcmcia_gpios[] = {
+ { GPIO_NR_PALMLD_PCMCIA_POWER, GPIOF_INIT_LOW, "PCMCIA Power" },
+ { GPIO_NR_PALMLD_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" },
+ { GPIO_NR_PALMLD_PCMCIA_READY, GPIOF_IN, "PCMCIA Ready" },
+};
+
static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
int ret;
- ret = gpio_request(GPIO_NR_PALMLD_PCMCIA_POWER, "PCMCIA PWR");
- if (ret)
- goto err1;
- ret = gpio_direction_output(GPIO_NR_PALMLD_PCMCIA_POWER, 0);
- if (ret)
- goto err2;
-
- ret = gpio_request(GPIO_NR_PALMLD_PCMCIA_RESET, "PCMCIA RST");
- if (ret)
- goto err2;
- ret = gpio_direction_output(GPIO_NR_PALMLD_PCMCIA_RESET, 1);
- if (ret)
- goto err3;
-
- ret = gpio_request(GPIO_NR_PALMLD_PCMCIA_READY, "PCMCIA RDY");
- if (ret)
- goto err3;
- ret = gpio_direction_input(GPIO_NR_PALMLD_PCMCIA_READY);
- if (ret)
- goto err4;
+ ret = gpio_request_array(palmld_pcmcia_gpios,
+ ARRAY_SIZE(palmld_pcmcia_gpios));
skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY);
- return 0;
-err4:
- gpio_free(GPIO_NR_PALMLD_PCMCIA_READY);
-err3:
- gpio_free(GPIO_NR_PALMLD_PCMCIA_RESET);
-err2:
- gpio_free(GPIO_NR_PALMLD_PCMCIA_POWER);
-err1:
return ret;
}
static void palmld_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
- gpio_free(GPIO_NR_PALMLD_PCMCIA_READY);
- gpio_free(GPIO_NR_PALMLD_PCMCIA_RESET);
- gpio_free(GPIO_NR_PALMLD_PCMCIA_POWER);
+ gpio_free_array(palmld_pcmcia_gpios, ARRAY_SIZE(palmld_pcmcia_gpios));
}
static void palmld_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
* Driver for Palm Tungsten|C PCMCIA
*
* Copyright (C) 2008 Alex Osborne <ato@meshy.org>
- * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
+ * Copyright (C) 2009-2011 Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <mach/palmtc.h>
#include "soc_common.h"
+static struct gpio palmtc_pcmcia_gpios[] = {
+ { GPIO_NR_PALMTC_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" },
+ { GPIO_NR_PALMTC_PCMCIA_POWER2, GPIOF_INIT_LOW, "PCMCIA Power 2" },
+ { GPIO_NR_PALMTC_PCMCIA_POWER3, GPIOF_INIT_LOW, "PCMCIA Power 3" },
+ { GPIO_NR_PALMTC_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" },
+ { GPIO_NR_PALMTC_PCMCIA_READY, GPIOF_IN, "PCMCIA Ready" },
+ { GPIO_NR_PALMTC_PCMCIA_PWRREADY, GPIOF_IN, "PCMCIA Power Ready" },
+};
+
static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
int ret;
- ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER1, "PCMCIA PWR1");
- if (ret)
- goto err1;
- ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER1, 0);
- if (ret)
- goto err2;
-
- ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER2, "PCMCIA PWR2");
- if (ret)
- goto err2;
- ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER2, 0);
- if (ret)
- goto err3;
-
- ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER3, "PCMCIA PWR3");
- if (ret)
- goto err3;
- ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER3, 0);
- if (ret)
- goto err4;
-
- ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_RESET, "PCMCIA RST");
- if (ret)
- goto err4;
- ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
- if (ret)
- goto err5;
-
- ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_READY, "PCMCIA RDY");
- if (ret)
- goto err5;
- ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_READY);
- if (ret)
- goto err6;
-
- ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_PWRREADY, "PCMCIA PWRRDY");
- if (ret)
- goto err6;
- ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
- if (ret)
- goto err7;
+ ret = gpio_request_array(palmtc_pcmcia_gpios,
+ ARRAY_SIZE(palmtc_pcmcia_gpios));
skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
- return 0;
-err7:
- gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
-err6:
- gpio_free(GPIO_NR_PALMTC_PCMCIA_READY);
-err5:
- gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET);
-err4:
- gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3);
-err3:
- gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2);
-err2:
- gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1);
-err1:
return ret;
}
static void palmtc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
- gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
- gpio_free(GPIO_NR_PALMTC_PCMCIA_READY);
- gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET);
- gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3);
- gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2);
- gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1);
+ gpio_free_array(palmtc_pcmcia_gpios, ARRAY_SIZE(palmtc_pcmcia_gpios));
}
static void palmtc_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
*
* Driver for Palm T|X PCMCIA
*
- * Copyright (C) 2007-2008 Marek Vasut <marek.vasut@gmail.com>
+ * Copyright (C) 2007-2011 Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <asm/mach-types.h>
-
-#include <mach/gpio.h>
#include <mach/palmtx.h>
-
#include "soc_common.h"
+static struct gpio palmtx_pcmcia_gpios[] = {
+ { GPIO_NR_PALMTX_PCMCIA_POWER1, GPIOF_INIT_LOW, "PCMCIA Power 1" },
+ { GPIO_NR_PALMTX_PCMCIA_POWER2, GPIOF_INIT_LOW, "PCMCIA Power 2" },
+ { GPIO_NR_PALMTX_PCMCIA_RESET, GPIOF_INIT_HIGH,"PCMCIA Reset" },
+ { GPIO_NR_PALMTX_PCMCIA_READY, GPIOF_IN, "PCMCIA Ready" },
+};
+
static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
int ret;
- ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER1, "PCMCIA PWR1");
- if (ret)
- goto err1;
- ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER1, 0);
- if (ret)
- goto err2;
-
- ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER2, "PCMCIA PWR2");
- if (ret)
- goto err2;
- ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER2, 0);
- if (ret)
- goto err3;
-
- ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_RESET, "PCMCIA RST");
- if (ret)
- goto err3;
- ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_RESET, 1);
- if (ret)
- goto err4;
-
- ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_READY, "PCMCIA RDY");
- if (ret)
- goto err4;
- ret = gpio_direction_input(GPIO_NR_PALMTX_PCMCIA_READY);
- if (ret)
- goto err5;
+ ret = gpio_request_array(palmtx_pcmcia_gpios,
+ ARRAY_SIZE(palmtx_pcmcia_gpios));
skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY);
- return 0;
-err5:
- gpio_free(GPIO_NR_PALMTX_PCMCIA_READY);
-err4:
- gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET);
-err3:
- gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2);
-err2:
- gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1);
-err1:
return ret;
}
static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
- gpio_free(GPIO_NR_PALMTX_PCMCIA_READY);
- gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET);
- gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2);
- gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1);
+ gpio_free_array(palmtx_pcmcia_gpios, ARRAY_SIZE(palmtx_pcmcia_gpios));
}
static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
*
* Driver for Voipac PXA270 PCMCIA and CF sockets
*
- * Copyright (C) 2010
- * Marek Vasut <marek.vasut@gmail.com>
+ * Copyright (C) 2010-2011 Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include "soc_common.h"
+static struct gpio vpac270_pcmcia_gpios[] = {
+ { GPIO84_VPAC270_PCMCIA_CD, GPIOF_IN, "PCMCIA Card Detect" },
+ { GPIO35_VPAC270_PCMCIA_RDY, GPIOF_IN, "PCMCIA Ready" },
+ { GPIO107_VPAC270_PCMCIA_PPEN, GPIOF_INIT_LOW, "PCMCIA PPEN" },
+ { GPIO11_VPAC270_PCMCIA_RESET, GPIOF_INIT_LOW, "PCMCIA Reset" },
+};
+
+static struct gpio vpac270_cf_gpios[] = {
+ { GPIO17_VPAC270_CF_CD, GPIOF_IN, "CF Card Detect" },
+ { GPIO12_VPAC270_CF_RDY, GPIOF_IN, "CF Ready" },
+ { GPIO16_VPAC270_CF_RESET, GPIOF_INIT_LOW, "CF Reset" },
+};
+
static struct pcmcia_irqs cd_irqs[] = {
{
.sock = 0,
int ret;
if (skt->nr == 0) {
- ret = gpio_request(GPIO84_VPAC270_PCMCIA_CD, "PCMCIA CD");
- if (ret)
- goto err1;
- ret = gpio_direction_input(GPIO84_VPAC270_PCMCIA_CD);
- if (ret)
- goto err2;
-
- ret = gpio_request(GPIO35_VPAC270_PCMCIA_RDY, "PCMCIA RDY");
- if (ret)
- goto err2;
- ret = gpio_direction_input(GPIO35_VPAC270_PCMCIA_RDY);
- if (ret)
- goto err3;
-
- ret = gpio_request(GPIO107_VPAC270_PCMCIA_PPEN, "PCMCIA PPEN");
- if (ret)
- goto err3;
- ret = gpio_direction_output(GPIO107_VPAC270_PCMCIA_PPEN, 0);
- if (ret)
- goto err4;
-
- ret = gpio_request(GPIO11_VPAC270_PCMCIA_RESET, "PCMCIA RESET");
- if (ret)
- goto err4;
- ret = gpio_direction_output(GPIO11_VPAC270_PCMCIA_RESET, 0);
- if (ret)
- goto err5;
+ ret = gpio_request_array(vpac270_pcmcia_gpios,
+ ARRAY_SIZE(vpac270_pcmcia_gpios));
skt->socket.pci_irq = gpio_to_irq(GPIO35_VPAC270_PCMCIA_RDY);
- return soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1);
-
-err5:
- gpio_free(GPIO11_VPAC270_PCMCIA_RESET);
-err4:
- gpio_free(GPIO107_VPAC270_PCMCIA_PPEN);
-err3:
- gpio_free(GPIO35_VPAC270_PCMCIA_RDY);
-err2:
- gpio_free(GPIO84_VPAC270_PCMCIA_CD);
-err1:
- return ret;
-
+ if (!ret)
+ ret = soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1);
} else {
- ret = gpio_request(GPIO17_VPAC270_CF_CD, "CF CD");
- if (ret)
- goto err6;
- ret = gpio_direction_input(GPIO17_VPAC270_CF_CD);
- if (ret)
- goto err7;
-
- ret = gpio_request(GPIO12_VPAC270_CF_RDY, "CF RDY");
- if (ret)
- goto err7;
- ret = gpio_direction_input(GPIO12_VPAC270_CF_RDY);
- if (ret)
- goto err8;
-
- ret = gpio_request(GPIO16_VPAC270_CF_RESET, "CF RESET");
- if (ret)
- goto err8;
- ret = gpio_direction_output(GPIO16_VPAC270_CF_RESET, 0);
- if (ret)
- goto err9;
+ ret = gpio_request_array(vpac270_cf_gpios,
+ ARRAY_SIZE(vpac270_cf_gpios));
skt->socket.pci_irq = gpio_to_irq(GPIO12_VPAC270_CF_RDY);
- return soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1);
-
-err9:
- gpio_free(GPIO16_VPAC270_CF_RESET);
-err8:
- gpio_free(GPIO12_VPAC270_CF_RDY);
-err7:
- gpio_free(GPIO17_VPAC270_CF_CD);
-err6:
- return ret;
-
+ if (!ret)
+ ret = soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1);
}
+
+ return ret;
}
static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
- gpio_free(GPIO11_VPAC270_PCMCIA_RESET);
- gpio_free(GPIO107_VPAC270_PCMCIA_PPEN);
- gpio_free(GPIO35_VPAC270_PCMCIA_RDY);
- gpio_free(GPIO84_VPAC270_PCMCIA_CD);
- gpio_free(GPIO16_VPAC270_CF_RESET);
- gpio_free(GPIO12_VPAC270_CF_RDY);
- gpio_free(GPIO17_VPAC270_CF_CD);
+	if (skt->nr == 0)
+		gpio_free_array(vpac270_pcmcia_gpios,
+					ARRAY_SIZE(vpac270_pcmcia_gpios));
+	else
+		gpio_free_array(vpac270_cf_gpios,
+					ARRAY_SIZE(vpac270_cf_gpios));
}
static void vpac270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
GPDR &= ~nano_skts[i].input_pins;
GPDR |= nano_skts[i].output_pins;
GPCR = nano_skts[i].clear_outputs;
- set_irq_type(nano_skts[i].transition_pins, IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(nano_skts[i].transition_pins, IRQ_TYPE_EDGE_BOTH);
skt->socket.pci_irq = nano_skts[i].pci_irq;
return soc_pcmcia_request_irqs(skt,
*/
if (skt->irq_state != 1 && state->io_irq) {
skt->irq_state = 1;
- set_irq_type(skt->socket.pci_irq,
- IRQ_TYPE_EDGE_FALLING);
+ irq_set_irq_type(skt->socket.pci_irq,
+ IRQ_TYPE_EDGE_FALLING);
} else if (skt->irq_state == 1 && state->io_irq == 0) {
skt->irq_state = 0;
- set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE);
+ irq_set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE);
}
skt->cs_state = *state;
IRQF_DISABLED, irqs[i].str, skt);
if (res)
break;
- set_irq_type(irqs[i].irq, IRQ_TYPE_NONE);
+ irq_set_irq_type(irqs[i].irq, IRQ_TYPE_NONE);
}
if (res) {
for (i = 0; i < nr; i++)
if (irqs[i].sock == skt->nr)
- set_irq_type(irqs[i].irq, IRQ_TYPE_NONE);
+ irq_set_irq_type(irqs[i].irq, IRQ_TYPE_NONE);
}
EXPORT_SYMBOL(soc_pcmcia_disable_irqs);
for (i = 0; i < nr; i++)
if (irqs[i].sock == skt->nr) {
- set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING);
- set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(irqs[i].irq, IRQ_TYPE_EDGE_BOTH);
}
}
EXPORT_SYMBOL(soc_pcmcia_enable_irqs);
* edge detector.
*/
irq = gpio_to_irq(GPIO_CDA);
- set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock);
if (ret) {
dev_err(&pdev->dev, "cannot setup cd irq\n");
}
for (i = 0; i < 8; i++) {
- set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
- handle_simple_irq, "demux");
- set_irq_chip_data(i + pg->irq_base, pg);
+ irq_set_chip_and_handler_name(i + pg->irq_base,
+ &pmic_irqchip,
+ handle_simple_irq,
+ "demux");
+ irq_set_chip_data(i + pg->irq_base, pg);
}
return 0;
err:
if (ret)
goto err2;
- set_irq_type(gpio_to_irq(info->charge_gpio),
- IRQ_TYPE_EDGE_BOTH);
+ irq_set_irq_type(gpio_to_irq(info->charge_gpio),
+ IRQ_TYPE_EDGE_BOTH);
ret = request_irq(gpio_to_irq(info->charge_gpio),
z2_charge_switch_irq, IRQF_DISABLED,
"AC Detect", charger);
struct platform_device *pdev = to_platform_device(dev);
struct sh_rtc *rtc = platform_get_drvdata(pdev);
- set_irq_wake(rtc->periodic_irq, enabled);
+ irq_set_irq_wake(rtc->periodic_irq, enabled);
if (rtc->carry_irq > 0) {
- set_irq_wake(rtc->carry_irq, enabled);
- set_irq_wake(rtc->alarm_irq, enabled);
+ irq_set_irq_wake(rtc->carry_irq, enabled);
+ irq_set_irq_wake(rtc->alarm_irq, enabled);
}
}
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
- generic_handle_irq((unsigned int)get_irq_data(irq));
+ generic_handle_irq((unsigned int)irq_get_handler_data(irq));
}
static void __init intc_register_irq(struct intc_desc *desc,
irq_data = irq_get_irq_data(irq);
disable_irq_nosync(irq);
- set_irq_chip_and_handler_name(irq, &d->chip,
- handle_level_irq, "level");
- set_irq_chip_data(irq, (void *)data[primary]);
+ irq_set_chip_and_handler_name(irq, &d->chip, handle_level_irq,
+ "level");
+ irq_set_chip_data(irq, (void *)data[primary]);
/*
* set priority level
vect2->enum_id = 0;
/* redirect this interrupts to the first one */
- set_irq_chip(irq2, &dummy_irq_chip);
- set_irq_chained_handler(irq2, intc_redirect_irq);
- set_irq_data(irq2, (void *)irq);
+ irq_set_chip(irq2, &dummy_irq_chip);
+ irq_set_chained_handler(irq2, intc_redirect_irq);
+ irq_set_handler_data(irq2, (void *)irq);
}
}
/* enable wakeup irqs belonging to this intc controller */
for_each_active_irq(irq) {
struct irq_data *data;
- struct irq_desc *desc;
struct irq_chip *chip;
data = irq_get_irq_data(irq);
chip = irq_data_get_irq_chip(data);
if (chip != &d->chip)
continue;
- desc = irq_to_desc(irq);
- if ((desc->status & IRQ_WAKEUP))
+ if (irqd_is_wakeup_set(data))
chip->irq_enable(data);
}
}
-
return 0;
}
for_each_active_irq(irq) {
struct irq_data *data;
- struct irq_desc *desc;
struct irq_chip *chip;
data = irq_get_irq_data(irq);
*/
if (chip != &d->chip)
continue;
- desc = irq_to_desc(irq);
- if (desc->status & IRQ_DISABLED)
+ if (irqd_irq_disabled(data))
chip->irq_disable(data);
else
chip->irq_enable(data);
static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
- struct irq_chip *chip = get_irq_chip(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
return container_of(chip, struct intc_desc_int, chip);
}
set_irq_flags(irq, IRQF_VALID);
#else
/* same effect on other architectures */
- set_irq_noprobe(irq);
+ irq_set_noprobe(irq);
#endif
}
{
struct irq_data *data = irq_get_irq_data(irq);
struct irq_chip *chip = irq_data_get_irq_chip(data);
- struct intc_virq_list *entry, *vlist = irq_data_get_irq_data(data);
+ struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data);
struct intc_desc_int *d = get_intc_desc(irq);
chip->irq_mask_ack(data);
for_each_virq(entry, vlist) {
unsigned long addr, handle;
- handle = (unsigned long)get_irq_data(entry->irq);
+ handle = (unsigned long)irq_get_handler_data(entry->irq);
addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
intc_irq_xlate_set(irq, entry->enum_id, d);
- set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq),
+ irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq),
handle_simple_irq, "virq");
- set_irq_chip_data(irq, get_irq_chip_data(entry->pirq));
+ irq_set_chip_data(irq, irq_get_chip_data(entry->pirq));
- set_irq_data(irq, (void *)entry->handle);
+ irq_set_handler_data(irq, (void *)entry->handle);
- set_irq_chained_handler(entry->pirq, intc_virq_handler);
+ irq_set_chained_handler(entry->pirq, intc_virq_handler);
add_virq_to_pirq(entry->pirq, irq);
radix_tree_tag_clear(&d->tree, entry->enum_id,
if (error)
return -ENODEV;
- set_irq_wake(sdhcinfo->oob_irq, 1);
+ irq_set_irq_wake(sdhcinfo->oob_irq, 1);
sdhcinfo->oob_irq_registered = true;
}
{
SDLX_MSG(("%s: Enter\n", __func__));
- set_irq_wake(sdhcinfo->oob_irq, 0);
+ irq_set_irq_wake(sdhcinfo->oob_irq, 0);
disable_irq(sdhcinfo->oob_irq); /* just in case.. */
free_irq(sdhcinfo->oob_irq, NULL);
sdhcinfo->oob_irq_registered = false;
int result;
int irq_pin = AST_INT;
- set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW);
/*
* for shared IRQS must provide non NULL device ptr
if (xencons_irq < 0)
xencons_irq = 0; /* NO_IRQ */
else
- set_irq_noprobe(xencons_irq);
+ irq_set_noprobe(xencons_irq);
hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
if (IS_ERR(hp))
config SERIAL_GRLIB_GAISLER_APBUART
tristate "GRLIB APBUART serial support"
- depends on OF
+ depends on OF && SPARC
select SERIAL_CORE
---help---
Add support for the GRLIB APBUART serial port.
static int __devinit apbuart_probe(struct platform_device *op)
{
- int i = -1;
+ int i;
struct uart_port *port = NULL;
- i = 0;
for (i = 0; i < grlib_apbuart_port_nr; i++) {
if (op->dev.of_node == grlib_apbuart_nodes[i])
break;
port = &grlib_apbuart_ports[i];
port->dev = &op->dev;
+ port->irq = op->archdata.irqs[0];
uart_add_one_port(&grlib_apbuart_driver, (struct uart_port *) port);
static int grlib_apbuart_configure(void)
{
- struct device_node *np, *rp;
- const u32 *prop;
- int freq_khz, line = 0;
-
- /* Get bus frequency */
- rp = of_find_node_by_path("/");
- if (!rp)
- return -ENODEV;
- rp = of_get_next_child(rp, NULL);
- if (!rp)
- return -ENODEV;
- prop = of_get_property(rp, "clock-frequency", NULL);
- if (!prop)
- return -ENODEV;
- freq_khz = *prop;
+ struct device_node *np;
+ int line = 0;
for_each_matching_node(np, apbuart_match) {
- const int *irqs, *ampopts;
+ const int *ampopts;
+ const u32 *freq_hz;
const struct amba_prom_registers *regs;
struct uart_port *port;
unsigned long addr;
ampopts = of_get_property(np, "ampopts", NULL);
if (ampopts && (*ampopts == 0))
continue; /* Ignore if used by another OS instance */
-
- irqs = of_get_property(np, "interrupts", NULL);
regs = of_get_property(np, "reg", NULL);
+ /* Frequency of APB Bus is frequency of UART */
+ freq_hz = of_get_property(np, "freq", NULL);
- if (!irqs || !regs)
+ if (!regs || !freq_hz || (*freq_hz == 0))
continue;
grlib_apbuart_nodes[line] = np;
port->mapbase = addr;
port->membase = ioremap(addr, sizeof(struct grlib_apbuart_regs_map));
- port->irq = *irqs;
+ port->irq = 0;
port->iotype = UPIO_MEM;
port->ops = &grlib_apbuart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->line = line;
- port->uartclk = freq_khz * 1000;
+ port->uartclk = *freq_hz;
port->fifosize = apbuart_scan_fifo_size((struct uart_port *) port, line);
line++;
if (unlikely(uport->irq < 0))
return -ENXIO;
- if (unlikely(set_irq_wake(uport->irq, 1)))
+ if (unlikely(irq_set_irq_wake(uport->irq, 1)))
return -ENXIO;
if (pdata == NULL || pdata->rx_wakeup_irq < 0)
if (unlikely(msm_uport->rx_wakeup.irq < 0))
return -ENXIO;
- if (unlikely(set_irq_wake(msm_uport->rx_wakeup.irq, 1)))
+ if (unlikely(irq_set_irq_wake(msm_uport->rx_wakeup.irq, 1)))
return -ENXIO;
}
static void pxa25x_ep_fifo_flush (struct usb_ep *ep);
static void nuke (struct pxa25x_ep *, int status);
-/* one GPIO should be used to detect VBUS from the host */
-static int is_vbus_present(void)
-{
- struct pxa2xx_udc_mach_info *mach = the_controller->mach;
-
- if (gpio_is_valid(mach->gpio_vbus)) {
- int value = gpio_get_value(mach->gpio_vbus);
-
- if (mach->gpio_vbus_inverted)
- return !value;
- else
- return !!value;
- }
- if (mach->udc_is_connected)
- return mach->udc_is_connected();
- return 1;
-}
-
/* one GPIO should control a D+ pullup, so host sees this device (or not) */
static void pullup_off(void)
{
"%s version: %s\nGadget driver: %s\nHost %s\n\n",
driver_name, DRIVER_VERSION SIZE_STR "(pio)",
dev->driver ? dev->driver->driver.name : "(none)",
- is_vbus_present() ? "full speed" : "disconnected");
+ dev->gadget.speed == USB_SPEED_FULL ? "full speed" : "disconnected");
/* registers for device and ep0 */
seq_printf(m,
(tmp & UDCCFR_ACM) ? " acm" : "");
}
- if (!is_vbus_present() || !dev->driver)
+ if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver)
goto done;
seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
#endif
-static irqreturn_t udc_vbus_irq(int irq, void *_dev)
-{
- struct pxa25x_udc *dev = _dev;
-
- pxa25x_udc_vbus_session(&dev->gadget, is_vbus_present());
- return IRQ_HANDLED;
-}
-
/*-------------------------------------------------------------------------*/
if (unlikely(udccr & UDCCR_SUSIR)) {
udc_ack_int_UDCCR(UDCCR_SUSIR);
handled = 1;
- DBG(DBG_VERBOSE, "USB suspend%s\n", is_vbus_present()
- ? "" : "+disconnect");
+ DBG(DBG_VERBOSE, "USB suspend\n");
- if (!is_vbus_present())
- stop_activity(dev, dev->driver);
- else if (dev->gadget.speed != USB_SPEED_UNKNOWN
+ if (dev->gadget.speed != USB_SPEED_UNKNOWN
&& dev->driver
&& dev->driver->suspend)
dev->driver->suspend(&dev->gadget);
if (dev->gadget.speed != USB_SPEED_UNKNOWN
&& dev->driver
- && dev->driver->resume
- && is_vbus_present())
+ && dev->driver->resume)
dev->driver->resume(&dev->gadget);
}
static int __init pxa25x_udc_probe(struct platform_device *pdev)
{
struct pxa25x_udc *dev = &memory;
- int retval, vbus_irq, irq;
+ int retval, irq;
u32 chiprev;
/* insist on Intel/ARM/XScale */
dev->transceiver = otg_get_transceiver();
- if (gpio_is_valid(dev->mach->gpio_vbus)) {
- if ((retval = gpio_request(dev->mach->gpio_vbus,
- "pxa25x_udc GPIO VBUS"))) {
- dev_dbg(&pdev->dev,
- "can't get vbus gpio %d, err: %d\n",
- dev->mach->gpio_vbus, retval);
- goto err_gpio_vbus;
- }
- gpio_direction_input(dev->mach->gpio_vbus);
- vbus_irq = gpio_to_irq(dev->mach->gpio_vbus);
- } else
- vbus_irq = 0;
-
if (gpio_is_valid(dev->mach->gpio_pullup)) {
if ((retval = gpio_request(dev->mach->gpio_pullup,
"pca25x_udc GPIO PULLUP"))) {
udc_disable(dev);
udc_reinit(dev);
- dev->vbus = !!is_vbus_present();
+ dev->vbus = 0;
/* irq setup after old hardware state is cleaned up */
retval = request_irq(irq, pxa25x_udc_irq,
}
} else
#endif
- if (vbus_irq) {
- retval = request_irq(vbus_irq, udc_vbus_irq,
- IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- driver_name, dev);
- if (retval != 0) {
- pr_err("%s: can't get irq %i, err %d\n",
- driver_name, vbus_irq, retval);
- goto err_vbus_irq;
- }
- }
create_debug_files(dev);
return 0;
- err_vbus_irq:
#ifdef CONFIG_ARCH_LUBBOCK
free_irq(LUBBOCK_USB_DISC_IRQ, dev);
err_irq_lub:
if (gpio_is_valid(dev->mach->gpio_pullup))
gpio_free(dev->mach->gpio_pullup);
err_gpio_pullup:
- if (gpio_is_valid(dev->mach->gpio_vbus))
- gpio_free(dev->mach->gpio_vbus);
- err_gpio_vbus:
if (dev->transceiver) {
otg_put_transceiver(dev->transceiver);
dev->transceiver = NULL;
free_irq(LUBBOCK_USB_IRQ, dev);
}
#endif
- if (gpio_is_valid(dev->mach->gpio_vbus)) {
- free_irq(gpio_to_irq(dev->mach->gpio_vbus), dev);
- gpio_free(dev->mach->gpio_vbus);
- }
if (gpio_is_valid(dev->mach->gpio_pullup))
gpio_free(dev->mach->gpio_pullup);
return -EBUSY;
}
- ret = set_irq_type(irq, IRQF_TRIGGER_FALLING);
+ ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
if (ret) {
dev_err(&pdev->dev, "error setting irq type\n");
ret = -EFAULT;
musb_writel(tbase, TUSB_INT_CTRL_CONF,
TUSB_INT_CTRL_CONF_INT_RELCYC(0));
- set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
+ irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
/* maybe force into the Default-A OTG state machine */
if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
static void overlay1fb_disable(struct pxafb_layer *ofb)
{
- uint32_t lccr5 = lcd_readl(ofb->fbi, LCCR5);
+ uint32_t lccr5;
+
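+	/* nothing to do if the overlay is not currently enabled */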
+ if (!(lcd_readl(ofb->fbi, OVL1C1) & OVLxC1_OEN))
+ return;
+
+ lccr5 = lcd_readl(ofb->fbi, LCCR5);
lcd_writel(ofb->fbi, OVL1C1, ofb->control[0] & ~OVLxC1_OEN);
static void overlay2fb_disable(struct pxafb_layer *ofb)
{
- uint32_t lccr5 = lcd_readl(ofb->fbi, LCCR5);
+ uint32_t lccr5;
+
+ if (!(lcd_readl(ofb->fbi, OVL2C1) & OVLxC1_OEN))
+ return;
+
+ lccr5 = lcd_readl(ofb->fbi, LCCR5);
lcd_writel(ofb->fbi, OVL2C1, ofb->control[0] & ~OVLxC1_OEN);
if (user == 0)
return -ENODEV;
- /* allow only one user at a time */
- if (atomic_inc_and_test(&ofb->usage))
- return -EBUSY;
+ if (ofb->usage++ == 0)
+ /* unblank the base framebuffer */
+ fb_blank(&ofb->fbi->fb, FB_BLANK_UNBLANK);
- /* unblank the base framebuffer */
- fb_blank(&ofb->fbi->fb, FB_BLANK_UNBLANK);
return 0;
}
{
struct pxafb_layer *ofb = (struct pxafb_layer*) info;
- atomic_dec(&ofb->usage);
- ofb->ops->disable(ofb);
+ if (ofb->usage == 1) {
+ ofb->ops->disable(ofb);
+ ofb->fb.var.height = -1;
+ ofb->fb.var.width = -1;
+ ofb->fb.var.xres = ofb->fb.var.xres_virtual = 0;
+ ofb->fb.var.yres = ofb->fb.var.yres_virtual = 0;
- free_pages_exact(ofb->video_mem, ofb->video_mem_size);
- ofb->video_mem = NULL;
- ofb->video_mem_size = 0;
+ ofb->usage--;
+ }
return 0;
}
int xpos, ypos, pfor, bpp;
xpos = NONSTD_TO_XPOS(var->nonstd);
- ypos = NONSTD_TO_XPOS(var->nonstd);
+ ypos = NONSTD_TO_YPOS(var->nonstd);
pfor = NONSTD_TO_PFOR(var->nonstd);
bpp = pxafb_var_to_bpp(var);
return 0;
}
-static int overlayfb_map_video_memory(struct pxafb_layer *ofb)
+static int overlayfb_check_video_memory(struct pxafb_layer *ofb)
{
struct fb_var_screeninfo *var = &ofb->fb.var;
int pfor = NONSTD_TO_PFOR(var->nonstd);
size = PAGE_ALIGN(ofb->fb.fix.line_length * var->yres_virtual);
- /* don't re-allocate if the original video memory is enough */
if (ofb->video_mem) {
if (ofb->video_mem_size >= size)
return 0;
-
- free_pages_exact(ofb->video_mem, ofb->video_mem_size);
}
-
- ofb->video_mem = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
- if (ofb->video_mem == NULL)
- return -ENOMEM;
-
- ofb->video_mem_phys = virt_to_phys(ofb->video_mem);
- ofb->video_mem_size = size;
-
- mutex_lock(&ofb->fb.mm_lock);
- ofb->fb.fix.smem_start = ofb->video_mem_phys;
- ofb->fb.fix.smem_len = ofb->fb.fix.line_length * var->yres_virtual;
- mutex_unlock(&ofb->fb.mm_lock);
- ofb->fb.screen_base = ofb->video_mem;
- return 0;
+ return -EINVAL;
}
static int overlayfb_set_par(struct fb_info *info)
struct fb_var_screeninfo *var = &info->var;
int xpos, ypos, pfor, bpp, ret;
- ret = overlayfb_map_video_memory(ofb);
+ ret = overlayfb_check_video_memory(ofb);
if (ret)
return ret;
bpp = pxafb_var_to_bpp(var);
xpos = NONSTD_TO_XPOS(var->nonstd);
- ypos = NONSTD_TO_XPOS(var->nonstd);
+ ypos = NONSTD_TO_YPOS(var->nonstd);
pfor = NONSTD_TO_PFOR(var->nonstd);
ofb->control[0] = OVLxC1_PPL(var->xres) | OVLxC1_LPO(var->yres) |
ofb->id = id;
ofb->ops = &ofb_ops[id];
- atomic_set(&ofb->usage, 0);
+ ofb->usage = 0;
ofb->fbi = fbi;
init_completion(&ofb->branch_done);
}
return 0;
}
-static int __devinit pxafb_overlay_init(struct pxafb_info *fbi)
+static int __devinit pxafb_overlay_map_video_memory(struct pxafb_info *pxafb,
+ struct pxafb_layer *ofb)
+{
+	/* We assume the user will use at most video_mem_size for the overlay
+	 * fb; there is little point in pairing a 16bpp base plane with a
+	 * 24bpp overlay anyway.
+	 */
+ ofb->video_mem = alloc_pages_exact(PAGE_ALIGN(pxafb->video_mem_size),
+ GFP_KERNEL | __GFP_ZERO);
+ if (ofb->video_mem == NULL)
+ return -ENOMEM;
+
+ ofb->video_mem_phys = virt_to_phys(ofb->video_mem);
+ ofb->video_mem_size = PAGE_ALIGN(pxafb->video_mem_size);
+
+ mutex_lock(&ofb->fb.mm_lock);
+ ofb->fb.fix.smem_start = ofb->video_mem_phys;
+ ofb->fb.fix.smem_len = pxafb->video_mem_size;
+ mutex_unlock(&ofb->fb.mm_lock);
+
+ ofb->fb.screen_base = ofb->video_mem;
+
+ return 0;
+}
+
+static void __devinit pxafb_overlay_init(struct pxafb_info *fbi)
{
int i, ret;
if (!pxafb_overlay_supported())
- return 0;
+ return;
for (i = 0; i < 2; i++) {
- init_pxafb_overlay(fbi, &fbi->overlay[i], i);
- ret = register_framebuffer(&fbi->overlay[i].fb);
+ struct pxafb_layer *ofb = &fbi->overlay[i];
+ init_pxafb_overlay(fbi, ofb, i);
+ ret = register_framebuffer(&ofb->fb);
if (ret) {
dev_err(fbi->dev, "failed to register overlay %d\n", i);
- return ret;
+ continue;
}
+ ret = pxafb_overlay_map_video_memory(fbi, ofb);
+ if (ret) {
+ dev_err(fbi->dev,
+ "failed to map video memory for overlay %d\n",
+ i);
+ unregister_framebuffer(&ofb->fb);
+ continue;
+ }
+ ofb->registered = 1;
}
/* mask all IU/BS/EOF/SOF interrupts */
lcd_writel(fbi, LCCR5, ~0);
- /* place overlay(s) on top of base */
- fbi->lccr0 |= LCCR0_OUC;
pr_info("PXA Overlay driver loaded successfully!\n");
- return 0;
}
static void __devexit pxafb_overlay_exit(struct pxafb_info *fbi)
if (!pxafb_overlay_supported())
return;
- for (i = 0; i < 2; i++)
- unregister_framebuffer(&fbi->overlay[i].fb);
+ for (i = 0; i < 2; i++) {
+ struct pxafb_layer *ofb = &fbi->overlay[i];
+ if (ofb->registered) {
+ if (ofb->video_mem)
+ free_pages_exact(ofb->video_mem,
+ ofb->video_mem_size);
+ unregister_framebuffer(&ofb->fb);
+ }
+ }
}
#else
static inline void pxafb_overlay_init(struct pxafb_info *fbi) {}
(lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) ||
(lcd_readl(fbi, LCCR4) != fbi->reg_lccr4) ||
(lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) ||
- (lcd_readl(fbi, FDADR1) != fbi->fdadr[1]))
+ ((fbi->lccr0 & LCCR0_SDS) &&
+ (lcd_readl(fbi, FDADR1) != fbi->fdadr[1])))
pxafb_schedule_work(fbi, C_REENABLE);
return 0;
lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB);
lcd_writel(fbi, FDADR0, fbi->fdadr[0]);
- lcd_writel(fbi, FDADR1, fbi->fdadr[1]);
+ if (fbi->lccr0 & LCCR0_SDS)
+ lcd_writel(fbi, FDADR1, fbi->fdadr[1]);
lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB);
}
switch (val) {
case CPUFREQ_PRECHANGE:
- set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE);
+ if (!fbi->overlay[0].usage && !fbi->overlay[1].usage)
+ set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE);
break;
case CPUFREQ_POSTCHANGE:
pxafb_decode_mach_info(fbi, inf);
+#ifdef CONFIG_FB_PXA_OVERLAY
+ /* place overlay(s) on top of base */
+ if (pxafb_overlay_supported())
+ fbi->lccr0 |= LCCR0_OUC;
+#endif
+
init_waitqueue_head(&fbi->ctrlr_wait);
INIT_WORK(&fbi->task, pxafb_task);
mutex_init(&fbi->ctrlr_lock);
struct pxafb_layer {
struct fb_info fb;
int id;
- atomic_t usage;
+ int registered;
+ uint32_t usage;
uint32_t control[2];
struct pxafb_layer_ops *ops;
ds1wm_data->active_high = plat->active_high;
if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
- set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
if (res->flags & IORESOURCE_IRQ_LOWEDGE)
- set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
+ irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED,
"ds1wm", ds1wm_data);
static int __devinit davinci_wdt_probe(struct platform_device *pdev)
{
int ret = 0, size;
- struct resource *res;
struct device *dev = &pdev->dev;
wdt_clk = clk_get(dev, NULL);
dev_info(dev, "heartbeat %d sec\n", heartbeat);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (wdt_mem == NULL) {
dev_err(dev, "failed to get memory region resource\n");
return -ENOENT;
}
- size = resource_size(res);
- wdt_mem = request_mem_region(res->start, size, pdev->name);
-
- if (wdt_mem == NULL) {
+ size = resource_size(wdt_mem);
+ if (!request_mem_region(wdt_mem->start, size, pdev->name)) {
dev_err(dev, "failed to get memory region\n");
return -ENOENT;
}
- wdt_base = ioremap(res->start, size);
+ wdt_base = ioremap(wdt_mem->start, size);
if (!wdt_base) {
dev_err(dev, "failed to map memory region\n");
+ release_mem_region(wdt_mem->start, size);
+ wdt_mem = NULL;
return -ENOMEM;
}
ret = misc_register(&davinci_wdt_miscdev);
if (ret < 0) {
dev_err(dev, "cannot register misc device\n");
- release_resource(wdt_mem);
- kfree(wdt_mem);
+ release_mem_region(wdt_mem->start, size);
+ wdt_mem = NULL;
} else {
set_bit(WDT_DEVICE_INITED, &wdt_status);
}
{
misc_deregister(&davinci_wdt_miscdev);
if (wdt_mem) {
- release_resource(wdt_mem);
- kfree(wdt_mem);
+ release_mem_region(wdt_mem->start, resource_size(wdt_mem));
wdt_mem = NULL;
}
{
int ret = 0;
int size;
- struct resource *res;
struct device *dev = &pdev->dev;
struct max63xx_timeout *table;
max63xx_pdev = pdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (wdt_mem == NULL) {
dev_err(dev, "failed to get memory region resource\n");
return -ENOENT;
}
- size = resource_size(res);
- wdt_mem = request_mem_region(res->start, size, pdev->name);
-
- if (wdt_mem == NULL) {
+ size = resource_size(wdt_mem);
+ if (!request_mem_region(wdt_mem->start, size, pdev->name)) {
dev_err(dev, "failed to get memory region\n");
return -ENOENT;
}
- wdt_base = ioremap(res->start, size);
+ wdt_base = ioremap(wdt_mem->start, size);
if (!wdt_base) {
dev_err(dev, "failed to map memory region\n");
ret = -ENOMEM;
out_unmap:
iounmap(wdt_base);
out_request:
- release_resource(wdt_mem);
- kfree(wdt_mem);
+ release_mem_region(wdt_mem->start, size);
+ wdt_mem = NULL;
return ret;
}
{
misc_deregister(&max63xx_wdt_miscdev);
if (wdt_mem) {
- release_resource(wdt_mem);
- kfree(wdt_mem);
+ release_mem_region(wdt_mem->start, resource_size(wdt_mem));
wdt_mem = NULL;
}
* Init & exit routines
*/
-static unsigned char __init nv_tco_getdevice(void)
+static unsigned char __devinit nv_tco_getdevice(void)
{
struct pci_dev *dev = NULL;
u32 val;
static int __devinit pnx4008_wdt_probe(struct platform_device *pdev)
{
int ret = 0, size;
- struct resource *res;
if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
heartbeat = DEFAULT_HEARTBEAT;
printk(KERN_INFO MODULE_NAME
"PNX4008 Watchdog Timer: heartbeat %d sec\n", heartbeat);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (wdt_mem == NULL) {
printk(KERN_INFO MODULE_NAME
"failed to get memory region resouce\n");
return -ENOENT;
}
- size = resource_size(res);
- wdt_mem = request_mem_region(res->start, size, pdev->name);
+ size = resource_size(wdt_mem);
- if (wdt_mem == NULL) {
+ if (!request_mem_region(wdt_mem->start, size, pdev->name)) {
printk(KERN_INFO MODULE_NAME "failed to get memory region\n");
return -ENOENT;
}
- wdt_base = (void __iomem *)IO_ADDRESS(res->start);
+ wdt_base = (void __iomem *)IO_ADDRESS(wdt_mem->start);
wdt_clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(wdt_clk)) {
ret = PTR_ERR(wdt_clk);
- release_resource(wdt_mem);
- kfree(wdt_mem);
+ release_mem_region(wdt_mem->start, size);
+ wdt_mem = NULL;
goto out;
}
ret = clk_enable(wdt_clk);
if (ret) {
- release_resource(wdt_mem);
- kfree(wdt_mem);
+ release_mem_region(wdt_mem->start, size);
+ wdt_mem = NULL;
+ clk_put(wdt_clk);
goto out;
}
ret = misc_register(&pnx4008_wdt_miscdev);
if (ret < 0) {
printk(KERN_ERR MODULE_NAME "cannot register misc device\n");
- release_resource(wdt_mem);
- kfree(wdt_mem);
+ release_mem_region(wdt_mem->start, size);
+ wdt_mem = NULL;
clk_disable(wdt_clk);
clk_put(wdt_clk);
} else {
clk_put(wdt_clk);
if (wdt_mem) {
- release_resource(wdt_mem);
- kfree(wdt_mem);
+ release_mem_region(wdt_mem->start, resource_size(wdt_mem));
wdt_mem = NULL;
}
return 0;
static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device *dev;
unsigned int wtcon;
int started = 0;
/* get the memory region for the watchdog timer */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (wdt_mem == NULL) {
dev_err(dev, "no memory resource specified\n");
return -ENOENT;
}
- size = resource_size(res);
- wdt_mem = request_mem_region(res->start, size, pdev->name);
- if (wdt_mem == NULL) {
+ size = resource_size(wdt_mem);
+ if (!request_mem_region(wdt_mem->start, size, pdev->name)) {
dev_err(dev, "failed to get memory region\n");
return -EBUSY;
}
- wdt_base = ioremap(res->start, size);
+ wdt_base = ioremap(wdt_mem->start, size);
if (wdt_base == NULL) {
dev_err(dev, "failed to ioremap() region\n");
ret = -EINVAL;
iounmap(wdt_base);
err_req:
- release_resource(wdt_mem);
- kfree(wdt_mem);
+ release_mem_region(wdt_mem->start, size);
+ wdt_mem = NULL;
return ret;
}
iounmap(wdt_base);
- release_resource(wdt_mem);
- kfree(wdt_mem);
+ release_mem_region(wdt_mem->start, resource_size(wdt_mem));
wdt_mem = NULL;
return 0;
}
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
+#include <linux/kernel.h>
#define PFX "SoftDog: "
"Softdog action, set to 1 to ignore reboots, 0 to reboot "
"(default depends on ONLY_TESTING)");
+static int soft_panic;
+module_param(soft_panic, int, 0);
+MODULE_PARM_DESC(soft_panic,
+ "Softdog action, set to 1 to panic, 0 to reboot (default=0)");
+
/*
* Our timer
*/
if (soft_noboot)
printk(KERN_CRIT PFX "Triggered - Reboot ignored.\n");
- else {
+ else if (soft_panic) {
+ printk(KERN_CRIT PFX "Initiating panic.\n");
+ panic("Software Watchdog Timer expired.");
+ } else {
printk(KERN_CRIT PFX "Initiating system reboot.\n");
emergency_restart();
printk(KERN_CRIT PFX "Reboot didn't ?????\n");
};
static char banner[] __initdata = KERN_INFO "Software Watchdog Timer: 0.07 "
- "initialized. soft_noboot=%d soft_margin=%d sec (nowayout= %d)\n";
+ "initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d "
+ "(nowayout= %d)\n";
static int __init watchdog_init(void)
{
return ret;
}
- printk(banner, soft_noboot, soft_margin, nowayout);
+ printk(banner, soft_noboot, soft_margin, soft_panic, nowayout);
return 0;
}
#define PFX TCO_MODULE_NAME ": "
/* internal variables */
+static u32 tcobase_phys;
static void __iomem *tcobase;
static unsigned int pm_iobase;
static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */
/* Low three bits of BASE0 are reserved. */
val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8);
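+	/* reserve the MMIO window before mapping it; keep the physical base
+	 * so the region can be released on the error and exit paths
+	 */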
+ if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
+ "SP5100 TCO")) {
+ printk(KERN_ERR PFX "mmio address 0x%04x already in use\n",
+ val);
+ goto unreg_region;
+ }
+ tcobase_phys = val;
+
tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE);
if (tcobase == 0) {
printk(KERN_ERR PFX "failed to get tcobase address\n");
- goto unreg_region;
+ goto unreg_mem_region;
}
/* Enable watchdog decode bit */
/* Done */
return 1;
- iounmap(tcobase);
+unreg_mem_region:
+ release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
unreg_region:
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
exit:
exit:
iounmap(tcobase);
+ release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
return ret;
}
/* Deregister */
misc_deregister(&sp5100_tco_miscdev);
iounmap(tcobase);
+ release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
}
/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
- return get_irq_data(irq);
+ return irq_get_handler_data(irq);
}
/* Constructors for packed IRQ information. */
info->type = IRQT_UNBOUND;
- set_irq_data(irq, info);
+ irq_set_handler_data(irq, info);
list_add_tail(&info->list, &xen_irq_list_head);
}
static void xen_free_irq(unsigned irq)
{
- struct irq_info *info = get_irq_data(irq);
+ struct irq_info *info = irq_get_handler_data(irq);
list_del(&info->list);
- set_irq_data(irq, NULL);
+ irq_set_handler_data(irq, NULL);
kfree(info);
{
int evtchn = evtchn_from_irq(data->irq);
- move_native_irq(data->irq);
+ irq_move_irq(data);
if (VALID_EVTCHN(evtchn)) {
mask_evtchn(evtchn);
if (irq < 0)
goto out;
- set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
- handle_level_irq, name);
+ irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
+ name);
irq_op.irq = irq;
irq_op.vector = 0;
if (irq == -1)
goto out;
- set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
- handle_level_irq, name);
+ irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
+ name);
xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
ret = irq_set_msi_desc(irq, msidesc);
if (irq == -1)
goto out;
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+ irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_fasteoi_irq, "event");
xen_irq_info_evtchn_init(irq, evtchn);
if (irq < 0)
goto out;
- set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
handle_percpu_irq, "ipi");
bind_ipi.vcpu = cpu;
if (irq == -1)
goto out;
- set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
handle_percpu_irq, "virq");
bind_virq.virq = virq;
{
int evtchn = evtchn_from_irq(data->irq);
- move_masked_irq(data->irq);
+ irq_move_masked_irq(data);
if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn);
map->vma->vm_start + map->notify.addr;
err = copy_to_user(tmp, &err, 1);
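+		/* copy_to_user() returns the number of bytes left uncopied,
+		 * not an errno
+		 */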
if (err)
- return err;
+ return -EFAULT;
map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
} else if (pgno >= offset && pgno < offset + pages) {
uint8_t *tmp = kmap(map->pages[pgno]);
if (map->flags) {
if ((vma->vm_flags & VM_WRITE) &&
(map->flags & GNTMAP_readonly))
- return -EINVAL;
+ goto out_unlock_put;
} else {
map->flags = GNTMAP_host_map;
if (!(vma->vm_flags & VM_WRITE))
spin_unlock(&priv->lock);
return err;
+out_unlock_put:
+ spin_unlock(&priv->lock);
out_put_map:
if (use_ptemod)
map->vma = NULL;
ci->i_head_snapc = ceph_get_snap_context(snapc);
++ci->i_wrbuffer_ref_head;
if (ci->i_wrbuffer_ref == 0)
- igrab(inode);
+ ihold(inode);
++ci->i_wrbuffer_ref;
dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
"snapc %p seq %lld (%d snaps)\n",
{
struct ceph_mds_client *mdsc = fsc->mdsc;
+ dout("mdsc_destroy %p\n", mdsc);
ceph_mdsc_stop(mdsc);
+
+ /* flush out any connection work with references to us */
+ ceph_msgr_flush();
+
fsc->mdsc = NULL;
kfree(mdsc);
+ dout("mdsc_destroy %p done\n", mdsc);
}
dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
capsnap, snapc);
- igrab(inode);
-
+ ihold(inode);
+
atomic_set(&capsnap->nref, 1);
capsnap->ci = ci;
INIT_LIST_HEAD(&capsnap->ci_item);
if (opt->name)
seq_printf(m, ",name=%s", opt->name);
- if (opt->secret)
+ if (opt->key)
seq_puts(m, ",secret=<hidden>");
if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
state->owner = owner;
atomic_inc(&owner->so_count);
list_add(&state->inode_states, &nfsi->open_states);
- state->inode = igrab(inode);
+ ihold(inode);
+ state->inode = inode;
spin_unlock(&inode->i_lock);
/* Note: The reclaim code dictates that we add stateless
* and read-only stateids to the end of the list */
/*
* check to see if the page is mapped already (no holes)
*/
- if (PageMappedToDisk(page)) {
- unlock_page(page);
+ if (PageMappedToDisk(page))
goto mapped;
- }
+
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
int fully_mapped = 1;
if (fully_mapped) {
SetPageMappedToDisk(page);
- unlock_page(page);
goto mapped;
}
}
return VM_FAULT_SIGBUS;
ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
- if (unlikely(ret)) {
+ if (ret != VM_FAULT_LOCKED) {
nilfs_transaction_abort(inode->i_sb);
return ret;
}
+ nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
nilfs_transaction_commit(inode->i_sb);
mapped:
SetPageChecked(page);
wait_on_page_writeback(page);
- return 0;
+ return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct nilfs_file_vm_ops = {
* Macros to check inode numbers
*/
#define NILFS_MDT_INO_BITS \
- ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \
- 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \
- 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO))
+ ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \
+ 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \
+ 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO))
#define NILFS_SYS_INO_BITS \
- ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
+ ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
#define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
#define NILFS_MDT_INODE(sb, ino) \
- ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino))))
+ ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino))))
#define NILFS_VALID_INODE(sb, ino) \
- ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino))))
+ ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino))))
/**
* struct nilfs_transaction_info: context information for synchronization
extern void nilfs_error(struct super_block *, const char *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
extern void nilfs_warning(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
+ __attribute__ ((format (printf, 3, 4)));
extern struct nilfs_super_block *
nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
extern int nilfs_store_magic_and_option(struct super_block *,
void nilfs_mapping_init(struct address_space *mapping,
struct backing_dev_info *bdi)
{
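+	/* a zeroed aops provides a valid, all-NULL method table instead of a
+	 * NULL a_ops pointer
+	 */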
+ static const struct address_space_operations empty_aops;
+
mapping->host = NULL;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->assoc_mapping = NULL;
mapping->backing_dev_info = bdi;
- mapping->a_ops = NULL;
+ mapping->a_ops = &empty_aops;
}
/*
--- /dev/null
+#ifndef _KEYS_CEPH_TYPE_H
+#define _KEYS_CEPH_TYPE_H
+
+#include <linux/key.h>
+
+extern struct key_type key_type_ceph;
+
+#endif
void vcc_insert_socket(struct sock *sk);
+void atm_dev_release_vccs(struct atm_dev *dev);
/*
* This is approximately the algorithm used by alloc_skb.
* @prot: pointer to struct proto structure.
*/
struct can_proto {
- int type;
- int protocol;
- struct proto_ops *ops;
- struct proto *prot;
+ int type;
+ int protocol;
+ const struct proto_ops *ops;
+ struct proto *prot;
};
/* function prototypes for the CAN networklayer core (af_can.c) */
void *data);
extern int can_send(struct sk_buff *skb, int loop);
+extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif /* CAN_CORE_H */
bool negotiating; /* true if negotiating protocol */
const char *name; /* entity name */
u64 global_id; /* our unique id in system */
- const char *secret; /* our secret key */
+ const struct ceph_crypto_key *key; /* our secret key */
unsigned want_keys; /* which services we want */
};
extern struct ceph_auth_client *ceph_auth_init(const char *name,
- const char *secret);
+ const struct ceph_crypto_key *key);
extern void ceph_auth_destroy(struct ceph_auth_client *ac);
extern void ceph_auth_reset(struct ceph_auth_client *ac);
pointer type of args */
int num_mon;
char *name;
- char *secret;
+ struct ceph_crypto_key *key;
};
/*
atomic_t refcnt;
unsigned char name[CN_CBQ_NAMELEN];
- struct workqueue_struct *cn_queue;
-
struct list_head queue_list;
spinlock_t queue_lock;
struct cb_id id;
};
-struct cn_callback_data {
- struct sk_buff *skb;
- void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
-
- void *free;
-};
-
struct cn_callback_entry {
struct list_head callback_entry;
- struct work_struct work;
+ atomic_t refcnt;
struct cn_queue_dev *pdev;
struct cn_callback_id id;
- struct cn_callback_data data;
+ void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
u32 seq, group;
};
struct cb_id *id,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+void cn_queue_release_callback(struct cn_callback_entry *);
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);
int cn_cb_equal(struct cb_id *, struct cb_id *);
-void cn_queue_wrapper(struct work_struct *work);
-
#endif /* __KERNEL__ */
#endif /* __CONNECTOR_H */
u32 ethtool_op_get_flags(struct net_device *dev);
int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
void ethtool_ntuple_flush(struct net_device *dev);
+bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
/**
 * &ethtool_ops - Alter and report network device settings
/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
-/* Please do not use: Use the replacement functions instead */
-static inline int set_irq_wake(unsigned int irq, unsigned int on)
-{
- return irq_set_irq_wake(irq, on);
-}
-#endif
-
static inline int enable_irq_wake(unsigned int irq)
{
return irq_set_irq_wake(irq, 1);
* IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
* IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
 * IRQ_NESTED_THREAD	- Interrupt nests into another thread
- *
- * Deprecated bits. They are kept updated as long as
- * CONFIG_GENERIC_HARDIRQS_NO_COMPAT is not set. Will go away soon. These bits
- * are internal state of the core code and if you really need to acces
- * them then talk to the genirq maintainer instead of hacking
- * something weird.
- *
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
IRQ_NO_BALANCING = (1 << 13),
IRQ_MOVE_PCNTXT = (1 << 14),
IRQ_NESTED_THREAD = (1 << 15),
-
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
- IRQ_INPROGRESS = (1 << 16),
- IRQ_REPLAY = (1 << 17),
- IRQ_WAITING = (1 << 18),
- IRQ_DISABLED = (1 << 19),
- IRQ_PENDING = (1 << 20),
- IRQ_MASKED = (1 << 21),
- IRQ_MOVE_PENDING = (1 << 22),
- IRQ_AFFINITY_SET = (1 << 23),
- IRQ_WAKEUP = (1 << 24),
-#endif
};
#define IRQF_MODIFY_MASK \
*/
struct irq_chip {
const char *name;
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
- unsigned int (*startup)(unsigned int irq);
- void (*shutdown)(unsigned int irq);
- void (*enable)(unsigned int irq);
- void (*disable)(unsigned int irq);
-
- void (*ack)(unsigned int irq);
- void (*mask)(unsigned int irq);
- void (*mask_ack)(unsigned int irq);
- void (*unmask)(unsigned int irq);
- void (*eoi)(unsigned int irq);
-
- void (*end)(unsigned int irq);
- int (*set_affinity)(unsigned int irq,
- const struct cpumask *dest);
- int (*retrigger)(unsigned int irq);
- int (*set_type)(unsigned int irq, unsigned int flow_type);
- int (*set_wake)(unsigned int irq, unsigned int on);
-
- void (*bus_lock)(unsigned int irq);
- void (*bus_sync_unlock)(unsigned int irq);
-#endif
unsigned int (*irq_startup)(struct irq_data *data);
void (*irq_shutdown)(struct irq_data *data);
void (*irq_enable)(struct irq_data *data);
#ifdef CONFIG_GENERIC_HARDIRQS
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
-void move_native_irq(int irq);
-void move_masked_irq(int irq);
void irq_move_irq(struct irq_data *data);
void irq_move_masked_irq(struct irq_data *data);
#else
-static inline void move_native_irq(int irq) { }
-static inline void move_masked_irq(int irq) { }
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
#endif
return d->msi_desc;
}
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
-/* Please do not use: Use the replacement functions instead */
-static inline int set_irq_chip(unsigned int irq, struct irq_chip *chip)
-{
- return irq_set_chip(irq, chip);
-}
-static inline int set_irq_data(unsigned int irq, void *data)
-{
- return irq_set_handler_data(irq, data);
-}
-static inline int set_irq_chip_data(unsigned int irq, void *data)
-{
- return irq_set_chip_data(irq, data);
-}
-static inline int set_irq_type(unsigned int irq, unsigned int type)
-{
- return irq_set_irq_type(irq, type);
-}
-static inline int set_irq_msi(unsigned int irq, struct msi_desc *entry)
-{
- return irq_set_msi_desc(irq, entry);
-}
-static inline struct irq_chip *get_irq_chip(unsigned int irq)
-{
- return irq_get_chip(irq);
-}
-static inline void *get_irq_chip_data(unsigned int irq)
-{
- return irq_get_chip_data(irq);
-}
-static inline void *get_irq_data(unsigned int irq)
-{
- return irq_get_handler_data(irq);
-}
-static inline void *irq_data_get_irq_data(struct irq_data *d)
-{
- return irq_data_get_irq_handler_data(d);
-}
-static inline struct msi_desc *get_irq_msi(unsigned int irq)
-{
- return irq_get_msi_desc(irq);
-}
-static inline void set_irq_noprobe(unsigned int irq)
-{
- irq_set_noprobe(irq);
-}
-static inline void set_irq_probe(unsigned int irq)
-{
- irq_set_probe(irq);
-}
-static inline void set_irq_nested_thread(unsigned int irq, int nest)
-{
- irq_set_nested_thread(irq, nest);
-}
-static inline void
-set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
- irq_flow_handler_t handle, const char *name)
-{
- irq_set_chip_and_handler_name(irq, chip, handle, name);
-}
-static inline void
-set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
- irq_flow_handler_t handle)
-{
- irq_set_chip_and_handler(irq, chip, handle);
-}
-static inline void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
- const char *name)
-{
- __irq_set_handler(irq, handle, is_chained, name);
-}
-static inline void set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
-{
- irq_set_handler(irq, handle);
-}
-static inline void
-set_irq_chained_handler(unsigned int irq, irq_flow_handler_t handle)
-{
- irq_set_chained_handler(irq, handle);
-}
-#endif
-
int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
void irq_free_descs(unsigned int irq, unsigned int cnt);
int irq_reserve_irqs(unsigned int from, unsigned int cnt);
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
-
-#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
struct irq_data irq_data;
-#else
- /*
- * This union will go away, once we fixed the direct access to
- * irq_desc all over the place. The direct fields are a 1:1
- * overlay of irq_data.
- */
- union {
- struct irq_data irq_data;
- struct {
- unsigned int irq;
- unsigned int node;
- unsigned int pad_do_not_even_think_about_it;
- struct irq_chip *chip;
- void *handler_data;
- void *chip_data;
- struct msi_desc *msi_desc;
-#ifdef CONFIG_SMP
- cpumask_var_t affinity;
-#endif
- };
- };
-#endif
-
struct timer_rand_state *timer_rand_state;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
irq_preflow_handler_t preflow_handler;
#endif
struct irqaction *action; /* IRQ action list */
-#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
unsigned int status_use_accessors;
-#else
- unsigned int status; /* IRQ status */
-#endif
unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
return desc->irq_data.msi_desc;
}
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
-static inline struct irq_chip *get_irq_desc_chip(struct irq_desc *desc)
-{
- return irq_desc_get_chip(desc);
-}
-static inline void *get_irq_desc_data(struct irq_desc *desc)
-{
- return irq_desc_get_handler_data(desc);
-}
-
-static inline void *get_irq_desc_chip_data(struct irq_desc *desc)
-{
- return irq_desc_get_chip_data(desc);
-}
-
-static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc)
-{
- return irq_desc_get_msi_desc(desc);
-}
-#endif
-
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt. If the descriptor is attached to an
desc->name = name;
}
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
-static inline void __set_irq_handler_unlocked(int irq,
- irq_flow_handler_t handler)
-{
- __irq_set_handler_locked(irq, handler);
-}
-
static inline int irq_balancing_disabled(unsigned int irq)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
- return desc->status & IRQ_NO_BALANCING_MASK;
+ return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
}
-#endif
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
unsigned long long *crash_size, unsigned long long *crash_base);
int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#else /* !CONFIG_KEXEC */
struct pt_regs;
+++ /dev/null
-#ifndef __SH_MOBILE_SDHI_H__
-#define __SH_MOBILE_SDHI_H__
-
-#include <linux/types.h>
-
-struct sh_mobile_sdhi_info {
- int dma_slave_tx;
- int dma_slave_rx;
- unsigned long tmio_flags;
- unsigned long tmio_caps;
- u32 tmio_ocr_mask; /* available MMC voltages */
- void (*set_pwr)(struct platform_device *pdev, int state);
- int (*get_cd)(struct platform_device *pdev);
-};
-
-#endif /* __SH_MOBILE_SDHI_H__ */
--- /dev/null
+#ifndef __SH_MOBILE_SDHI_H__
+#define __SH_MOBILE_SDHI_H__
+
+#include <linux/types.h>
+
+struct sh_mobile_sdhi_info {
+ int dma_slave_tx;
+ int dma_slave_rx;
+ unsigned long tmio_flags;
+ unsigned long tmio_caps;
+ u32 tmio_ocr_mask; /* available MMC voltages */
+ void (*set_pwr)(struct platform_device *pdev, int state);
+ int (*get_cd)(struct platform_device *pdev);
+};
+
+#endif /* __SH_MOBILE_SDHI_H__ */
--- /dev/null
+/*
+ * include/linux/mmc/tmio.h
+ *
+ * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2004 Ian Molton
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for the MMC / SD / SDIO cell found in:
+ *
+ * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
+ */
+#ifndef _LINUX_MMC_TMIO_H_
+#define _LINUX_MMC_TMIO_H_
+
+#define CTL_SD_CMD 0x00
+#define CTL_ARG_REG 0x04
+#define CTL_STOP_INTERNAL_ACTION 0x08
+#define CTL_XFER_BLK_COUNT 0xa
+#define CTL_RESPONSE 0x0c
+#define CTL_STATUS 0x1c
+#define CTL_IRQ_MASK 0x20
+#define CTL_SD_CARD_CLK_CTL 0x24
+#define CTL_SD_XFER_LEN 0x26
+#define CTL_SD_MEM_CARD_OPT 0x28
+#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
+#define CTL_SD_DATA_PORT 0x30
+#define CTL_TRANSACTION_CTL 0x34
+#define CTL_SDIO_STATUS 0x36
+#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_RESET_SD 0xe0
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+/* Definitions for values the CTRL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND 0x00000001
+#define TMIO_STAT_DATAEND 0x00000004
+#define TMIO_STAT_CARD_REMOVE 0x00000008
+#define TMIO_STAT_CARD_INSERT 0x00000010
+#define TMIO_STAT_SIGSTATE 0x00000020
+#define TMIO_STAT_WRPROTECT 0x00000080
+#define TMIO_STAT_CARD_REMOVE_A 0x00000100
+#define TMIO_STAT_CARD_INSERT_A 0x00000200
+#define TMIO_STAT_SIGSTATE_A 0x00000400
+#define TMIO_STAT_CMD_IDX_ERR 0x00010000
+#define TMIO_STAT_CRCFAIL 0x00020000
+#define TMIO_STAT_STOPBIT_ERR 0x00040000
+#define TMIO_STAT_DATATIMEOUT 0x00080000
+#define TMIO_STAT_RXOVERFLOW 0x00100000
+#define TMIO_STAT_TXUNDERRUN 0x00200000
+#define TMIO_STAT_CMDTIMEOUT 0x00400000
+#define TMIO_STAT_RXRDY 0x01000000
+#define TMIO_STAT_TXRQ 0x02000000
+#define TMIO_STAT_ILL_FUNC 0x20000000
+#define TMIO_STAT_CMD_BUSY 0x40000000
+#define TMIO_STAT_ILL_ACCESS 0x80000000
+
+#define TMIO_BBS 512 /* Boot block size */
+
+#endif /* _LINUX_MMC_TMIO_H_ */
struct sk_buff;
-/* To allow 64K frame to be packed as single skb without frag_list */
+/* To allow 64K frame to be packed as single skb without frag_list. Since
+ * GRO uses frags we allocate at least 16 regardless of page size.
+ */
+#if (65536/PAGE_SIZE + 2) < 16
+#define MAX_SKB_FRAGS 16UL
+#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+#endif
typedef struct skb_frag_struct skb_frag_t;
* Affects statistic (counters) and short packet handling.
*/
#define FLAG_MULTI_PACKET 0x1000
+#define FLAG_RX_ASSEMBLE 0x2000 /* rx packets may span >1 frames */
/* init device ... can sleep, or cause probe() failure */
int (*bind)(struct usbnet *, struct usb_interface *);
};
extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
+extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
+extern void usbnet_cdc_status(struct usbnet *, struct urb *);
/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */
#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
+#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+#else
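+/*
+ * With no MMU there is nothing to map or unmap, so these collapse to
+ * no-ops; the noflush variant simply reports the whole range as handled.
+ */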
+static inline int
+map_kernel_range_noflush(unsigned long start, unsigned long size,
+ pgprot_t prot, struct page **pages)
+{
+ return size >> PAGE_SHIFT;
+}
+static inline void
+unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+}
+static inline void
+unmap_kernel_range(unsigned long addr, unsigned long size)
+{
+}
+#endif
/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
#ifdef CONFIG_SMP
+# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
size_t align);
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
+# else
+static inline struct vm_struct **
+pcpu_get_vm_areas(const unsigned long *offsets,
+ const size_t *sizes, int nr_vms,
+ size_t align)
+{
+ return NULL;
+}
+
+static inline void
+pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
+{
+}
+# endif
#endif
#endif /* _LINUX_VMALLOC_H */
static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
- struct dst_entry *child = skb_dst(skb)->child;
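+	/* take our own reference on the child; skb_dst_drop() below may
+	 * free the parent dst that was keeping it alive
+	 */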
+ struct dst_entry *child = dst_clone(skb_dst(skb)->child);
skb_dst_drop(skb);
return child;
buf[9] = broadcast[9];
memcpy(buf + 10, addr->s6_addr + 6, 10);
}
+
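+/*
+ * Map an IPv6 address onto the IPv4 endpoint of a GRE device: use the
+ * IPv4 destination supplied in @broadcast when it is non-zero, otherwise
+ * require a v4-mapped address and use the IPv4 address embedded in it.
+ */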
+static inline int ipv6_ipgre_mc_map(const struct in6_addr *addr,
+ const unsigned char *broadcast, char *buf)
+{
+ if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0) {
+ memcpy(buf, broadcast, 4);
+ } else {
+ /* v4mapped? */
+ if ((addr->s6_addr32[0] | addr->s6_addr32[1] |
+ (addr->s6_addr32[2] ^ htonl(0x0000ffff))) != 0)
+ return -EINVAL;
+ memcpy(buf, &addr->s6_addr32[3], 4);
+ }
+ return 0;
+}
+
#endif
#endif
buf[16] = addr & 0x0f;
}
+static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
+{
+ if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
+ memcpy(buf, broadcast, 4);
+ else
+ memcpy(buf, &naddr, sizeof(naddr));
+}
+
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>
#endif
#define ROSE_MIN_LEN 3
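+/* Byte offsets into a ROSE CALL_REQUEST packet: the address-length octet,
+ * the called and calling addresses (10 BCD digits, 5 bytes each), then the
+ * start of the facilities data.
+ */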
+#define ROSE_CALL_REQ_ADDR_LEN_OFF 3
+#define ROSE_CALL_REQ_ADDR_LEN_VAL 0xAA /* each address is 10 digits */
+#define ROSE_CALL_REQ_DEST_ADDR_OFF 4
+#define ROSE_CALL_REQ_SRC_ADDR_OFF 9
+#define ROSE_CALL_REQ_FACILITIES_OFF 14
+
#define ROSE_GFI 0x10
#define ROSE_Q_BIT 0x80
#define ROSE_D_BIT 0x40
extern int rose_validate_nr(struct sock *, unsigned short);
extern void rose_write_internal(struct sock *, int);
extern int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
-extern int rose_parse_facilities(unsigned char *, struct rose_facilities_struct *);
+extern int rose_parse_facilities(unsigned char *, unsigned int, struct rose_facilities_struct *);
extern void rose_disconnect(struct sock *, int, int, int);
/* rose_timer.c */
}
#ifdef CONFIG_XFRM_MIGRATE
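+/*
+ * Duplicate the ESN replay window configuration (sizes only, the bitmap
+ * contents start out clear) from @orig and mirror it into the preplay
+ * state.
+ */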
+static inline int xfrm_replay_clone(struct xfrm_state *x,
+ struct xfrm_state *orig)
+{
+ x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
+ GFP_KERNEL);
+ if (!x->replay_esn)
+ return -ENOMEM;
+
+ x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
+ x->replay_esn->replay_window = orig->replay_esn->replay_window;
+
+ x->preplay_esn = kmemdup(x->replay_esn,
+ xfrm_replay_state_esn_len(x->replay_esn),
+ GFP_KERNEL);
+ if (!x->preplay_esn) {
+ kfree(x->replay_esn);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
{
return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
/* platform domain */
#define SND_SOC_DAPM_INPUT(wname) \
{ .id = snd_soc_dapm_input, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0}
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_OUTPUT(wname) \
{ .id = snd_soc_dapm_output, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0}
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_MIC(wname, wevent) \
{ .id = snd_soc_dapm_mic, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD}
#define SND_SOC_DAPM_HP(wname, wevent) \
{ .id = snd_soc_dapm_hp, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_SPK(wname, wevent) \
{ .id = snd_soc_dapm_spk, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_LINE(wname, wevent) \
{ .id = snd_soc_dapm_line, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
/* path domain */
/* events that are pre and post DAPM */
#define SND_SOC_DAPM_PRE(wname, wevent) \
{ .id = snd_soc_dapm_pre, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_POST(wname, wevent) \
{ .id = snd_soc_dapm_post, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD}
/* stream domain */
config GENERIC_HARDIRQS
def_bool y
-# Select this to disable the deprecated stuff
-config GENERIC_HARDIRQS_NO_DEPRECATED
- bool
-
-config GENERIC_HARDIRQS_NO_COMPAT
- bool
-
# Options selectable by the architecture code
# Make sparse irq Kconfig switch below available
raw_spin_lock_irq(&desc->lock);
if (!desc->action && irq_settings_can_probe(desc)) {
desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
- if (irq_startup(desc)) {
- irq_compat_set_pending(desc);
+ if (irq_startup(desc))
desc->istate |= IRQS_PENDING;
- }
}
raw_spin_unlock_irq(&desc->lock);
}
if (!chip)
chip = &no_irq_chip;
- irq_chip_set_defaults(chip);
desc->irq_data.chip = chip;
irq_put_desc_unlock(desc, flags);
/*
static void irq_state_clr_disabled(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
- irq_compat_clr_disabled(desc);
}
static void irq_state_set_disabled(struct irq_desc *desc)
{
irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
- irq_compat_set_disabled(desc);
}
static void irq_state_clr_masked(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
- irq_compat_clr_masked(desc);
}
static void irq_state_set_masked(struct irq_desc *desc)
{
irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
- irq_compat_set_masked(desc);
}
int irq_startup(struct irq_desc *desc)
}
}
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-/* Temporary migration helpers */
-static void compat_irq_mask(struct irq_data *data)
-{
- data->chip->mask(data->irq);
-}
-
-static void compat_irq_unmask(struct irq_data *data)
-{
- data->chip->unmask(data->irq);
-}
-
-static void compat_irq_ack(struct irq_data *data)
-{
- data->chip->ack(data->irq);
-}
-
-static void compat_irq_mask_ack(struct irq_data *data)
-{
- data->chip->mask_ack(data->irq);
-}
-
-static void compat_irq_eoi(struct irq_data *data)
-{
- data->chip->eoi(data->irq);
-}
-
-static void compat_irq_enable(struct irq_data *data)
-{
- data->chip->enable(data->irq);
-}
-
-static void compat_irq_disable(struct irq_data *data)
-{
- data->chip->disable(data->irq);
-}
-
-static void compat_irq_shutdown(struct irq_data *data)
-{
- data->chip->shutdown(data->irq);
-}
-
-static unsigned int compat_irq_startup(struct irq_data *data)
-{
- return data->chip->startup(data->irq);
-}
-
-static int compat_irq_set_affinity(struct irq_data *data,
- const struct cpumask *dest, bool force)
-{
- return data->chip->set_affinity(data->irq, dest);
-}
-
-static int compat_irq_set_type(struct irq_data *data, unsigned int type)
-{
- return data->chip->set_type(data->irq, type);
-}
-
-static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
-{
- return data->chip->set_wake(data->irq, on);
-}
-
-static int compat_irq_retrigger(struct irq_data *data)
-{
- return data->chip->retrigger(data->irq);
-}
-
-static void compat_bus_lock(struct irq_data *data)
-{
- data->chip->bus_lock(data->irq);
-}
-
-static void compat_bus_sync_unlock(struct irq_data *data)
-{
- data->chip->bus_sync_unlock(data->irq);
-}
-#endif
-
-/*
- * Fixup enable/disable function pointers
- */
-void irq_chip_set_defaults(struct irq_chip *chip)
-{
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
- if (chip->enable)
- chip->irq_enable = compat_irq_enable;
- if (chip->disable)
- chip->irq_disable = compat_irq_disable;
- if (chip->shutdown)
- chip->irq_shutdown = compat_irq_shutdown;
- if (chip->startup)
- chip->irq_startup = compat_irq_startup;
- if (!chip->end)
- chip->end = dummy_irq_chip.end;
- if (chip->bus_lock)
- chip->irq_bus_lock = compat_bus_lock;
- if (chip->bus_sync_unlock)
- chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
- if (chip->mask)
- chip->irq_mask = compat_irq_mask;
- if (chip->unmask)
- chip->irq_unmask = compat_irq_unmask;
- if (chip->ack)
- chip->irq_ack = compat_irq_ack;
- if (chip->mask_ack)
- chip->irq_mask_ack = compat_irq_mask_ack;
- if (chip->eoi)
- chip->irq_eoi = compat_irq_eoi;
- if (chip->set_affinity)
- chip->irq_set_affinity = compat_irq_set_affinity;
- if (chip->set_type)
- chip->irq_set_type = compat_irq_set_type;
- if (chip->set_wake)
- chip->irq_set_wake = compat_irq_set_wake;
- if (chip->retrigger)
- chip->irq_retrigger = compat_irq_retrigger;
-#endif
-}
-
static inline void mask_ack_irq(struct irq_desc *desc)
{
if (desc->irq_data.chip->irq_mask_ack)
if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
goto out_unlock;
- irq_compat_set_progress(desc);
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock_irq(&desc->lock);
raw_spin_lock_irq(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
- irq_compat_clr_progress(desc);
out_unlock:
raw_spin_unlock_irq(&desc->lock);
* then mask it and get out of here:
*/
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
- irq_compat_set_pending(desc);
desc->istate |= IRQS_PENDING;
mask_irq(desc);
goto out;
if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
if (!irq_check_poll(desc)) {
- irq_compat_set_pending(desc);
desc->istate |= IRQS_PENDING;
mask_ack_irq(desc);
goto out_unlock;
} while ((desc->istate & IRQS_PENDING) &&
!irqd_irq_disabled(&desc->irq_data));
-out_unlock:
+out_eoi:
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
+++ /dev/null
-/*
- * Compat layer for transition period
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
-static inline void irq_compat_set_progress(struct irq_desc *desc)
-{
- desc->status |= IRQ_INPROGRESS;
-}
-
-static inline void irq_compat_clr_progress(struct irq_desc *desc)
-{
- desc->status &= ~IRQ_INPROGRESS;
-}
-static inline void irq_compat_set_disabled(struct irq_desc *desc)
-{
- desc->status |= IRQ_DISABLED;
-}
-static inline void irq_compat_clr_disabled(struct irq_desc *desc)
-{
- desc->status &= ~IRQ_DISABLED;
-}
-static inline void irq_compat_set_pending(struct irq_desc *desc)
-{
- desc->status |= IRQ_PENDING;
-}
-
-static inline void irq_compat_clr_pending(struct irq_desc *desc)
-{
- desc->status &= ~IRQ_PENDING;
-}
-static inline void irq_compat_set_masked(struct irq_desc *desc)
-{
- desc->status |= IRQ_MASKED;
-}
-
-static inline void irq_compat_clr_masked(struct irq_desc *desc)
-{
- desc->status &= ~IRQ_MASKED;
-}
-static inline void irq_compat_set_move_pending(struct irq_desc *desc)
-{
- desc->status |= IRQ_MOVE_PENDING;
-}
-
-static inline void irq_compat_clr_move_pending(struct irq_desc *desc)
-{
- desc->status &= ~IRQ_MOVE_PENDING;
-}
-static inline void irq_compat_set_affinity(struct irq_desc *desc)
-{
- desc->status |= IRQ_AFFINITY_SET;
-}
-
-static inline void irq_compat_clr_affinity(struct irq_desc *desc)
-{
- desc->status &= ~IRQ_AFFINITY_SET;
-}
-#else
-static inline void irq_compat_set_progress(struct irq_desc *desc) { }
-static inline void irq_compat_clr_progress(struct irq_desc *desc) { }
-static inline void irq_compat_set_disabled(struct irq_desc *desc) { }
-static inline void irq_compat_clr_disabled(struct irq_desc *desc) { }
-static inline void irq_compat_set_pending(struct irq_desc *desc) { }
-static inline void irq_compat_clr_pending(struct irq_desc *desc) { }
-static inline void irq_compat_set_masked(struct irq_desc *desc) { }
-static inline void irq_compat_clr_masked(struct irq_desc *desc) { }
-static inline void irq_compat_set_move_pending(struct irq_desc *desc) { }
-static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { }
-static inline void irq_compat_set_affinity(struct irq_desc *desc) { }
-static inline void irq_compat_clr_affinity(struct irq_desc *desc) { }
-#endif
-
#include <linux/kallsyms.h>
-#define P(f) if (desc->status & f) printk("%14s set\n", #f)
+#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
/* FIXME */
#define PD(f) do { } while (0)
return 0;
}
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-static void compat_noop(unsigned int irq) { }
-#define END_INIT .end = compat_noop
-#else
-#define END_INIT
-#endif
-
/*
* Generic no controller implementation
*/
.irq_enable = noop,
.irq_disable = noop,
.irq_ack = ack_bad,
- END_INIT
};
/*
.irq_ack = noop,
.irq_mask = noop,
.irq_unmask = noop,
- END_INIT
};
struct irqaction *action = desc->action;
irqreturn_t ret;
- irq_compat_clr_pending(desc);
desc->istate &= ~IRQS_PENDING;
- irq_compat_set_progress(desc);
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock(&desc->lock);
raw_spin_lock(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
- irq_compat_clr_progress(desc);
return ret;
}
#define istate core_internal_state__do_not_mess_with_it
-#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
-# define status status_use_accessors
-#endif
-
extern int noirqdebug;
/*
IRQS_SUSPENDED = 0x00000800,
};
-#include "compat.h"
#include "debug.h"
#include "settings.h"
#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
-/* Set default functions for irq_chip structures: */
-extern void irq_chip_set_defaults(struct irq_chip *chip);
-
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags);
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
static inline void irqd_set_move_pending(struct irq_data *d)
{
d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
- irq_compat_set_move_pending(irq_data_to_desc(d));
}
static inline void irqd_clr_move_pending(struct irq_data *d)
{
d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
- irq_compat_clr_move_pending(irq_data_to_desc(d));
}
static inline void irqd_clear(struct irq_data *d, unsigned int mask)
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
-static inline bool irq_move_pending(struct irq_desc *data) { return false; }
+static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
kref_get(&desc->affinity_notify->kref);
schedule_work(&desc->affinity_notify->work);
}
- irq_compat_set_affinity(desc);
irqd_set(data, IRQD_AFFINITY_SET);
return ret;
if (cpumask_intersects(desc->irq_data.affinity,
cpu_online_mask))
set = desc->irq_data.affinity;
- else {
- irq_compat_clr_affinity(desc);
+ else
irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
- }
}
cpumask_and(mask, cpu_online_mask, set);
irqd_set(&desc->irq_data, IRQD_LEVEL);
}
- if (chip != desc->irq_data.chip)
- irq_chip_set_defaults(desc->irq_data.chip);
ret = 0;
break;
default:
* but AFAICT IRQS_PENDING should be fine as it
* retriggers the interrupt itself --- tglx
*/
- irq_compat_set_pending(desc);
desc->istate |= IRQS_PENDING;
raw_spin_unlock_irq(&desc->lock);
} else {
new->thread_mask = 1 << ffz(thread_mask);
if (!shared) {
- irq_chip_set_defaults(desc->irq_data.chip);
-
init_waitqueue_head(&desc->wait_for_threads);
/* Setup the type (level, edge polarity) if configured: */
cpumask_clear(desc->pending_mask);
}
-void move_masked_irq(int irq)
-{
- irq_move_masked_irq(irq_get_irq_data(irq));
-}
-
void irq_move_irq(struct irq_data *idata)
{
bool masked;
if (!masked)
idata->chip->irq_unmask(idata);
}
-
-void move_native_irq(int irq)
-{
- irq_move_irq(irq_get_irq_data(irq));
-}
return 0;
}
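+/* architectures may define ACTUAL_NR_IRQS when the number of lines shown
+ * in /proc/interrupts differs from nr_irqs; default to nr_irqs
+ */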
+#ifndef ACTUAL_NR_IRQS
+# define ACTUAL_NR_IRQS nr_irqs
+#endif
+
int show_interrupts(struct seq_file *p, void *v)
{
static int prec;
struct irqaction *action;
struct irq_desc *desc;
- if (i > nr_irqs)
+ if (i > ACTUAL_NR_IRQS)
return 0;
- if (i == nr_irqs)
+ if (i == ACTUAL_NR_IRQS)
return arch_show_interrupts(p, prec);
/* print header and calculate the width of the first column */
if (desc->istate & IRQS_REPLAY)
return;
if (desc->istate & IRQS_PENDING) {
- irq_compat_clr_pending(desc);
desc->istate &= ~IRQS_PENDING;
desc->istate |= IRQS_REPLAY;
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
-#define IRQ_INPROGRESS GOT_YOU_MORON
-#define IRQ_REPLAY GOT_YOU_MORON
-#define IRQ_WAITING GOT_YOU_MORON
-#define IRQ_DISABLED GOT_YOU_MORON
-#define IRQ_PENDING GOT_YOU_MORON
-#define IRQ_MASKED GOT_YOU_MORON
-#define IRQ_WAKEUP GOT_YOU_MORON
-#define IRQ_MOVE_PENDING GOT_YOU_MORON
#define IRQ_PER_CPU GOT_YOU_MORON
#define IRQ_NO_BALANCING GOT_YOU_MORON
-#define IRQ_AFFINITY_SET GOT_YOU_MORON
#define IRQ_LEVEL GOT_YOU_MORON
#define IRQ_NOPROBE GOT_YOU_MORON
#define IRQ_NOREQUEST GOT_YOU_MORON
static inline void
irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
{
- desc->status &= ~(clr & _IRQF_MODIFY_MASK);
- desc->status |= (set & _IRQF_MODIFY_MASK);
+ desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK);
+ desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
- return desc->status & _IRQ_PER_CPU;
+ return desc->status_use_accessors & _IRQ_PER_CPU;
}
static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
{
- desc->status |= _IRQ_PER_CPU;
+ desc->status_use_accessors |= _IRQ_PER_CPU;
}
static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
{
- desc->status |= _IRQ_NO_BALANCING;
+ desc->status_use_accessors |= _IRQ_NO_BALANCING;
}
static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
{
- return desc->status & _IRQ_NO_BALANCING;
+ return desc->status_use_accessors & _IRQ_NO_BALANCING;
}
static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
{
- return desc->status & IRQ_TYPE_SENSE_MASK;
+ return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
}
static inline void
irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
{
- desc->status &= ~IRQ_TYPE_SENSE_MASK;
- desc->status |= mask & IRQ_TYPE_SENSE_MASK;
+ desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK;
+ desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;
}
static inline bool irq_settings_is_level(struct irq_desc *desc)
{
- return desc->status & _IRQ_LEVEL;
+ return desc->status_use_accessors & _IRQ_LEVEL;
}
static inline void irq_settings_clr_level(struct irq_desc *desc)
{
- desc->status &= ~_IRQ_LEVEL;
+ desc->status_use_accessors &= ~_IRQ_LEVEL;
}
static inline void irq_settings_set_level(struct irq_desc *desc)
{
- desc->status |= _IRQ_LEVEL;
+ desc->status_use_accessors |= _IRQ_LEVEL;
}
static inline bool irq_settings_can_request(struct irq_desc *desc)
{
- return !(desc->status & _IRQ_NOREQUEST);
+ return !(desc->status_use_accessors & _IRQ_NOREQUEST);
}
static inline void irq_settings_clr_norequest(struct irq_desc *desc)
{
- desc->status &= ~_IRQ_NOREQUEST;
+ desc->status_use_accessors &= ~_IRQ_NOREQUEST;
}
static inline void irq_settings_set_norequest(struct irq_desc *desc)
{
- desc->status |= _IRQ_NOREQUEST;
+ desc->status_use_accessors |= _IRQ_NOREQUEST;
}
static inline bool irq_settings_can_probe(struct irq_desc *desc)
{
- return !(desc->status & _IRQ_NOPROBE);
+ return !(desc->status_use_accessors & _IRQ_NOPROBE);
}
static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
{
- desc->status &= ~_IRQ_NOPROBE;
+ desc->status_use_accessors &= ~_IRQ_NOPROBE;
}
static inline void irq_settings_set_noprobe(struct irq_desc *desc)
{
- desc->status |= _IRQ_NOPROBE;
+ desc->status_use_accessors |= _IRQ_NOPROBE;
}
static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
{
- return desc->status & _IRQ_MOVE_PCNTXT;
+ return desc->status_use_accessors & _IRQ_MOVE_PCNTXT;
}
static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
{
- return !(desc->status & _IRQ_NOAUTOEN);
+ return !(desc->status_use_accessors & _IRQ_NOAUTOEN);
}
static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
{
- return desc->status & _IRQ_NESTED_THREAD;
+ return desc->status_use_accessors & _IRQ_NESTED_THREAD;
}
-
-/* Nothing should touch desc->status from now on */
-#undef status
-#define status USE_THE_PROPER_WRAPPERS_YOU_MORON
* Already running: If it is shared get the other
* CPU to go looking for our mystery interrupt too
*/
- irq_compat_set_pending(desc);
desc->istate |= IRQS_PENDING;
goto out;
}
return size;
}
-static void free_reserved_phys_range(unsigned long begin, unsigned long end)
+void __weak crash_free_reserved_phys_range(unsigned long begin,
+ unsigned long end)
{
unsigned long addr;
start = roundup(start, PAGE_SIZE);
end = roundup(start + new_size, PAGE_SIZE);
- free_reserved_phys_range(end, crashk_res.end);
+ crash_free_reserved_phys_range(end, crashk_res.end);
if ((start == end) && (crashk_res.parent != NULL))
release_resource(&crashk_res);
}
EXPORT_SYMBOL(filemap_fault);
-/*
- * Access another process' address space.
- * - source/target buffer must be kernel space
- */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long addr, void *buf, int len, int write)
{
struct vm_area_struct *vma;
- struct mm_struct *mm;
-
- if (addr + len < addr)
- return 0;
-
- mm = get_task_mm(tsk);
- if (!mm)
- return 0;
down_read(&mm->mmap_sem);
}
up_read(&mm->mmap_sem);
+
+ return len;
+}
+
+/**
+ * access_remote_vm - access another process' address space
+ * @mm: the mm_struct of the target address space
+ * @addr: start address to access
+ * @buf: source or destination buffer
+ * @len: number of bytes to transfer
+ * @write: whether the access is a write
+ *
+ * The caller must hold a reference on @mm.
+ */
+int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+ void *buf, int len, int write)
+{
+ return __access_remote_vm(NULL, mm, addr, buf, len, write);
+}
+
+/*
+ * Access another process' address space.
+ * - source/target buffer must be kernel space
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+ struct mm_struct *mm;
+
+ if (addr + len < addr)
+ return 0;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ return 0;
+
+ len = __access_remote_vm(tsk, mm, addr, buf, len, write);
+
mmput(mm);
return len;
}
}
if (in_first_chunk) {
- if ((unsigned long)addr < VMALLOC_START ||
- (unsigned long)addr >= VMALLOC_END)
+ if (!is_vmalloc_addr(addr))
return __pa(addr);
else
return page_to_phys(vmalloc_to_page(addr));
{
struct sock *sk = sock->sk;
- sock_hold(sk);
- lock_sock(sk);
if (sk) {
+ sock_hold(sk);
+ lock_sock(sk);
+
sock_orphan(sk);
sock->sk = NULL;
atalk_destroy_socket(sk);
- }
- release_sock(sk);
- sock_put(sk);
+ release_sock(sk);
+ sock_put(sk);
+ }
return 0;
}
}
write_unlock_irq(&vcc_sklist_lock);
}
+EXPORT_SYMBOL(atm_dev_release_vccs);
static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
{
{
struct net_bridge_port *p;
int err = 0;
+ bool changed_addr;
/* Don't allow bridging non-ethernet like devices */
if ((dev->flags & IFF_LOOPBACK) ||
list_add_rcu(&p->list, &br->port_list);
spin_lock_bh(&br->lock);
- br_stp_recalculate_bridge_id(br);
+ changed_addr = br_stp_recalculate_bridge_id(br);
br_features_recompute(br);
if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
br_ifinfo_notify(RTM_NEWLINK, p);
+ if (changed_addr)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+
dev_set_mtu(br->dev, br_min_mtu(br));
kobject_uevent(&p->kobj, KOBJ_ADD);
ip6h->payload_len == 0)
return 0;
- len = ntohs(ip6h->payload_len);
+ len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
if (skb->len < len)
return -EINVAL;
extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
extern void br_stp_enable_port(struct net_bridge_port *p);
extern void br_stp_disable_port(struct net_bridge_port *p);
-extern void br_stp_recalculate_bridge_id(struct net_bridge *br);
+extern bool br_stp_recalculate_bridge_id(struct net_bridge *br);
extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
extern void br_stp_set_bridge_priority(struct net_bridge *br,
u16 newprio);
static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
/* called under bridge lock */
-void br_stp_recalculate_bridge_id(struct net_bridge *br)
+bool br_stp_recalculate_bridge_id(struct net_bridge *br)
{
const unsigned char *br_mac_zero =
(const unsigned char *)br_mac_zero_aligned;
/* user has chosen a value so keep it */
if (br->flags & BR_SET_MAC_ADDR)
- return;
+ return false;
list_for_each_entry(p, &br->port_list, list) {
if (addr == br_mac_zero ||
}
- if (compare_ether_addr(br->bridge_id.addr, addr))
- br_stp_change_bridge_id(br, addr);
+ if (compare_ether_addr(br->bridge_id.addr, addr) == 0)
+ return false; /* no change */
+
+ br_stp_change_bridge_id(br, addr);
+ return true;
}
/* called under bridge lock */
* af_can socket functions
*/
-static int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
return -ENOIOCTLCMD;
}
}
+EXPORT_SYMBOL(can_ioctl);
static void can_sock_destruct(struct sock *sk)
{
printk(KERN_ERR "can: protocol %d already registered\n",
proto);
err = -EBUSY;
- } else {
+ } else
proto_tab[proto] = cp;
- /* use generic ioctl function if not defined by module */
- if (!cp->ops->ioctl)
- cp->ops->ioctl = can_ioctl;
- }
spin_unlock(&proto_tab_lock);
if (err < 0)
return size;
}
-static struct proto_ops bcm_ops __read_mostly = {
+static const struct proto_ops bcm_ops = {
.family = PF_CAN,
.release = bcm_release,
.bind = sock_no_bind,
.accept = sock_no_accept,
.getname = sock_no_getname,
.poll = datagram_poll,
- .ioctl = NULL, /* use can_ioctl() from af_can.c */
+ .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
return size;
}
-static struct proto_ops raw_ops __read_mostly = {
+static const struct proto_ops raw_ops = {
.family = PF_CAN,
.release = raw_release,
.bind = raw_bind,
.accept = sock_no_accept,
.getname = raw_getname,
.poll = datagram_poll,
- .ioctl = NULL, /* use can_ioctl() from af_can.c */
+ .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = raw_setsockopt,
select LIBCRC32C
select CRYPTO_AES
select CRYPTO
+ select KEYS
default n
help
Choose Y or M here to include cephlib, which provides the
/*
* setup, teardown.
*/
-struct ceph_auth_client *ceph_auth_init(const char *name, const char *secret)
+struct ceph_auth_client *ceph_auth_init(const char *name, const struct ceph_crypto_key *key)
{
struct ceph_auth_client *ac;
int ret;
- dout("auth_init name '%s' secret '%s'\n", name, secret);
+ dout("auth_init name '%s'\n", name);
ret = -ENOMEM;
ac = kzalloc(sizeof(*ac), GFP_NOFS);
ac->name = name;
else
ac->name = CEPH_AUTH_NAME_DEFAULT;
- dout("auth_init name %s secret %s\n", ac->name, secret);
- ac->secret = secret;
+ dout("auth_init name %s\n", ac->name);
+ ac->key = key;
return ac;
out:
goto out;
ret = -EINVAL;
- if (!ac->secret) {
+ if (!ac->key) {
pr_err("no secret set (for auth_x protocol)\n");
goto out_nomem;
}
- ret = ceph_crypto_key_unarmor(&xi->secret, ac->secret);
- if (ret)
+ ret = ceph_crypto_key_clone(&xi->secret, ac->key);
+ if (ret < 0) {
+ pr_err("cannot clone key: %d\n", ret);
goto out_nomem;
+ }
xi->starting = true;
xi->ticket_handlers = RB_ROOT;
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
+#include <linux/key.h>
+#include <keys/ceph-type.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
+#include "crypto.h"
if (ret)
return ret;
- ret = strcmp_null(opt1->secret, opt2->secret);
- if (ret)
- return ret;
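+	/* Compare the parsed keys field by field; any difference in type,
+	 * creation time, length or key material means the options differ.
+	 */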
+ if (opt1->key && !opt2->key)
+ return -1;
+ if (!opt1->key && opt2->key)
+ return 1;
+ if (opt1->key && opt2->key) {
+ if (opt1->key->type != opt2->key->type)
+ return -1;
+ if (opt1->key->created.tv_sec != opt2->key->created.tv_sec)
+ return -1;
+ if (opt1->key->created.tv_nsec != opt2->key->created.tv_nsec)
+ return -1;
+ if (opt1->key->len != opt2->key->len)
+ return -1;
+ if (opt1->key->key && !opt2->key->key)
+ return -1;
+ if (!opt1->key->key && opt2->key->key)
+ return 1;
+ if (opt1->key->key && opt2->key->key) {
+ ret = memcmp(opt1->key->key, opt2->key->key, opt1->key->len);
+ if (ret)
+ return ret;
+ }
+ }
/* any matching mon ip implies a match */
for (i = 0; i < opt1->num_mon; i++) {
Opt_fsid,
Opt_name,
Opt_secret,
+ Opt_key,
Opt_ip,
Opt_last_string,
/* string args above */
{Opt_fsid, "fsid=%s"},
{Opt_name, "name=%s"},
{Opt_secret, "secret=%s"},
+ {Opt_key, "key=%s"},
{Opt_ip, "ip=%s"},
/* string args above */
{Opt_noshare, "noshare"},
{
dout("destroy_options %p\n", opt);
kfree(opt->name);
- kfree(opt->secret);
+ if (opt->key) {
+ ceph_crypto_key_destroy(opt->key);
+ kfree(opt->key);
+ }
kfree(opt);
}
EXPORT_SYMBOL(ceph_destroy_options);
+/* get secret from key store */
+static int get_secret(struct ceph_crypto_key *dst, const char *name)
+{
+ struct key *ukey;
+ int key_err;
+ int err = 0;
+ struct ceph_crypto_key *ckey;
+
+ ukey = request_key(&key_type_ceph, name, NULL);
+ if (!ukey || IS_ERR(ukey)) {
+		/* request_key errors don't map nicely to mount(2)
+		 * errors; don't even try, but still printk
+		 */
+ key_err = PTR_ERR(ukey);
+ switch (key_err) {
+ case -ENOKEY:
+ pr_warning("ceph: Mount failed due to key not found: %s\n", name);
+ break;
+ case -EKEYEXPIRED:
+ pr_warning("ceph: Mount failed due to expired key: %s\n", name);
+ break;
+ case -EKEYREVOKED:
+ pr_warning("ceph: Mount failed due to revoked key: %s\n", name);
+ break;
+ default:
+ pr_warning("ceph: Mount failed due to unknown key error"
+ " %d: %s\n", key_err, name);
+ }
+ err = -EPERM;
+ goto out;
+ }
+
+ ckey = ukey->payload.data;
+ err = ceph_crypto_key_clone(dst, ckey);
+ if (err)
+ goto out_key;
+ /* pass through, err is 0 */
+
+out_key:
+ key_put(ukey);
+out:
+ return err;
+}
+
int ceph_parse_options(struct ceph_options **popt, char *options,
const char *dev_name, const char *dev_name_end,
int (*parse_extra_token)(char *c, void *private),
GFP_KERNEL);
break;
case Opt_secret:
- opt->secret = kstrndup(argstr[0].from,
- argstr[0].to-argstr[0].from,
- GFP_KERNEL);
+ opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
+ if (!opt->key) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = ceph_crypto_key_unarmor(opt->key, argstr[0].from);
+ if (err < 0)
+ goto out;
+ break;
+ case Opt_key:
+ opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
+ if (!opt->key) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = get_secret(opt->key, argstr[0].from);
+ if (err < 0)
+ goto out;
break;
/* misc */
ceph_osdc_stop(&client->osdc);
/*
- * make sure mds and osd connections close out before destroying
- * the auth module, which is needed to free those connections'
+ * make sure osd connections close out before destroying the
+ * auth module, which is needed to free those connections'
* ceph_authorizers.
*/
ceph_msgr_flush();
if (ret < 0)
goto out;
- ret = ceph_msgr_init();
+ ret = ceph_crypto_init();
if (ret < 0)
goto out_debugfs;
+ ret = ceph_msgr_init();
+ if (ret < 0)
+ goto out_crypto;
+
pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n",
CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL,
CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
return 0;
+out_crypto:
+ ceph_crypto_shutdown();
out_debugfs:
ceph_debugfs_cleanup();
out:
{
dout("exit_ceph_lib\n");
ceph_msgr_exit();
+ ceph_crypto_shutdown();
ceph_debugfs_cleanup();
}
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/hash.h>
+#include <linux/key-type.h>
+#include <keys/ceph-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"
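+/* Duplicate a crypto key, deep-copying the key material so that the
+ * destination owns its own buffer.
+ */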
+int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
+ const struct ceph_crypto_key *src)
+{
+ memcpy(dst, src, sizeof(struct ceph_crypto_key));
+ dst->key = kmalloc(src->len, GFP_NOFS);
+ if (!dst->key)
+ return -ENOMEM;
+ memcpy(dst->key, src->key, src->len);
+ return 0;
+}
+
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
if (*p + sizeof(u16) + sizeof(key->created) +
return -EINVAL;
}
}
+
+int ceph_key_instantiate(struct key *key, const void *data, size_t datalen)
+{
+ struct ceph_crypto_key *ckey;
+ int ret;
+ void *p;
+
+ ret = -EINVAL;
+ if (datalen <= 0 || datalen > 32767 || !data)
+ goto err;
+
+ ret = key_payload_reserve(key, datalen);
+ if (ret < 0)
+ goto err;
+
+ ret = -ENOMEM;
+ ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
+ if (!ckey)
+ goto err;
+
+ /* TODO ceph_crypto_key_decode should really take const input */
+	p = (void *)data;
+	ret = ceph_crypto_key_decode(ckey, &p, (char *)data + datalen);
+ if (ret < 0)
+ goto err_ckey;
+
+ key->payload.data = ckey;
+ return 0;
+
+err_ckey:
+ kfree(ckey);
+err:
+ return ret;
+}
+
+int ceph_key_match(const struct key *key, const void *description)
+{
+ return strcmp(key->description, description) == 0;
+}
+
+void ceph_key_destroy(struct key *key)
+{
+ struct ceph_crypto_key *ckey = key->payload.data;
+
+ ceph_crypto_key_destroy(ckey);
+}
+
+struct key_type key_type_ceph = {
+ .name = "ceph",
+ .instantiate = ceph_key_instantiate,
+ .match = ceph_key_match,
+ .destroy = ceph_key_destroy,
+};
+
+int ceph_crypto_init(void)
+{
+ return register_key_type(&key_type_ceph);
+}
+
+void ceph_crypto_shutdown(void)
+{
+ unregister_key_type(&key_type_ceph);
+}
kfree(key->key);
}
+extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
+ const struct ceph_crypto_key *src);
extern int ceph_crypto_key_encode(struct ceph_crypto_key *key,
void **p, void *end);
extern int ceph_crypto_key_decode(struct ceph_crypto_key *key,
void *dst, size_t *dst_len,
const void *src1, size_t src1_len,
const void *src2, size_t src2_len);
+extern int ceph_crypto_init(void);
+extern void ceph_crypto_shutdown(void);
/* armor.c */
extern int ceph_armor(char *dst, const char *src, const char *end);
/* authentication */
monc->auth = ceph_auth_init(cl->options->name,
- cl->options->secret);
+ cl->options->key);
if (IS_ERR(monc->auth))
return PTR_ERR(monc->auth);
monc->auth->want_keys =
dout("moving osd to %p lru\n", req->r_osd);
__move_osd_to_lru(osdc, req->r_osd);
}
- if (list_empty(&req->r_osd_item) &&
- list_empty(&req->r_linger_item))
+ if (list_empty(&req->r_linger_item))
req->r_osd = NULL;
}
dout("moving osd to %p lru\n", req->r_osd);
__move_osd_to_lru(osdc, req->r_osd);
}
- req->r_osd = NULL;
+ if (list_empty(&req->r_osd_item))
+ req->r_osd = NULL;
}
}
cookie, ver, event);
if (event) {
event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
- INIT_WORK(&event_work->work, do_event_work);
if (!event_work) {
dout("ERROR: could not allocate event_work\n");
goto done_err;
}
+ INIT_WORK(&event_work->work, do_event_work);
event_work->event = event;
event_work->ver = ver;
event_work->notify_id = notify_id;
if (req->r_sent == 0) {
rc = __map_request(osdc, req);
if (rc < 0)
- return rc;
+ goto out_unlock;
if (req->r_osd == NULL) {
dout("send_request %p no up osds in pg\n", req);
ceph_monc_request_next_osdmap(&osdc->client->monc);
}
}
}
+
+out_unlock:
mutex_unlock(&osdc->request_mutex);
up_read(&osdc->map_sem);
return rc;
ASSERT_RTNL();
- /*
- * Is it even present?
- */
if (!netif_device_present(dev))
return -ENODEV;
if (ret)
return ret;
- /*
- * Call device private open method
- */
set_bit(__LINK_STATE_START, &dev->state);
if (ops->ndo_validate_addr)
if (!ret && ops->ndo_open)
ret = ops->ndo_open(dev);
- /*
- * If it went open OK then:
- */
-
if (ret)
clear_bit(__LINK_STATE_START, &dev->state);
else {
- /*
- * Set the flags.
- */
dev->flags |= IFF_UP;
-
- /*
- * Enable NET_DMA
- */
net_dmaengine_get();
-
- /*
- * Initialize multicasting status
- */
dev_set_rx_mode(dev);
-
- /*
- * Wakeup transmit queue engine
- */
dev_activate(dev);
}
{
int ret;
- /*
- * Is it already up?
- */
if (dev->flags & IFF_UP)
return 0;
- /*
- * Open device
- */
ret = __dev_open(dev);
if (ret < 0)
return ret;
- /*
- * ... and announce new interface.
- */
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
call_netdevice_notifiers(NETDEV_UP, dev);
might_sleep();
list_for_each_entry(dev, head, unreg_list) {
- /*
- * Tell people we are going down, so that they can
- * prepare to death, when device is still operating.
- */
call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
clear_bit(__LINK_STATE_START, &dev->state);
if (ops->ndo_stop)
ops->ndo_stop(dev);
- /*
- * Device is now down.
- */
-
dev->flags &= ~IFF_UP;
-
- /*
- * Shutdown NET_DMA
- */
net_dmaengine_put();
}
__dev_close_many(head);
- /*
- * Tell people we are down
- */
list_for_each_entry(dev, head, unreg_list) {
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
call_netdevice_notifiers(NETDEV_DOWN, dev);
static int dev_boot_phase = 1;
-/*
- * Device change register/unregister. These are not inline or static
- * as we export them to the world.
- */
-
/**
* register_netdevice_notifier - register a network notifier block
* @nb: notifier
ASSERT_RTNL();
return raw_notifier_call_chain(&netdev_chain, val, dev);
}
+EXPORT_SYMBOL(call_netdevice_notifiers);
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);
__net_timestamp(skb);
}
+static inline bool is_skb_forwardable(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ unsigned int len;
+
+ if (!(dev->flags & IFF_UP))
+ return false;
+
+ len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
+ if (skb->len <= len)
+ return true;
+
+ /* if TSO is enabled, we don't care about the length as the packet
+	 * could be forwarded without being segmented beforehand
+ */
+ if (skb_is_gso(skb))
+ return true;
+
+ return false;
+}
+
/**
* dev_forward_skb - loopback an skb to another netif
*
skb_orphan(skb);
nf_reset(skb);
- if (unlikely(!(dev->flags & IFF_UP) ||
- (skb->len > (dev->mtu + dev->hard_header_len + VLAN_HLEN)))) {
+ if (unlikely(!is_skb_forwardable(dev, skb))) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
EXPORT_SYMBOL(ethtool_op_get_flags);
+/* Check if the device can enable (or disable) the particular feature coded in
+ * the "data" argument. The "supported" flags describe the features that the
+ * device can toggle. If a feature cannot be toggled, its state (enabled or
+ * disabled) must match the hardcoded device feature state, otherwise the
+ * flags are considered invalid.
+ */
+bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported)
+{
+ u32 features = dev->features & flags_dup_features;
+ /* "data" can contain only flags_dup_features bits,
+ * see __ethtool_set_flags */
+
+ return (features & ~supported) != (data & ~supported);
+}
+EXPORT_SYMBOL(ethtool_invalid_flags);
+
int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
{
- if (data & ~supported)
+ if (ethtool_invalid_flags(dev, data, supported))
return -EINVAL;
dev->features = ((dev->features & ~flags_dup_features) |
case ARPHRD_INFINIBAND:
ip_ib_mc_map(addr, dev->broadcast, haddr);
return 0;
+ case ARPHRD_IPGRE:
+ ip_ipgre_mc_map(addr, dev->broadcast, haddr);
+ return 0;
default:
if (dir) {
memcpy(haddr, dev->broadcast, dev->addr_len);
fib4_rules_exit(net);
#endif
+ rtnl_lock();
for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
struct fib_table *tb;
struct hlist_head *head;
fib_free_table(tb);
}
}
+ rtnl_unlock();
kfree(net->ipv4.fib_table_hash);
}
err = fib_props[fa->fa_type].error;
if (err) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
- t->stats.semantic_match_miss++;
+ t->stats.semantic_match_passed++;
#endif
- return 1;
+ return err;
}
if (fi->fib_flags & RTNH_F_DEAD)
continue;
} else {
dopt->ts_needtime = 0;
- if (soffset + 8 <= optlen) {
+ if (soffset + 7 <= optlen) {
__be32 addr;
- memcpy(&addr, sptr+soffset-1, 4);
- if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_LOCAL) {
+ memcpy(&addr, dptr+soffset-1, 4);
+ if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) {
dopt->ts_needtime = 1;
soffset += 8;
}
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
+ rt = NULL;
goto done;
}
}
skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_IPV6);
- skb->ip_summed = 0;
+ skb->ip_summed = CHECKSUM_NONE;
skb->pkt_type = PACKET_HOST;
skb_tunnel_rx(skb, reg_dev);
case ARPHRD_INFINIBAND:
ipv6_ib_mc_map(addr, dev->broadcast, buf);
return 0;
+ case ARPHRD_IPGRE:
+ return ipv6_ipgre_mc_map(addr, dev->broadcast, buf);
default:
if (dir) {
memcpy(buf, dev->broadcast, dev->addr_len);
n = 1;
name_len = fp[n++];
+
+ IRDA_ASSERT(name_len < IAS_MAX_CLASSNAME + 1, return;);
+
memcpy(name, fp+n, name_len); n+=name_len;
name[name_len] = '\0';
attr_len = fp[n++];
+
+ IRDA_ASSERT(attr_len < IAS_MAX_ATTRIBNAME + 1, return;);
+
memcpy(attr, fp+n, attr_len); n+=attr_len;
attr[attr_len] = '\0';
while(isspace(start[length - 1]))
length--;
+ DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5,
+ -EINVAL, CTRL_ERROR, "Invalid nickname.\n");
+
/* Copy the name for later reuse */
memcpy(ap->rname, start + 5, length - 5);
ap->rname[length - 5] = '\0';
struct sock *make;
struct rose_sock *make_rose;
struct rose_facilities_struct facilities;
- int n, len;
+ int n;
skb->sk = NULL; /* Initially we don't know who it's for */
*/
memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
- len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;
- len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;
- if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
+ if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
+ skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
+ &facilities)) {
rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
return 0;
}
unsigned int lci_i, lci_o;
while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
+ if (skb->len < ROSE_MIN_LEN) {
+ kfree_skb(skb);
+ continue;
+ }
lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
frametype = skb->data[2];
- dest = (rose_address *)(skb->data + 4);
+ if (frametype == ROSE_CALL_REQUEST &&
+ (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
+ skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
+ ROSE_CALL_REQ_ADDR_LEN_VAL)) {
+ kfree_skb(skb);
+ continue;
+ }
+ dest = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i;
skb_reset_transport_header(skb);
unsigned int lci, new_lci;
unsigned char cause, diagnostic;
struct net_device *dev;
- int len, res = 0;
+ int res = 0;
char buf[11];
#if 0
return res;
#endif
+ if (skb->len < ROSE_MIN_LEN)
+ return res;
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
- src_addr = (rose_address *)(skb->data + 9);
- dest_addr = (rose_address *)(skb->data + 4);
+ if (frametype == ROSE_CALL_REQUEST &&
+ (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
+ skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
+ ROSE_CALL_REQ_ADDR_LEN_VAL))
+ return res;
+ src_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_SRC_ADDR_OFF);
+ dest_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);
spin_lock_bh(&rose_neigh_list_lock);
spin_lock_bh(&rose_route_list_lock);
goto out;
}
- len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;
- len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;
-
memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
- if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
+ if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
+ skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
+ &facilities)) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_INVALID_FACILITY, 76);
goto out;
}
*dptr++ = ROSE_GFI | lci1;
*dptr++ = lci2;
*dptr++ = frametype;
- *dptr++ = 0xAA;
+ *dptr++ = ROSE_CALL_REQ_ADDR_LEN_VAL;
memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
do {
switch (*p & 0xC0) {
case 0x00:
+ if (len < 2)
+ return -1;
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
+ if (len < 3)
+ return -1;
if (*p == FAC_NATIONAL_RAND)
facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF);
p += 3;
break;
case 0x80:
+ if (len < 4)
+ return -1;
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
+ if (len < 2)
+ return -1;
l = p[1];
+ if (len < 2 + l)
+ return -1;
if (*p == FAC_NATIONAL_DEST_DIGI) {
if (!fac_national_digis_received) {
+ if (l < AX25_ADDR_LEN)
+ return -1;
memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN);
facilities->source_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_SRC_DIGI) {
if (!fac_national_digis_received) {
+ if (l < AX25_ADDR_LEN)
+ return -1;
memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN);
facilities->dest_ndigis = 1;
}
}
else if (*p == FAC_NATIONAL_FAIL_CALL) {
+ if (l < AX25_ADDR_LEN)
+ return -1;
memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_FAIL_ADD) {
+ if (l < 1 + ROSE_ADDR_LEN)
+ return -1;
memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN);
}
else if (*p == FAC_NATIONAL_DIGIS) {
+ if (l % AX25_ADDR_LEN)
+ return -1;
fac_national_digis_received = 1;
facilities->source_ndigis = 0;
facilities->dest_ndigis = 0;
for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) {
- if (pt[6] & AX25_HBIT)
+ if (pt[6] & AX25_HBIT) {
+ if (facilities->dest_ndigis >= ROSE_MAX_DIGIS)
+ return -1;
memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN);
- else
+ } else {
+ if (facilities->source_ndigis >= ROSE_MAX_DIGIS)
+ return -1;
memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN);
+ }
}
}
p += l + 2;
do {
switch (*p & 0xC0) {
case 0x00:
+ if (len < 2)
+ return -1;
p += 2;
n += 2;
len -= 2;
break;
case 0x40:
+ if (len < 3)
+ return -1;
p += 3;
n += 3;
len -= 3;
break;
case 0x80:
+ if (len < 4)
+ return -1;
p += 4;
n += 4;
len -= 4;
break;
case 0xC0:
+ if (len < 2)
+ return -1;
l = p[1];
+
+			/* Prevent overflows */
+ if (l < 10 || l > 20)
+ return -1;
+
if (*p == FAC_CCITT_DEST_NSAP) {
memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN);
memcpy(callsign, p + 12, l - 10);
return n;
}
-int rose_parse_facilities(unsigned char *p,
+int rose_parse_facilities(unsigned char *p, unsigned packet_len,
struct rose_facilities_struct *facilities)
{
int facilities_len, len;
facilities_len = *p++;
- if (facilities_len == 0)
+ if (facilities_len == 0 || (unsigned)facilities_len > packet_len)
return 0;
- while (facilities_len > 0) {
- if (*p == 0x00) {
- facilities_len--;
- p++;
-
- switch (*p) {
- case FAC_NATIONAL: /* National */
- len = rose_parse_national(p + 1, facilities, facilities_len - 1);
- facilities_len -= len + 1;
- p += len + 1;
- break;
-
- case FAC_CCITT: /* CCITT */
- len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
- facilities_len -= len + 1;
- p += len + 1;
- break;
-
- default:
- printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
- facilities_len--;
- p++;
- break;
- }
- } else
- break; /* Error in facilities format */
+ while (facilities_len >= 3 && *p == 0x00) {
+ facilities_len--;
+ p++;
+
+ switch (*p) {
+ case FAC_NATIONAL: /* National */
+ len = rose_parse_national(p + 1, facilities, facilities_len - 1);
+ break;
+
+ case FAC_CCITT: /* CCITT */
+ len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
+ break;
+
+ default:
+ printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
+ len = 1;
+ break;
+ }
+
+ if (len < 0)
+ return 0;
+ if (WARN_ON(len >= facilities_len))
+ return 0;
+ facilities_len -= len + 1;
+ p += len + 1;
}
- return 1;
+ return facilities_len == 0;
}
static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0)
continue;
sctp_assoc_hashtable = (struct sctp_hashbucket *)
- __get_free_pages(GFP_ATOMIC, order);
+ __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
} while (!sctp_assoc_hashtable && --order > 0);
if (!sctp_assoc_hashtable) {
pr_err("Failed association hash alloc\n");
if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
continue;
sctp_port_hashtable = (struct sctp_bind_hashbucket *)
- __get_free_pages(GFP_ATOMIC, order);
+ __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
} while (!sctp_port_hashtable && --order > 0);
if (!sctp_port_hashtable) {
pr_err("Failed bind hash alloc\n");
goto drop_unlock;
}
- if (x->props.replay_window && x->repl->check(x, skb, seq)) {
+ if (x->repl->check(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
XFRM_SKB_CB(skb)->seq.input.low = seq;
XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
+ skb_dst_force(skb);
+
nexthdr = x->type->input(x, skb);
if (nexthdr == -EINPROGRESS)
spin_unlock_bh(&x->lock);
+ skb_dst_force(skb);
+
err = x->type->output(x, skb);
if (err == -EINPROGRESS)
goto out_exit;
err = -EHOSTUNREACH;
goto error_nolock;
}
- skb_dst_set(skb, dst_clone(dst));
+ skb_dst_set(skb, dst);
x = dst->xfrm;
} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
u32 diff;
u32 seq = ntohl(net_seq);
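+	/* A replay window of zero means replay checking is disabled, so the
+	 * check trivially passes.
+	 */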
+ if (!x->props.replay_window)
+ return 0;
+
if (unlikely(seq == 0))
goto err;
{
unsigned int bitnr, nr;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ u32 pos;
u32 seq = ntohl(net_seq);
u32 diff = replay_esn->seq - seq;
- u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
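+	/* A zero ESN replay window disables checking; bail out before the
+	 * modulo below would divide by zero.
+	 */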
+ if (!replay_esn->replay_window)
+ return 0;
+
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
if (unlikely(seq == 0))
goto err;
unsigned int bitnr, nr;
u32 diff;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+ u32 pos;
u32 seq = ntohl(net_seq);
- u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
u32 wsize = replay_esn->replay_window;
u32 top = replay_esn->seq;
u32 bottom = top - wsize + 1;
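+	/* As above: a zero window disables replay checking and must be caught
+	 * before the modulo below.
+	 */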
+ if (!wsize)
+ return 0;
+
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
if (unlikely(seq == 0 && replay_esn->seq_hi == 0 &&
(replay_esn->seq < replay_esn->replay_window - 1)))
goto err;
goto error;
}
+ if (orig->replay_esn) {
+ err = xfrm_replay_clone(x, orig);
+ if (err)
+ goto error;
+ }
+
memcpy(&x->mark, &orig->mark, sizeof(x->mark));
err = xfrm_init_state(x);
if (!rt)
return 0;
+ if (p->id.proto != IPPROTO_ESP)
+ return -EINVAL;
+
if (p->replay_window != 0)
return -EINVAL;
return 0;
}
+static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
+ struct nlattr *rp)
+{
+ struct xfrm_replay_state_esn *up;
+
+ if (!replay_esn || !rp)
+ return 0;
+
+ up = nla_data(rp);
+
+ if (xfrm_replay_state_esn_len(replay_esn) !=
+ xfrm_replay_state_esn_len(up))
+ return -EINVAL;
+
+ return 0;
+}
+
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
struct xfrm_replay_state_esn **preplay_esn,
struct nlattr *rta)
if (x->km.state != XFRM_STATE_VALID)
goto out;
+ err = xfrm_replay_verify_len(x->replay_esn, rp);
+ if (err)
+ goto out;
+
spin_lock_bh(&x->lock);
xfrm_update_ae_params(x, attrs);
spin_unlock_bh(&x->lock);
}
if (runtime->no_period_wakeup) {
+ snd_pcm_sframes_t xrun_threshold;
/*
* Without regular period interrupts, we have to check
* the elapsed time to detect xruns.
if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
goto no_delta_check;
hdelta = jdelta - delta * HZ / runtime->rate;
- while (hdelta > runtime->hw_ptr_buffer_jiffies / 2 + 1) {
+ xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
+ while (hdelta > xrun_threshold) {
delta += runtime->buffer_size;
hw_base += runtime->buffer_size;
if (hw_base >= runtime->boundary)
{
struct fwspk *fwspk = dev_get_drvdata(dev);
- snd_card_disconnect(fwspk->card);
-
mutex_lock(&fwspk->mutex);
amdtp_out_stream_pcm_abort(&fwspk->stream);
+ snd_card_disconnect(fwspk->card);
fwspk_stop_stream(fwspk);
mutex_unlock(&fwspk->mutex);
#define ES_REG_1371_CODEC 0x14 /* W/R: Codec Read/Write register address */
#define ES_1371_CODEC_RDY (1<<31) /* codec ready */
#define ES_1371_CODEC_WIP (1<<30) /* codec register access in progress */
+#define EV_1938_CODEC_MAGIC (1<<26)
#define ES_1371_CODEC_PIRD (1<<23) /* codec read/write select register */
#define ES_1371_CODEC_WRITE(a,d) ((((a)&0x7f)<<16)|(((d)&0xffff)<<0))
#define ES_1371_CODEC_READS(a) ((((a)&0x7f)<<16)|ES_1371_CODEC_PIRD)
#ifdef CHIP1371
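+/* PCI device 0x8938 (EV1938) needs the EV_1938_CODEC_MAGIC bit set on codec
+ * accesses and a short settling delay before re-reading the codec register.
+ */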
+static inline bool is_ev1938(struct ensoniq *ensoniq)
+{
+ return ensoniq->pci->device == 0x8938;
+}
+
static void snd_es1371_codec_write(struct snd_ac97 *ac97,
unsigned short reg, unsigned short val)
{
struct ensoniq *ensoniq = ac97->private_data;
- unsigned int t, x;
+ unsigned int t, x, flag;
+ flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0;
mutex_lock(&ensoniq->src_mutex);
for (t = 0; t < POLL_COUNT; t++) {
if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP)) {
0x00010000)
break;
}
- outl(ES_1371_CODEC_WRITE(reg, val), ES_REG(ensoniq, 1371_CODEC));
+ outl(ES_1371_CODEC_WRITE(reg, val) | flag,
+ ES_REG(ensoniq, 1371_CODEC));
/* restore SRC reg */
snd_es1371_wait_src_ready(ensoniq);
outl(x, ES_REG(ensoniq, 1371_SMPRATE));
unsigned short reg)
{
struct ensoniq *ensoniq = ac97->private_data;
- unsigned int t, x, fail = 0;
+ unsigned int t, x, flag, fail = 0;
+ flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0;
__again:
mutex_lock(&ensoniq->src_mutex);
for (t = 0; t < POLL_COUNT; t++) {
0x00010000)
break;
}
- outl(ES_1371_CODEC_READS(reg), ES_REG(ensoniq, 1371_CODEC));
+ outl(ES_1371_CODEC_READS(reg) | flag,
+ ES_REG(ensoniq, 1371_CODEC));
/* restore SRC reg */
snd_es1371_wait_src_ready(ensoniq);
outl(x, ES_REG(ensoniq, 1371_SMPRATE));
/* now wait for the stinkin' data (RDY) */
for (t = 0; t < POLL_COUNT; t++) {
if ((x = inl(ES_REG(ensoniq, 1371_CODEC))) & ES_1371_CODEC_RDY) {
+ if (is_ev1938(ensoniq)) {
+ for (t = 0; t < 100; t++)
+ inl(ES_REG(ensoniq, CONTROL));
+ x = inl(ES_REG(ensoniq, 1371_CODEC));
+ }
mutex_unlock(&ensoniq->src_mutex);
return ES_1371_CODEC_READ(x);
}
SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
{}
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD),
SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL),
SND_PCI_QUIRK(0x108e, 0x534d, NULL, ALC883_3ST_6ch),
- SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0x0349, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG),
PINFIX_LENOVO_Y530,
PINFIX_PB_M5210,
PINFIX_ACER_ASPIRE_7736,
+ PINFIX_GIGABYTE_880GM,
};
static const struct alc_fixup alc882_fixups[] = {
.type = ALC_FIXUP_SKU,
.v.sku = ALC_FIXUP_SKU_IGNORE,
},
+ [PINFIX_GIGABYTE_880GM] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x14, 0x1114410 }, /* set as speaker */
+ { }
+ }
+ },
};
static struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", PINFIX_ACER_ASPIRE_7736),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", PINFIX_GIGABYTE_880GM),
{}
};
ALC662_3ST_6ch_DIG),
SND_PCI_QUIRK(0x1179, 0xff6e, "Toshiba NB20x", ALC662_AUTO),
SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10),
- SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L",
- ALC662_3ST_6ch_DIG),
SND_PCI_QUIRK(0x152d, 0x2304, "Quanta WH1", ALC663_ASUS_H13),
SND_PCI_QUIRK(0x1565, 0x820f, "Biostar TA780G M2+", ALC662_3ST_6ch_DIG),
SND_PCI_QUIRK(0x1631, 0xc10c, "PB RS65", ALC663_ASUS_M51VA),
ALC662_FIXUP_IDEAPAD,
ALC272_FIXUP_MARIO,
ALC662_FIXUP_CZC_P10T,
+ ALC662_FIXUP_GIGABYTE,
};
static const struct alc_fixup alc662_fixups[] = {
{}
}
},
+ [ALC662_FIXUP_GIGABYTE] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x14, 0x1114410 }, /* set as speaker */
+ { }
+ }
+ },
};
static struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", ALC662_FIXUP_GIGABYTE),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
dac33_write(codec, DAC33_OUT_AMP_CTRL,
dac33_read_reg_cache(codec, DAC33_OUT_AMP_CTRL));
+ dac33_write(codec, DAC33_LDAC_PWR_CTRL,
+ dac33_read_reg_cache(codec, DAC33_LDAC_PWR_CTRL));
+ dac33_write(codec, DAC33_RDAC_PWR_CTRL,
+ dac33_read_reg_cache(codec, DAC33_RDAC_PWR_CTRL));
}
static inline int dac33_read_id(struct snd_soc_codec *codec)
{
struct snd_soc_codec *codec = dac33->codec;
unsigned int delay;
+ unsigned long flags;
switch (dac33->fifo_mode) {
case DAC33_FIFO_MODE1:
DAC33_THRREG(dac33->nsample));
/* Take the timestamps */
- spin_lock_irq(&dac33->lock);
+ spin_lock_irqsave(&dac33->lock, flags);
dac33->t_stamp2 = ktime_to_us(ktime_get());
dac33->t_stamp1 = dac33->t_stamp2;
- spin_unlock_irq(&dac33->lock);
+ spin_unlock_irqrestore(&dac33->lock, flags);
dac33_write16(codec, DAC33_PREFILL_MSB,
DAC33_THRREG(dac33->alarm_threshold));
break;
case DAC33_FIFO_MODE7:
/* Take the timestamp */
- spin_lock_irq(&dac33->lock);
+ spin_lock_irqsave(&dac33->lock, flags);
dac33->t_stamp1 = ktime_to_us(ktime_get());
/* Move back the timestamp with drain time */
dac33->t_stamp1 -= dac33->mode7_us_to_lthr;
- spin_unlock_irq(&dac33->lock);
+ spin_unlock_irqrestore(&dac33->lock, flags);
dac33_write16(codec, DAC33_PREFILL_MSB,
DAC33_THRREG(DAC33_MODE7_MARGIN));
static inline void dac33_playback_handler(struct tlv320dac33_priv *dac33)
{
struct snd_soc_codec *codec = dac33->codec;
+ unsigned long flags;
switch (dac33->fifo_mode) {
case DAC33_FIFO_MODE1:
/* Take the timestamp */
- spin_lock_irq(&dac33->lock);
+ spin_lock_irqsave(&dac33->lock, flags);
dac33->t_stamp2 = ktime_to_us(ktime_get());
- spin_unlock_irq(&dac33->lock);
+ spin_unlock_irqrestore(&dac33->lock, flags);
dac33_write16(codec, DAC33_NSAMPLE_MSB,
DAC33_THRREG(dac33->nsample));
{
struct snd_soc_codec *codec = dev;
struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+ unsigned long flags;
- spin_lock(&dac33->lock);
+ spin_lock_irqsave(&dac33->lock, flags);
dac33->t_stamp1 = ktime_to_us(ktime_get());
- spin_unlock(&dac33->lock);
+ spin_unlock_irqrestore(&dac33->lock, flags);
/* Do not schedule the workqueue in Mode7 */
if (dac33->fifo_mode != DAC33_FIFO_MODE7)
unsigned int time_delta, uthr;
int samples_out, samples_in, samples;
snd_pcm_sframes_t delay = 0;
+ unsigned long flags;
switch (dac33->fifo_mode) {
case DAC33_FIFO_BYPASS:
break;
case DAC33_FIFO_MODE1:
- spin_lock(&dac33->lock);
+ spin_lock_irqsave(&dac33->lock, flags);
t0 = dac33->t_stamp1;
t1 = dac33->t_stamp2;
- spin_unlock(&dac33->lock);
+ spin_unlock_irqrestore(&dac33->lock, flags);
t_now = ktime_to_us(ktime_get());
/* We have not started to fill the FIFO yet, delay is 0 */
}
break;
case DAC33_FIFO_MODE7:
- spin_lock(&dac33->lock);
+ spin_lock_irqsave(&dac33->lock, flags);
t0 = dac33->t_stamp1;
uthr = dac33->uthr;
- spin_unlock(&dac33->lock);
+ spin_unlock_irqrestore(&dac33->lock, flags);
t_now = ktime_to_us(ktime_get());
/* We have not started to fill the FIFO yet, delay is 0 */
priv->naudint = naudint;
priv->workqueue = create_singlethread_workqueue("twl6040-codec");
- if (!priv->workqueue)
+ if (!priv->workqueue) {
+ ret = -ENOMEM;
goto work_err;
+ }
INIT_DELAYED_WORK(&priv->delayed_work, twl6040_accessory_work);
slave_config.direction = DMA_TO_DEVICE;
slave_config.dst_addr = dma_params->dma_addr;
slave_config.dst_addr_width = buswidth;
- slave_config.dst_maxburst = dma_params->burstsize;
+ slave_config.dst_maxburst = dma_params->burstsize * buswidth;
} else {
slave_config.direction = DMA_FROM_DEVICE;
slave_config.src_addr = dma_params->dma_addr;
slave_config.src_addr_width = buswidth;
- slave_config.src_maxburst = dma_params->burstsize;
+ slave_config.src_maxburst = dma_params->burstsize * buswidth;
}
ret = dmaengine_slave_config(iprtd->dma_chan, &slave_config);
static int __devinit imx_soc_platform_probe(struct platform_device *pdev)
{
+ struct imx_ssi *ssi = platform_get_drvdata(pdev);
+
+ ssi->dma_params_tx.burstsize = 6;
+ ssi->dma_params_rx.burstsize = 4;
+
return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2);
}
*/
#define IMX_SSI_DMABUF_SIZE (64 * 1024)
-#define DMA_RXFIFO_BURST 0x4
-#define DMA_TXFIFO_BURST 0x6
-
#endif /* _IMX_SSI_H */
.cpu_dai_name = "pxa2xx-i2s",
.codec_dai_name = "wm8731-hifi",
.platform_name = "pxa-pcm-audio",
- .codec_name = "wm8731-codec-0.001b",
+ .codec_name = "wm8731-codec.0-001b",
.init = corgi_wm8731_init,
.ops = &corgi_ops,
};
goto err;
if (gpios[i].wake) {
- ret = set_irq_wake(gpio_to_irq(gpios[i].gpio), 1);
+ ret = irq_set_irq_wake(gpio_to_irq(gpios[i].gpio), 1);
if (ret != 0)
printk(KERN_ERR
"Failed to mark GPIO %d as wake source: %d\n",