/*
 * EDMA3 support for DaVinci
 *
 * Copyright (C) 2006-2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <mach/edma.h>

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QSECR	0x94

/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
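
/*
 * Each PaRAM slot is PARM_SIZE (0x20) bytes wide, so slot n starts at
 * EDMA_PARM + (n << 5); e.g. PARM_OFFSET(3) == 0x4000 + 0x60 == 0x4060.
 */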

#define EDMA_DCHMAP	0x0100	/* 64 registers */
#define CHMAP_EXIST	BIT(24)

#define EDMA_MAX_DMACH           64
#define EDMA_MAX_PARAMENTRY     512
#define EDMA_MAX_CC               2

/*****************************************************************************/

static void __iomem *edmacc_regs_base[EDMA_MAX_CC];

static inline unsigned int edma_read(unsigned ctlr, int offset)
{
	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
}

static inline void edma_write(unsigned ctlr, int offset, int val)
{
	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
}

static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
		unsigned or)
{
	unsigned val = edma_read(ctlr, offset);

	val &= and;
	val |= or;
	edma_write(ctlr, offset, val);
}

static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
	unsigned val = edma_read(ctlr, offset);

	val &= and;
	edma_write(ctlr, offset, val);
}

static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
	unsigned val = edma_read(ctlr, offset);

	val |= or;
	edma_write(ctlr, offset, val);
}

static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + (i << 2));
}

static inline void edma_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, offset + (i << 2), val);
}

static inline void edma_modify_array(unsigned ctlr, int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + (i << 2), and, or);
}

static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + (i << 2), or);
}

static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
		unsigned or)
{
	edma_or(ctlr, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
		unsigned val)
{
	edma_write(ctlr, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
		int i)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
		int param_no)
{
	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
		unsigned val)
{
	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
		unsigned and)
{
	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
		unsigned or)
{
	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}
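
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * helpers above compose, so setting a single OPT bit of a hypothetical
 * PaRAM slot 5 on controller 0 reduces to one read-modify-write; TCINTEN
 * is the transfer-complete-interrupt-enable bit from <mach/edma.h>:
 *
 *	edma_parm_or(0, PARM_OPT, 5, TCINTEN);
 */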

/*****************************************************************************/

/* actual number of DMA channels and slots on this silicon */
struct edma {
	/* how many dma resources of each type */
	unsigned	num_channels;
	unsigned	num_region;
	unsigned	num_slots;
	unsigned	num_tc;
	unsigned	num_cc;
	enum dma_event_q	default_queue;

	/* list of channels with no event trigger; terminated by "-1" */
	const s8	*noevent;

	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

	/* The edma_unused bit for each channel is set unless that
	 * channel is in use on this platform; SoC-specific
	 * initialization code clears the bits for channels in use.
	 */
	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);

	unsigned	irq_res_start;
	unsigned	irq_res_end;

	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};

static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

/*****************************************************************************/

static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
		enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = edma_cc[ctlr]->default_queue;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}

static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
{
	int bit = queue_no * 4;

	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}

static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
		unsigned priority)
{
	int bit = queue_no * 4;

	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
			((priority & 0x7) << bit));
}

/**
 * map_dmach_param - map channel number to PaRAM entry number
 *
 * This maps each DMA channel to the equally-numbered PaRAM entry.
 * In other words, using the DMA channel mapping registers, a PaRAM
 * entry can be mapped to any channel.
 *
 * Callers are responsible for ensuring the channel mapping logic is
 * included in that particular EDMA variant (e.g. dm646x).
 */
static void __init map_dmach_param(unsigned ctlr)
{
	int i;

	for (i = 0; i < EDMA_MAX_DMACH; i++)
		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
}

static void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback)
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
				BIT(lch & 0x1f));

	edma_cc[ctlr]->intr_data[lch].callback = callback;
	edma_cc[ctlr]->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
				BIT(lch & 0x1f));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
				BIT(lch & 0x1f));
	}
}

static int irq2ctlr(int irq)
{
	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
		return 0;
	else if (irq >= edma_cc[1]->irq_res_start &&
			irq <= edma_cc[1]->irq_res_end)
		return 1;

	return -1;
}

/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int i;
	unsigned ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);

	dev_dbg(data, "dma_irq_handler\n");

	if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
	    (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
		return IRQ_NONE;

	while (1) {
		int j;

		if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
				edma_shadow0_read_array(ctlr, SH_IER, 0))
			j = 0;
		else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
				edma_shadow0_read_array(ctlr, SH_IER, 1))
			j = 1;
		else
			break;
		dev_dbg(data, "IPR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_IPR, j));
		for (i = 0; i < 32; i++) {
			int k = (j << 5) + i;
			if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
					&& (edma_shadow0_read_array(ctlr,
							SH_IER, j) & BIT(i))) {
				/* Clear the corresponding IPR bits */
				edma_shadow0_write_array(ctlr, SH_ICR, j,
							BIT(i));
				if (edma_cc[ctlr]->intr_data[k].callback)
					edma_cc[ctlr]->intr_data[k].callback(
						k, DMA_COMPLETE,
						edma_cc[ctlr]->intr_data[k].
							data);
			}
		}
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_shadow0_write(ctlr, SH_IEVAL, 1);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	unsigned ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);

	dev_dbg(data, "dma_ccerr_handler\n");

	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
	    (edma_read(ctlr, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;

		if (edma_read_array(ctlr, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ctlr, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
					edma_read_array(ctlr, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;
				if (edma_read_array(ctlr, EDMA_EMR, j) &
							BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ctlr, EDMA_EMCR, j,
							BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ctlr, SH_SECR,
							j, BIT(i));
					if (edma_cc[ctlr]->intr_data[k].
								callback)
						edma_cc[ctlr]->intr_data[k].
							callback(k,
							DMA_CC_ERROR,
							edma_cc[ctlr]->intr_data
								[k].data);
				}
			}
		} else if (edma_read(ctlr, EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
				edma_read(ctlr, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ctlr, SH_QSECR,
							BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(ctlr, EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
				edma_read(ctlr, EDMA_CCERR));
			/* FIXME:  CCERR.BIT(16) ignored!  much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		}
		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
		    (edma_read(ctlr, EDMA_CCERR) == 0))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ctlr, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * Transfer controller error interrupt handlers
 *
 *****************************************************************************/

#define tc_errs_handled	false	/* disabled as long as they're NOPs */

static irqreturn_t dma_tc0err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc0err_handler\n");
	return IRQ_HANDLED;
}

static irqreturn_t dma_tc1err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc1err_handler\n");
	return IRQ_HANDLED;
}

static int reserve_contiguous_slots(int ctlr, unsigned int id,
				     unsigned int num_slots,
				     unsigned int start_slot)
{
	int i, j;
	unsigned int count = num_slots;
	int stop_slot = start_slot;
	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);

	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
		j = EDMA_CHAN_SLOT(i);
		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
			/* Record our current beginning slot */
			if (count == num_slots)
				stop_slot = i;

			count--;
			set_bit(j, tmp_inuse);

			if (count == 0)
				break;
		} else {
			clear_bit(j, tmp_inuse);

			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
				stop_slot = i;
				break;
			} else {
				count = num_slots;
			}
		}
	}

	/*
	 * We have to clear any bits that we set
	 * if we run out of parameter RAM slots, i.e. we do find a set
	 * of contiguous parameter RAM slots but do not find the exact number
	 * requested as we may reach the total number of parameter RAM slots
	 */
	if (i == edma_cc[ctlr]->num_slots)
		stop_slot = i;

	for (j = start_slot; j < stop_slot; j++)
		if (test_bit(j, tmp_inuse))
			clear_bit(j, edma_cc[ctlr]->edma_inuse);

	if (count)
		return -EBUSY;

	for (j = i - num_slots + 1; j <= i; ++j)
		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}

static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	int i, ctlr;

	for (i = 0; i < pdev->num_resources; i++) {
		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
				(int)pdev->resource[i].start >= 0) {
			ctlr = EDMA_CTLR(pdev->resource[i].start);
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
					edma_cc[ctlr]->edma_unused);
		}
	}

	return 0;
}

/*-----------------------------------------------------------------------*/

static bool unused_chan_list_done;

/* Resource alloc/free:  dma channels, parameter RAM slots */

/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel.  Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	unsigned i, done = 0, ctlr = 0;
	int ret = 0;

	if (!unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
				prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		unused_chan_list_done = true;
	}

	if (channel >= 0) {
		ctlr = EDMA_CTLR(channel);
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		for (i = 0; i < arch_num_cc; i++) {
			channel = 0;
			for (;;) {
				channel = find_next_bit(edma_cc[i]->edma_unused,
						edma_cc[i]->num_channels,
						channel);
				if (channel == edma_cc[i]->num_channels)
					break;
				if (!test_and_set_bit(channel,
						edma_cc[i]->edma_inuse)) {
					done = 1;
					ctlr = i;
					break;
				}
				channel++;
			}
			if (done)
				break;
		}
		if (!done)
			return -ENOMEM;
	} else if (channel >= edma_cc[ctlr]->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);

	if (callback)
		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
					callback, data);

	map_dmach_queue(ctlr, channel, eventq_no);

	return EDMA_CTLR_CHAN(ctlr, channel);
}
EXPORT_SYMBOL(edma_alloc_channel);
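
/*
 * Illustrative sketch (editor's example, not part of the driver): a
 * client might bring up a transmit channel as below.  tx_event, dev and
 * tx_callback are hypothetical; the status values come from <mach/edma.h>.
 *
 *	static void tx_callback(unsigned chan, u16 status, void *data)
 *	{
 *		if (status != DMA_COMPLETE)
 *			pr_err("EDMA error on channel %u\n", chan);
 *	}
 *
 *	chan = edma_alloc_channel(tx_event, tx_callback, dev, EVENTQ_DEFAULT);
 *	if (chan < 0)
 *		return chan;
 *	... program the channel's PaRAM slot, then edma_start(chan) ...
 */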

/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	setup_dma_interrupt(channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);
	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);
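
/*
 * Illustrative sketch (editor's example, not part of the driver) of the
 * teardown order the comment above implies; chan and extra_slot are
 * hypothetical:
 *
 *	edma_stop(chan);		-- quiesce, clear pending events
 *	edma_free_slot(extra_slot);	-- release any linked slots first
 *	edma_free_channel(chan);	-- then the channel and its slot
 */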

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ctlr: controller whose parameter RAM is being allocated from
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		slot = edma_cc[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
					edma_cc[ctlr]->num_slots, slot);
			if (slot == edma_cc[ctlr]->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_cc[ctlr]->num_channels ||
			slot >= edma_cc[ctlr]->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);

/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots)
		return;

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);

/**
 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
 * @ctlr: controller whose parameter RAM is being allocated from
 * @id: EDMA_CONT_PARAMS_ANY, EDMA_CONT_PARAMS_FIXED_EXACT
 *	or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @slot: the start value of the parameter RAM slots that should be passed
 *	if @id is EDMA_CONT_PARAMS_FIXED_EXACT or
 *	EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @count: number of contiguous parameter RAM slots
 *
 * Returns the starting point of the set of contiguous parameter RAM
 * slots that was requested, else negative errno.
 *
 * If @id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
 * contiguous parameter RAM slots from parameter RAM 64 in the case of
 * DaVinci SOCs and 32 in the case of DA8xx SOCs.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
 * set of contiguous parameter RAM slots from the @slot that is passed as an
 * argument to the API.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially starts
 * looking for a set of contiguous parameter RAM slots from the @slot that
 * is passed as an argument to the API.  On failure the API will try to
 * find a set of contiguous parameter RAM slots from the remaining
 * parameter RAM slots.
 */
int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
{
	/*
	 * The start slot requested should be greater than
	 * the number of channels and lesser than the total number
	 * of slots.
	 */
	if ((id != EDMA_CONT_PARAMS_ANY) &&
		(slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots))
		return -EINVAL;

	/*
	 * The number of parameter RAM slots requested cannot be less than 1
	 * and cannot be more than the number of slots minus the number of
	 * channels.
	 */
	if (count < 1 || count >
		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
		return -EINVAL;

	switch (id) {
	case EDMA_CONT_PARAMS_ANY:
		return reserve_contiguous_slots(ctlr, id, count,
						 edma_cc[ctlr]->num_channels);
	case EDMA_CONT_PARAMS_FIXED_EXACT:
	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
		return reserve_contiguous_slots(ctlr, id, count, slot);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(edma_alloc_cont_slots);
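
/*
 * Illustrative sketch (editor's example, not part of the driver):
 * reserve four contiguous slots anywhere in PaRAM on controller 0,
 * then release them again:
 *
 *	int slot = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 4);
 *	if (slot >= 0)
 *		edma_free_cont_slots(slot, 4);
 */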

/**
 * edma_free_cont_slots - deallocate DMA parameter RAM slots
 * @slot: first parameter RAM of a set of parameter RAM slots to be freed
 * @count: the number of contiguous parameter RAM slots to be freed
 *
 * This deallocates the parameter RAM slots allocated by
 * edma_alloc_cont_slots().
 * Callers/applications need to keep track of sets of contiguous
 * parameter RAM slots that have been allocated using the
 * edma_alloc_cont_slots() API.
 * Callers are responsible for ensuring the slots are inactive, and will
 * not be activated.
 */
int edma_free_cont_slots(unsigned slot, int count)
{
	unsigned ctlr, slot_to_free;
	int i;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots ||
		count < 1)
		return -EINVAL;

	for (i = slot; i < slot + count; ++i) {
		ctlr = EDMA_CTLR(i);
		slot_to_free = EDMA_CHAN_SLOT(i);

		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
			&dummy_paramset, PARM_SIZE);
		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
	}

	return 0;
}
EXPORT_SYMBOL(edma_free_cont_slots);

/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */

/**
 * edma_set_src - set initial DMA source address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @src_port: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma_set_src_index().
 */
void edma_set_src(unsigned slot, dma_addr_t src_port,
				enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set SAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
		} else {
			/* clear SAM */
			i &= ~SAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);

		/* set the source port address
		   in source register of param structure */
		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
	}
}
EXPORT_SYMBOL(edma_set_src);

/**
 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma_set_dest_index().
 */
void edma_set_dest(unsigned slot, dma_addr_t dest_port,
				enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set DAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
		} else {
			/* clear DAM */
			i &= ~DAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);
		/* set the destination port address
		   in dest register of param structure */
		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
	}
}
EXPORT_SYMBOL(edma_set_dest);

/**
 * edma_get_position - returns the current transfer points
 * @slot: parameter RAM slot being examined
 * @src: pointer to source port position
 * @dst: pointer to destination port position
 *
 * Returns current source and destination addresses for a particular
 * parameter RAM slot.  Its channel should not be active when this is called.
 */
void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
{
	struct edmacc_param temp;
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
	if (src != NULL)
		*src = temp.src;
	if (dst != NULL)
		*dst = temp.dst;
}
EXPORT_SYMBOL(edma_get_position);

/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: parameter RAM slot being configured
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0xffff0000, src_bidx);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0xffff0000, src_cidx);
	}
}
EXPORT_SYMBOL(edma_set_src_index);

/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: parameter RAM slot being configured
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0x0000ffff, dest_bidx << 16);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0x0000ffff, dest_cidx << 16);
	}
}
EXPORT_SYMBOL(edma_set_dest_index);
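
/*
 * Illustrative sketch (editor's example, not part of the driver):
 * gathering a 320-byte-wide window out of a source image with a
 * 1024-byte line pitch into a packed destination buffer.  Each array is
 * one line (acnt = 320); arrays advance by the pitch on the source side
 * and stay packed on the destination side.  All numbers are hypothetical:
 *
 *	edma_set_src_index(slot, 1024, 0);
 *	edma_set_dest_index(slot, 320, 0);
 */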

/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma_set_transfer_params(unsigned slot,
		u16 acnt, u16 bcnt, u16 ccnt,
		u16 bcnt_rld, enum sync_dimension sync_mode)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
				0x0000ffff, bcnt_rld << 16);
		if (sync_mode == ASYNC)
			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
		else
			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
		/* Set the acount, bcount, ccount registers */
		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
	}
}
EXPORT_SYMBOL(edma_set_transfer_params);
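
/*
 * Illustrative sketch (editor's example, not part of the driver): an
 * A-synchronized transfer that moves one 32-bit word per DMA event out
 * of a hypothetical 4096-byte buffer (4096 = 4 * 1024), reloading bcnt
 * each time it counts down to zero:
 *
 *	edma_set_transfer_params(slot, 4, 1024, 1, 1024, ASYNC);
 */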

/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
	unsigned ctlr_from, ctlr_to;

	ctlr_from = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);
	ctlr_to = EDMA_CTLR(to);
	to = EDMA_CHAN_SLOT(to);

	if (from >= edma_cc[ctlr_from]->num_slots)
		return;
	if (to >= edma_cc[ctlr_to]->num_slots)
		return;
	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
				PARM_OFFSET(to));
}
EXPORT_SYMBOL(edma_link);
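
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * classic ping-pong arrangement.  Two reload slots are linked into a
 * ring so the hardware flips between two buffers without CPU
 * intervention; chan, ping and pong are hypothetical slot numbers:
 *
 *	edma_link(chan, pong);	-- channel's slot (ping data) reloads pong
 *	edma_link(pong, ping);
 *	edma_link(ping, pong);
 */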

/**
 * edma_unlink - cut link from one parameter RAM slot
 * @from: parameter RAM slot originating the link
 *
 * The originating slot should not be part of any active DMA transfer.
 * Its link is set to 0xffff.
 */
void edma_unlink(unsigned from)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);

	if (from >= edma_cc[ctlr]->num_slots)
		return;
	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}
EXPORT_SYMBOL(edma_unlink);

/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */

/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_write_slot);

/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save them as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_read_slot);
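
/*
 * Illustrative sketch (editor's example, not part of the driver):
 * capture a configured slot as a template, then stamp it into another
 * slot with only the destination changed; all variables hypothetical:
 *
 *	struct edmacc_param tmpl;
 *
 *	edma_read_slot(cfg_slot, &tmpl);
 *	tmpl.dst = other_buf_phys;
 *	edma_write_slot(other_slot, &tmpl);
 */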

/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */

/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
void edma_pause(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_pause);

/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_resume);

/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_ESR, j));
			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ctlr, EDMA_ECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(edma_start);

/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * When @channel is a channel, any active transfer is paused and
 * all pending hardware events are cleared.  The current transfer
 * may not be resumed, and the channel's Parameter RAM should be
 * reinitialized before being reused.
 */
void edma_stop(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_EER, j));

		/* REVISIT:  consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
EXPORT_SYMBOL(edma_stop);

/******************************************************************************
 *
 * edma_clean_channel - clean a channel's parameter RAM and error state
 *
 * This cleans the channel's PaRAM entry and brings the EDMA back to its
 * initial state if media has been removed before the EDMA has finished.
 * It is useful for removable media.
 *
 * channel - channel number
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/
void edma_clean_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
EXPORT_SYMBOL(edma_clean_channel);

/**
 * edma_clear_event - clear an outstanding event on the DMA channel
 * @channel: channel number
 */
void edma_clear_event(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	if (channel < 32)
		edma_write(ctlr, EDMA_ECR, BIT(channel));
	else
		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
}
EXPORT_SYMBOL(edma_clear_event);

/*-----------------------------------------------------------------------*/

static int __init edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info *info = pdev->dev.platform_data;
	const s8 (*queue_priority_mapping)[2];
	const s8 (*queue_tc_mapping)[2];
	int i, j, found = 0;
	int status = -1;
	int irq[EDMA_MAX_CC] = {0, 0};
	int err_irq[EDMA_MAX_CC] = {0, 0};
	struct resource *r[EDMA_MAX_CC] = {NULL};
	resource_size_t len[EDMA_MAX_CC];
	char res_name[10];
	char irq_name[10];

	if (!info)
		return -ENODEV;

	for (j = 0; j < EDMA_MAX_CC; j++) {
		sprintf(res_name, "edma_cc%d", j);
		r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						res_name);
		if (!r[j]) {
			if (found)
				break;
			else
				return -ENODEV;
		} else {
			found = 1;
		}

		len[j] = resource_size(r[j]);

		r[j] = request_mem_region(r[j]->start, len[j],
			dev_name(&pdev->dev));
		if (!r[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
		if (!edmacc_regs_base[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edma_cc[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
		if (!edma_cc[j]) {
			status = -ENOMEM;
			goto fail1;
		}
		memset(edma_cc[j], 0, sizeof(struct edma));

		edma_cc[j]->num_channels = min_t(unsigned, info[j].n_channel,
							EDMA_MAX_DMACH);
		edma_cc[j]->num_slots = min_t(unsigned, info[j].n_slot,
							EDMA_MAX_PARAMENTRY);
		edma_cc[j]->num_cc = min_t(unsigned, info[j].n_cc, EDMA_MAX_CC);

		edma_cc[j]->default_queue = info[j].default_queue;
		if (!edma_cc[j]->default_queue)
			edma_cc[j]->default_queue = EVENTQ_1;

		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
			edmacc_regs_base[j]);

		for (i = 0; i < edma_cc[j]->num_slots; i++)
			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
					&dummy_paramset, PARM_SIZE);

		/* Mark all channels as unused */
		memset(edma_cc[j]->edma_unused, 0xff,
			sizeof(edma_cc[j]->edma_unused));

		sprintf(irq_name, "edma%d", j);
		irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_cc[j]->irq_res_start = irq[j];
		status = request_irq(irq[j], dma_irq_handler, 0, "edma",
					&pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				irq[j], status);
			goto fail;
		}

		sprintf(irq_name, "edma%d_err", j);
		err_irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_cc[j]->irq_res_end = err_irq[j];
		status = request_irq(err_irq[j], dma_ccerr_handler, 0,
					"edma_error", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				err_irq[j], status);
			goto fail;
		}

		/* Everything lives on transfer controller 1 until otherwise
		 * specified. This way, long transfers on the low priority queue
		 * started by the codec engine will not cause audio defects.
		 */
		for (i = 0; i < edma_cc[j]->num_channels; i++)
			map_dmach_queue(j, i, EVENTQ_1);

		queue_tc_mapping = info[j].queue_tc_mapping;
		queue_priority_mapping = info[j].queue_priority_mapping;

		/* Event queue to TC mapping */
		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
			map_queue_tc(j, queue_tc_mapping[i][0],
					queue_tc_mapping[i][1]);

		/* Event queue priority mapping */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						queue_priority_mapping[i][0],
						queue_priority_mapping[i][1]);

		/* Map the channel to param entry if channel mapping logic
		 * exists
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		for (i = 0; i < info[j].n_region; i++) {
			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
			edma_write_array(j, EDMA_QRAE, i, 0x0);
		}
		arch_num_cc++;
	}

	if (tc_errs_handled) {
		status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
					"edma_tc0", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				IRQ_TCERRINT0, status);
			return status;
		}
		status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
					"edma_tc1", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
				IRQ_TCERRINT, status);
			return status;
		}
	}

	return 0;

fail:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (err_irq[i])
			free_irq(err_irq[i], &pdev->dev);
		if (irq[i])
			free_irq(irq[i], &pdev->dev);
	}
fail1:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (r[i])
			release_mem_region(r[i]->start, len[i]);
		if (edmacc_regs_base[i])
			iounmap(edmacc_regs_base[i]);
	}

	return status;
}

static struct platform_driver edma_driver = {
	.driver.name	= "edma",
};

static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);