/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER			0x00	/* 64 bits */
#define SH_ECR			0x08	/* 64 bits */
#define SH_ESR			0x10	/* 64 bits */
#define SH_CER			0x18	/* 64 bits */
#define SH_EER			0x20	/* 64 bits */
#define SH_EECR			0x28	/* 64 bits */
#define SH_EESR			0x30	/* 64 bits */
#define SH_SER			0x38	/* 64 bits */
#define SH_SECR			0x40	/* 64 bits */
#define SH_IER			0x50	/* 64 bits */
#define SH_IECR			0x58	/* 64 bits */
#define SH_IESR			0x60	/* 64 bits */
#define SH_IPR			0x68	/* 64 bits */
#define SH_ICR			0x70	/* 64 bits */
#define SH_IEVAL		0x78
#define SH_QER			0x80
#define SH_QEER			0x84
#define SH_QEECR		0x88
#define SH_QEESR		0x8c
#define SH_QSER			0x90
#define SH_QSECR		0x94
#define SH_SIZE			0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV		0x0000
#define EDMA_CCCFG		0x0004
#define EDMA_QCHMAP		0x0200	/* 8 registers */
#define EDMA_DMAQNUM		0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM		0x0260
#define EDMA_QUETCMAP		0x0280
#define EDMA_QUEPRI		0x0284
#define EDMA_EMR		0x0300	/* 64 bits */
#define EDMA_EMCR		0x0308	/* 64 bits */
#define EDMA_QEMR		0x0310
#define EDMA_QEMCR		0x0314
#define EDMA_CCERR		0x0318
#define EDMA_CCERRCLR		0x031c
#define EDMA_EEVAL		0x0320
#define EDMA_DRAE		0x0340	/* 4 x 64 bits */
#define EDMA_QRAE		0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT		0x0600	/* 2 registers */
#define EDMA_QWMTHRA		0x0620
#define EDMA_QWMTHRB		0x0624
#define EDMA_CCSTAT		0x0640

#define EDMA_M			0x1000	/* global channel registers */
#define EDMA_ECR		0x1008
#define EDMA_ECRH		0x100C
#define EDMA_SHADOW0		0x2000	/* 4 shadow regions */
#define EDMA_PARM		0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

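/*
 * Worked example (illustrative): with EDMA_PARM at 0x4000 and PARM_SIZE
 * (32) bytes per slot, PARM_OFFSET(0) = 0x4000, PARM_OFFSET(1) = 0x4020
 * and PARM_OFFSET(64) = 0x4800.
 */
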
#define EDMA_DCHMAP		0x0100	/* 64 registers */

/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4) /* bits 4-6 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)

/* CCSTAT register */
#define EDMA_CCSTAT_ACTV	BIT(4)

/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we setup the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003

/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;

/* fields in edmacc_param.opt */
#define SAM		BIT(0)
#define DAM		BIT(1)
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)

struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during the completion callback and is always <= processed, because
	 * processed refers to the number of pending transfers (programmed to
	 * the EDMA controller), whereas processed_stat tracks the number of
	 * transfers accounted for so far.
	 *
	 * - residue: The amount of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};

struct edma_tc {
	struct device_node		*node;
	u16				id;
};

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	struct edma_tc			*tc;
	int				ch_num;
	bool				alloced;
	bool				hw_triggered;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;
	bool				legacy_mode;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_qchannels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	bool				chmap_exist;
	enum dma_event_q		default_queue;

	/*
	 * The slot_inuse bit for each PaRAM slot is clear unless the slot is
	 * in use by Linux or if it is allocated to be used by DSP.
	 */
	unsigned long			*slot_inuse;

	struct dma_device		dma_slave;
	struct dma_device		*dma_memcpy;
	struct edma_chan		*slave_chans;
	struct edma_tc			*tc_list;
	int				dummy_slot;
};

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

#define EDMA_BINDING_LEGACY	0
#define EDMA_BINDING_TPCC	1
static const struct of_device_id edma_of_ids[] = {
	{
		.compatible = "ti,edma3",
		.data = (void *)EDMA_BINDING_LEGACY,
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = (void *)EDMA_BINDING_TPCC,
	},
	{}
};

static const struct of_device_id edma_tptc_of_ids[] = {
	{ .compatible = "ti,edma3-tptc", },
	{}
};

static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}

static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
				  unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
					   int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_param_write(struct edma_cc *ecc, int offset,
				    int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_param_modify(struct edma_cc *ecc, int offset,
				     int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
				  unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
				 unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}

static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}

static void edma_set_chmap(struct edma_chan *echan, int slot)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (ecc->chmap_exist) {
		slot = EDMA_CHAN_SLOT(slot);
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
	}
}

static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (enable) {
		edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
					 BIT(channel & 0x1f));
		edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
					 BIT(channel & 0x1f));
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
					 BIT(channel & 0x1f));
	}
}

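/*
 * Illustrative note: the interrupt/event registers above come in 64-bit
 * pairs, so a channel number is split into a 32-bit register index
 * (channel >> 5) and a bit position (channel & 0x1f). Channel 35, for
 * example, is serviced through register index 1, bit 3.
 */
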
/*
 * paRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}

static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
			   struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
}

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot > 0) {
		slot = EDMA_CHAN_SLOT(slot);
		/* Requesting entry paRAM slot for a HW triggered channel. */
		if (ecc->chmap_exist && slot < ecc->num_channels)
			slot = EDMA_SLOT_ANY;
	}

	if (slot < 0) {
		if (ecc->chmap_exist)
			slot = 0;
		else
			slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->slot_inuse,
						  ecc->num_slots,
						  slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->slot_inuse))
				break;
		}
	} else if (slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}

static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
}

/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			  PARM_OFFSET(to));
}

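/*
 * Worked example (illustrative): edma_link(ecc, 5, 6) keeps the upper
 * halfword (BCNTRLD) of slot 5 intact and writes PARM_OFFSET(6) = 0x40c0
 * into the lower halfword, so the controller reloads slot 6 when slot 5
 * completes. A link value of 0xffff (as in dummy_paramset) is the NULL
 * link that terminates a chain.
 */
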
/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}

/*
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 */
static void edma_start(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	if (!echan->hw_triggered) {
		/* EDMA channels without event association */
		dev_dbg(ecc->dev, "ESR%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ESR, j));
		edma_shadow0_write_array(ecc, SH_ESR, j, mask);
	} else {
		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
		dev_dbg(ecc->dev, "EER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_EER, j));
	}
}

static void edma_stop(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_EECR, j, mask);
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write_array(ecc, EDMA_EMCR, j, mask);

	/* clear possibly pending completion interrupt */
	edma_shadow0_write_array(ecc, SH_ICR, j, mask);

	dev_dbg(ecc->dev, "EER%d %08x\n", j,
		edma_shadow0_read_array(ecc, SH_EER, j));

	/* REVISIT: consider guarding against inappropriate event
	 * chaining by overwriting with dummy_paramset.
	 */
}

/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
}

/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
}

static void edma_trigger_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);

	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
}

static void edma_clean_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	/* Clear the corresponding EMR bits */
	edma_write_array(ecc, EDMA_EMCR, j, mask);
	/* Clear any SER */
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}

/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_chan *echan,
				       enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int bit = (channel & 0x7) * 4;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	eventq_no &= 7;
	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
			  eventq_no << bit);
}

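/*
 * Worked example (illustrative): each DMAQNUM register holds eight 4-bit
 * queue-number fields, so channel 13 is configured through
 * EDMA_DMAQNUM[13 >> 3] (the second register) at bit offset
 * (13 & 0x7) * 4 = 20.
 */
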
static int edma_alloc_channel(struct edma_chan *echan,
			      enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(echan);

	edma_setup_interrupt(echan, true);

	edma_assign_channel_eventq(echan, eventq_no);

	return 0;
}

static void edma_free_channel(struct edma_chan *echan)
{
	/* ensure no events are pending */
	edma_stop(echan);
	/* REVISIT should probably take out of shadow region 0 */
	edma_setup_interrupt(echan, false);
}

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions then
	 * set up a link to the dummy slot; this results in all future
	 * events being absorbed and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan);
	}
}

static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_dma() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan);
		/* Move the cyclic channel back to default queue */
		if (!echan->tc && echan->edesc->cyclic)
			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct dma_chan *chan,
			     struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}

static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan);
	return 0;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and setup.
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @acnt: Width in bytes of one element (the device word size)
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    unsigned int acnt, unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of:
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In the A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, bcnt for the first frame will be the
		 * remainder below. Then for every successive frame, bcnt
		 * will be SZ_64K-1. This is assured as bcntrld = 0xffff at
		 * the end of this function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * The only time when (bcntrld) auto reload is required is the
	 * A-sync case, and in this case a reload value of SZ_64K-1 is
	 * all that is ever needed. The link field starts out as the NULL
	 * link and is populated later by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}

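/*
 * Worked example (illustrative) of the A-sync arithmetic above, assuming
 * acnt = 4 bytes (32-bit FIFO), burst = 1 and dma_length = 400000 bytes:
 * the element count is 400000 / 4 = 100000, so
 *	ccnt = 100000 / 65535 = 1,	bcnt = 100000 - 65535 = 34465.
 * bcnt is non-zero, therefore ccnt becomes 2: one frame of 34465 elements
 * followed by one full frame of 65535 elements reloaded via BCNTRLD.
 */
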
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/*
		 * If this is the last in a current SG set of transactions,
		 * enable interrupts so that the next set is processed.
		 */
		if (!((i + 1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret, nslots;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned int width, pset_len;

	if (unlikely(!echan || !len))
		return NULL;

	if (len < SZ_64K) {
		/*
		 * Transfer size less than 64K can be handled with one paRAM
		 * slot and with one burst.
		 * ACNT = length
		 */
		width = len;
		pset_len = len;
		nslots = 1;
	} else {
		/*
		 * Transfer size bigger than 64K will be handled with maximum of
		 * two paRAM slots.
		 * slot1: (full_length / 32767) times 32767 bytes bursts.
		 *	  ACNT = 32767, length1: (full_length / 32767) * 32767
		 * slot2: the remaining amount of data after slot1.
		 *	  ACNT = full_length - length1, length2 = ACNT
		 *
		 * When the full_length is a multiple of 32767 one slot can be
		 * used to complete the transfer.
		 */
		width = SZ_32K - 1;
		pset_len = rounddown(len, width);
		/* One slot is enough for lengths multiple of (SZ_32K - 1) */
		if (unlikely(pset_len == len))
			nslots = 1;
		else
			nslots = 2;
	}

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = len;
	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       width, pset_len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	edesc->pset[0].param.opt |= ITCCHEN;
	if (nslots == 1) {
		/* Enable transfer complete interrupt */
		edesc->pset[0].param.opt |= TCINTEN;
	} else {
		/* Enable transfer complete chaining for the first slot */
		edesc->pset[0].param.opt |= TCCHEN;

		if (echan->slot[1] < 0) {
			echan->slot[1] = edma_alloc_slot(echan->ecc,
							 EDMA_SLOT_ANY);
			if (echan->slot[1] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
		dest += pset_len;
		src += pset_len;
		pset_len = width = len % (SZ_32K - 1);

		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
				       width, pset_len, DMA_MEM_TO_MEM);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->pset[1].param.opt |= ITCCHEN;
		edesc->pset[1].param.opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

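/*
 * Worked example (illustrative) for the two-slot memcpy split above:
 * len = 100000 bytes gives width = SZ_32K - 1 = 32767 and
 * pset_len = rounddown(100000, 32767) = 98301 (three 32767-byte bursts)
 * for the first slot, leaving 100000 - 98301 = 1699 bytes for the
 * second, chained slot.
 */
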
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Period should be multiple of Buffer length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 i, echan->ch_num, echan->slot[i],
			 edesc->pset[i].param.opt,
			 edesc->pset[i].param.src,
			 edesc->pset[i].param.dst,
			 edesc->pset[i].param.a_b_cnt,
			 edesc->pset[i].param.ccnt,
			 edesc->pset[i].param.src_dst_bidx,
			 edesc->pset[i].param.src_dst_cidx,
			 edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	/* Place the cyclic channel to highest priority queue */
	if (!echan->tc)
		edma_assign_channel_eventq(echan, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

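/*
 * Illustrative sizing note: a cyclic request with buf_len = 8192 and
 * period_len = 2048 needs nslots = 8192 / 2048 + 1 = 5 PaRAM slots; the
 * extra slot is the copy of pset[0] that lets edma_execute() link the
 * chain back on itself, so the transfer wraps without CPU intervention.
 */
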
static void edma_completion_handler(struct edma_chan *echan)
{
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc = echan->edesc;

	if (!edesc)
		return;

	spin_lock(&echan->vchan.lock);
	if (edesc->cyclic) {
		vchan_cyclic_callback(&edesc->vdesc);
		spin_unlock(&echan->vchan.lock);
		return;
	} else if (edesc->processed == edesc->pset_nr) {
		edesc->residue = 0;
		edma_stop(echan);
		vchan_cookie_complete(&edesc->vdesc);
		echan->edesc = NULL;

		dev_dbg(dev, "Transfer completed on channel %d\n",
			echan->ch_num);
	} else {
		dev_dbg(dev, "Sub transfer completed on channel %d\n",
			echan->ch_num);

		edma_pause(echan);

		/* Update statistics for tx_status */
		edesc->residue -= edesc->sg_len;
		edesc->residue_stat = edesc->residue;
		edesc->processed_stat = edesc->processed;
	}
	edma_execute(echan);

	spin_unlock(&echan->vchan.lock);
}

/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}

static void edma_error_handler(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edmacc_param p;

	if (!echan->edesc)
		return;

	spin_lock(&echan->vchan.lock);

	edma_read_slot(ecc, echan->slot[0], &p);
	/*
	 * Issue later based on missed flag which will be sure
	 * to happen as:
	 * (1) we finished transmitting an intermediate slot and
	 *     edma_execute is coming up.
	 * (2) or we finished current transfer and issue will
	 *     call edma_execute.
	 *
	 * Important note: issuing can be dangerous here and
	 * lead to some nasty recursion when we are in a NULL
	 * slot. So we avoid doing so and set the missed flag.
	 */
	if (p.a_b_cnt == 0 && p.ccnt == 0) {
		dev_dbg(dev, "Error on null slot, setting miss\n");
		echan->missed = 1;
	} else {
		/*
		 * The slot is already programmed but the event got
		 * missed, so it's safe to issue it here.
		 */
		dev_dbg(dev, "Missed event, TRIGGERING\n");
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
	}

	spin_unlock(&echan->vchan.lock);
}

static inline bool edma_error_pending(struct edma_cc *ecc)
{
	if (edma_read_array(ecc, EDMA_EMR, 0) ||
	    edma_read_array(ecc, EDMA_EMR, 1) ||
	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
		return true;

	return false;
}

/* eDMA error interrupt handler */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int i, j;
	int ctlr;
	unsigned int cnt = 0;
	unsigned int val;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");

	if (!edma_error_pending(ecc))
		return IRQ_NONE;

	while (1) {
		/* Event missed register(s) */
		for (j = 0; j < 2; j++) {
			unsigned long emr;

			val = edma_read_array(ecc, EDMA_EMR, j);
			if (!val)
				continue;

			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
			emr = val;
			for (i = find_next_bit(&emr, 32, 0); i < 32;
			     i = find_next_bit(&emr, 32, i + 1)) {
				int k = (j << 5) + i;

				/* Clear the corresponding EMR bits */
				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
				/* Clear any SER */
				edma_shadow0_write_array(ecc, SH_SECR, j,
							 BIT(i));
				edma_error_handler(&ecc->slave_chans[k]);
			}
		}

		val = edma_read(ecc, EDMA_QEMR);
		if (val) {
			dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_QEMCR, val);
			edma_shadow0_write(ecc, SH_QSECR, val);
		}

		val = edma_read(ecc, EDMA_CCERR);
		if (val) {
			dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_CCERRCLR, val);
		}

		if (!edma_error_pending(ecc))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ecc, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}

static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
{
	struct platform_device *tc_pdev;
	int ret;

	if (!IS_ENABLED(CONFIG_OF) || !tc)
		return;

	tc_pdev = of_find_device_by_node(tc->node);
	if (!tc_pdev) {
		pr_err("%s: TPTC device is not found\n", __func__);
		return;
	}
	if (!pm_runtime_enabled(&tc_pdev->dev))
		pm_runtime_enable(&tc_pdev->dev);

	if (enable)
		ret = pm_runtime_get_sync(&tc_pdev->dev);
	else
		ret = pm_runtime_put_sync(&tc_pdev->dev);

	if (ret < 0)
		pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
		       enable ? "get" : "put", dev_name(&tc_pdev->dev));
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = ecc->dev;
	enum dma_event_q eventq_no = EVENTQ_DEFAULT;
	int ret;

	if (echan->tc) {
		eventq_no = echan->tc->id;
	} else if (ecc->tc_list) {
		/* memcpy channel */
		echan->tc = &ecc->tc_list[ecc->info->default_queue];
		eventq_no = echan->tc->id;
	}

	ret = edma_alloc_channel(echan, eventq_no);
	if (ret)
		return ret;

	echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
	if (echan->slot[0] < 0) {
		dev_err(dev, "Entry slot allocation failed for channel %u\n",
			EDMA_CHAN_SLOT(echan->ch_num));
		goto err_slot;
	}

	/* Set up channel -> slot mapping for the entry slot */
	edma_set_chmap(echan, echan->slot[0]);
	echan->alloced = true;

	dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
		echan->hw_triggered ? "HW" : "SW");

	edma_tc_set_pm_state(echan->tc, true);

	return 0;

err_slot:
	edma_free_channel(echan);
	return echan->slot[0];
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = echan->ecc->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Set entry slot to the dummy slot */
	edma_set_chmap(echan, echan->ecc->dummy_slot);

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan);
		echan->alloced = false;
	}

	edma_tc_set_pm_state(echan->tc, false);
	echan->tc = NULL;
	echan->hw_triggered = false;

	dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

/*
 * This limit exists to avoid a possible infinite loop when waiting for proof
 * that a particular transfer is completed. This limit can be hit if there
 * are large bursts to/from slow devices or the CPU is never able to catch
 * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
 * RX-FIFO, as many as 55 loops have been seen.
 */
#define EDMA_MAX_TR_WAIT_LOOPS 1000

static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
	struct edma_chan *echan = edesc->echan;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first RamPar
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(echan->ecc, echan->slot[0], dst);

	/*
	 * "pos" may represent a transfer request that is still being
	 * processed by the EDMACC or EDMATC. We will busy wait until
	 * any one of the situations occurs:
	 *   1. the DMA hardware is idle
	 *   2. a new transfer request is setup
	 *   3. we hit the loop limit
	 */
	while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
		/* check if a new transfer request is setup */
		if (edma_get_position(echan->ecc,
				      echan->slot[0], dst) != pos) {
			break;
		}

		if (!--loop_count) {
			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
				"%s: timeout waiting for PaRAM update\n",
				__func__);
			break;
		}

		cpu_relax();
	}

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}

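/*
 * Worked example (illustrative) for the cyclic branch above: with an 8192
 * byte circular buffer (residue = 8192, pset[0].addr = buf), a hardware
 * position of buf + 0x600 yields done = 0x600 = 1536 and a reported
 * residue of 8192 - 1536 = 6656 bytes.
 */
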
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
{
	if (!memcpy_channels)
		return false;
	while (*memcpy_channels != -1) {
		if (*memcpy_channels == ch_num)
			return true;
		memcpy_channels++;
	}
	return false;
}

#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
{
	struct dma_device *s_ddev = &ecc->dma_slave;
	struct dma_device *m_ddev = NULL;
	s32 *memcpy_channels = ecc->info->memcpy_channels;
	int i, j;

	dma_cap_zero(s_ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
	if (ecc->legacy_mode && !memcpy_channels) {
		dev_warn(ecc->dev,
			 "Legacy memcpy is enabled, things might not work\n");

		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
	}

	s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
	s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
	s_ddev->device_free_chan_resources = edma_free_chan_resources;
	s_ddev->device_issue_pending = edma_issue_pending;
	s_ddev->device_tx_status = edma_tx_status;
	s_ddev->device_config = edma_slave_config;
	s_ddev->device_pause = edma_dma_pause;
	s_ddev->device_resume = edma_dma_resume;
	s_ddev->device_terminate_all = edma_terminate_all;

	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
	s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	s_ddev->dev = ecc->dev;
	INIT_LIST_HEAD(&s_ddev->channels);

	if (memcpy_channels) {
		m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
		if (!m_ddev) {
			dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
			memcpy_channels = NULL;
			goto ch_setup;
		}
		ecc->dma_memcpy = m_ddev;

		dma_cap_zero(m_ddev->cap_mask);
		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);

		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
		m_ddev->device_free_chan_resources = edma_free_chan_resources;
		m_ddev->device_issue_pending = edma_issue_pending;
		m_ddev->device_tx_status = edma_tx_status;
		m_ddev->device_config = edma_slave_config;
		m_ddev->device_pause = edma_dma_pause;
		m_ddev->device_resume = edma_dma_resume;
		m_ddev->device_terminate_all = edma_terminate_all;

		m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->directions = BIT(DMA_MEM_TO_MEM);
		m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

		m_ddev->dev = ecc->dev;
		INIT_LIST_HEAD(&m_ddev->channels);
	} else if (!ecc->legacy_mode) {
		dev_info(ecc->dev, "memcpy is disabled\n");
	}

ch_setup:
	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &ecc->slave_chans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
			vchan_init(&echan->vchan, m_ddev);
		else
			vchan_init(&echan->vchan, s_ddev);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_QDMACH(cccfg);
	ecc->num_qchannels = value * 2;

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");

	/* Nothing needs to be done if queue priority is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}

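/*
 * Worked example (illustrative): a hypothetical CCCFG value of 0x01324045
 * would decode to 64 DMA channels (BIT(5 + 1)), 8 QDMA channels (4 * 2),
 * 256 PaRAM slots (BIT(4 + 4)), 3 TCs/event queues (2 + 1), 8 regions
 * (BIT(3)) and an existing channel map (CHMAP_EXIST set).
 */
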
#if IS_ENABLED(CONFIG_OF)
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
					 nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
	return 0;
}

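/*
 * Worked example (illustrative): a DT entry mapping DMA channel 12 to
 * crossbar event 10 gives shift = (10 & 0x03) << 3 = 16 and
 * offset = 10 & ~0x03 = 8, so byte lane 2 of the mux register at offset 8
 * is reprogrammed to 12.
 */
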
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	struct edma_soc_info *info;
	struct property *prop;
	size_t sz;
	int ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	if (legacy_mode) {
		prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
					&sz);
		if (prop) {
			ret = edma_xbar_event_map(dev, info, sz);
			if (ret)
				return ERR_PTR(ret);
		}
		return info;
	}

	/* Get the list of channels allocated to be used for memcpy */
	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
	if (prop) {
		const char pname[] = "ti,edma-memcpy-channels";
		size_t nelm = sz / sizeof(s32);
		s32 *memcpy_ch;

		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
					 GFP_KERNEL);
		if (!memcpy_ch)
			return ERR_PTR(-ENOMEM);

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)memcpy_ch, nelm);
		if (ret)
			return ERR_PTR(ret);

		memcpy_ch[nelm] = -1;
		info->memcpy_channels = memcpy_ch;
	}

	prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
				&sz);
	if (prop) {
		const char pname[] = "ti,edma-reserved-slot-ranges";
		u32 (*tmp)[2];
		s16 (*rsv_slots)[2];
		size_t nelm = sz / sizeof(*tmp);
		struct edma_rsv_info *rsv_info;
		int i;

		if (!nelm)
			return info;

		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return ERR_PTR(-ENOMEM);

		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
		if (!rsv_info) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
					 GFP_KERNEL);
		if (!rsv_slots) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)tmp, nelm * 2);
		if (ret) {
			kfree(tmp);
			return ERR_PTR(ret);
		}

		for (i = 0; i < nelm; i++) {
			rsv_slots[i][0] = tmp[i][0];
			rsv_slots[i][1] = tmp[i][1];
		}
		rsv_slots[nelm][0] = -1;
		rsv_slots[nelm][1] = -1;

		info->rsv = rsv_info;
		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;

		kfree(tmp);
	}

	return info;
}

static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct edma_cc *ecc = ofdma->of_dma_data;
	struct dma_chan *chan = NULL;
	struct edma_chan *echan;
	int i;

	if (!ecc || dma_spec->args_count < 1)
		return NULL;

	for (i = 0; i < ecc->num_channels; i++) {
		echan = &ecc->slave_chans[i];
		if (echan->ch_num == dma_spec->args[0]) {
			chan = &echan->vchan.chan;
			break;
		}
	}

	if (!chan)
		return NULL;

	if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
		goto out;

	if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
	    dma_spec->args[1] < echan->ecc->num_tc) {
		echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
		goto out;
	}

	return NULL;
out:
	/* The channel is going to be used as HW synchronized */
	echan->hw_triggered = true;
	return dma_get_slave_channel(chan);
}
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	return ERR_PTR(-EINVAL);
}

static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	return NULL;
}
#endif

static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info	*info = pdev->dev.platform_data;
	s8			(*queue_priority_mapping)[2];
	int			i, off, ln;
	const s16		(*rsv_slots)[2];
	const s16		(*xbar_chans)[2];
	int			irq;
	char			*irq_name;
	struct resource		*mem;
	struct device_node	*node = pdev->dev.of_node;
	struct device		*dev = &pdev->dev;
	struct edma_cc		*ecc;
	bool			legacy_mode = true;
	int ret;

	if (node) {
		const struct of_device_id *match;

		match = of_match_node(edma_of_ids, node);
		if (match && (u32)match->data == EDMA_BINDING_TPCC)
			legacy_mode = false;

		info = edma_setup_info_from_dt(dev, legacy_mode);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->dev = dev;
	ecc->id = pdev->id;
	ecc->legacy_mode = legacy_mode;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		return ret;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);
	if (!ecc->slave_chans)
		return -ENOMEM;

	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->slot_inuse)
		return -ENOMEM;

	ecc->default_queue = info->default_queue;

	for (i = 0; i < ecc->num_slots; i++)
		edma_write_slot(ecc, i, &dummy_paramset);

	if (info->rsv) {
		/* Set the reserved slots in inuse list */
		rsv_slots = info->rsv->rsv_slots;
		if (rsv_slots) {
			for (i = 0; rsv_slots[i][0] != -1; i++) {
				off = rsv_slots[i][0];
				ln = rsv_slots[i][1];
				set_bits(off, ln, ecc->slot_inuse);
			}
		}
	}

	/* Clear the xbar mapped channels in unused list */
	xbar_chans = info->xbar_chans;
	if (xbar_chans) {
		for (i = 0; xbar_chans[i][1] != -1; i++) {
			off = xbar_chans[i][1];
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
	}

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	queue_priority_mapping = info->queue_priority_mapping;

	if (!ecc->legacy_mode) {
		int lowest_priority = 0;
		struct of_phandle_args tc_args;

		ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
					    sizeof(*ecc->tc_list), GFP_KERNEL);
		if (!ecc->tc_list)
			return -ENOMEM;

		for (i = 0;; i++) {
			ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
							       1, i, &tc_args);
			if (ret || i == ecc->num_tc)
				break;

			ecc->tc_list[i].node = tc_args.np;
			ecc->tc_list[i].id = i;
			queue_priority_mapping[i][1] = tc_args.args[0];
			if (queue_priority_mapping[i][1] > lowest_priority) {
				lowest_priority = queue_priority_mapping[i][1];
				info->default_queue = i;
			}
		}
	}

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_region; i++) {
		edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
		edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
		edma_write_array(ecc, EDMA_QRAE, i, 0x0);
	}
	ecc->info = info;

	/* Init the dma device and channels */
	edma_dma_init(ecc, legacy_mode);

	for (i = 0; i < ecc->num_channels; i++) {
		/* Assign all channels to the default queue */
		edma_assign_channel_eventq(&ecc->slave_chans[i],
					   info->default_queue);
		/* Set entry slot to the dummy slot */
		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
	}

	ecc->dma_slave.filter.map = info->slave_map;
	ecc->dma_slave.filter.mapcnt = info->slavecnt;
	ecc->dma_slave.filter.fn = edma_filter_fn;

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret) {
		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
		goto err_reg1;
	}

	if (ecc->dma_memcpy) {
		ret = dma_async_device_register(ecc->dma_memcpy);
		if (ret) {
			dev_err(dev, "memcpy ddev registration failed (%d)\n",
				ret);
			dma_async_device_unregister(&ecc->dma_slave);
			goto err_reg1;
		}
	}

	if (node)
		of_dma_controller_register(node, of_edma_xlate, ecc);

	dev_info(dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	if (ecc->dma_memcpy)
		dma_async_device_unregister(ecc->dma_memcpy);
	edma_free_slot(ecc, ecc->dummy_slot);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int edma_pm_suspend(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced) {
			edma_setup_interrupt(&echan[i], false);
			edma_tc_set_pm_state(echan[i].tc, false);
		}
	}

	return 0;
}

static int edma_pm_resume(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;
	s8 (*queue_priority_mapping)[2];

	queue_priority_mapping = ecc->info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced) {
			/* ensure access through shadow region 0 */
			edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
				       BIT(i & 0x1f));

			edma_setup_interrupt(&echan[i], true);

			/* Set up channel -> slot mapping for the entry slot */
			edma_set_chmap(&echan[i], echan[i].slot[0]);

			edma_tc_set_pm_state(echan[i].tc, true);
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
};

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};

static int edma_tptc_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);

	return pm_runtime_get_sync(&pdev->dev);
}

static struct platform_driver edma_tptc_driver = {
	.probe		= edma_tptc_probe,
	.driver = {
		.name	= "edma3-tptc",
		.of_match_table = edma_tptc_of_ids,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	bool match = false;

	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		if (ch_req == echan->ch_num) {
			/* The channel is going to be used as HW synchronized */
			echan->hw_triggered = true;
			match = true;
		}
	}
	return match;
}
EXPORT_SYMBOL(edma_filter_fn);

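/*
 * Illustrative usage (not part of this driver): a legacy board-file
 * client would typically request a channel through the standard
 * dmaengine API, where ch_num is the eDMA event/channel number of the
 * peripheral:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int ch_num = 12;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 */
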
static int edma_init(void)
{
	int ret;

	ret = platform_driver_register(&edma_tptc_driver);
	if (ret)
		return ret;

	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
	platform_driver_unregister(&edma_tptc_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");