/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"
struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	struct omap_system_dma_plat_info *plat;
};
struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	struct omap_system_dma_plat_info *plat;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};
struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};
struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};
enum {
	CCR_FS = BIT(5),
	CCR_READ_PRIORITY = BIT(6),
	CCR_ENABLE = BIT(7),
	CCR_AUTO_INIT = BIT(8),		/* OMAP1 only */
	CCR_REPEAT = BIT(9),		/* OMAP1 only */
	CCR_OMAP31_DISABLE = BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE = BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE = BIT(9),		/* OMAP2+ only */
	CCR_WR_ACTIVE = BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT = 0 << 12,
	CCR_SRC_AMODE_POSTINC = 1 << 12,
	CCR_SRC_AMODE_SGLIDX = 2 << 12,
	CCR_SRC_AMODE_DBLIDX = 3 << 12,
	CCR_DST_AMODE_CONSTANT = 0 << 14,
	CCR_DST_AMODE_POSTINC = 1 << 14,
	CCR_DST_AMODE_SGLIDX = 2 << 14,
	CCR_DST_AMODE_DBLIDX = 3 << 14,
	CCR_CONSTANT_FILL = BIT(16),
	CCR_TRANSPARENT_COPY = BIT(17),
	CCR_BS = BIT(18),
	CCR_SUPERVISOR = BIT(22),
	CCR_PREFETCH = BIT(23),
	CCR_TRIGGER_SRC = BIT(24),
	CCR_BUFFERING_DISABLE = BIT(25),
	CCR_WRITE_PRIORITY = BIT(26),
	CCR_SYNC_ELEMENT = 0,
	CCR_SYNC_FRAME = CCR_FS,
	CCR_SYNC_BLOCK = CCR_BS,
	CCR_SYNC_PACKET = CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8 = 0,
	CSDP_DATA_TYPE_16 = 1,
	CSDP_DATA_TYPE_32 = 2,
	CSDP_SRC_PORT_EMIFF = 0 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_EMIFS = 1 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1 = 2 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_TIPB = 3 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2 = 4 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_MPUI = 5 << 2,	/* OMAP1 only */
	CSDP_SRC_PACKED = BIT(6),
	CSDP_SRC_BURST_1 = 0 << 7,
	CSDP_SRC_BURST_16 = 1 << 7,
	CSDP_SRC_BURST_32 = 2 << 7,
	CSDP_SRC_BURST_64 = 3 << 7,
	CSDP_DST_PORT_EMIFF = 0 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_EMIFS = 1 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T1 = 2 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_TIPB = 3 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T2 = 4 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_MPUI = 5 << 9,	/* OMAP1 only */
	CSDP_DST_PACKED = BIT(13),
	CSDP_DST_BURST_1 = 0 << 14,
	CSDP_DST_BURST_16 = 1 << 14,
	CSDP_DST_BURST_32 = 2 << 14,
	CSDP_DST_BURST_64 = 3 << 14,

	CICR_TOUT_IE = BIT(0),		/* OMAP1 only */
	CICR_DROP_IE = BIT(1),
	CICR_HALF_IE = BIT(2),
	CICR_FRAME_IE = BIT(3),
	CICR_LAST_IE = BIT(4),
	CICR_BLOCK_IE = BIT(5),
	CICR_PKT_IE = BIT(7),		/* OMAP2+ only */
	CICR_TRANS_ERR_IE = BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE = BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE = BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE = BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE = BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK = BIT(15),
};
static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};
static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};
static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}
static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		c->plat->dma_read(CSR, c->dma_ch);
	else
		c->plat->dma_write(~0, CSR, c->dma_ch);
}
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	if (__dma_omap15xx(od->plat->dma_attr))
		c->plat->dma_write(0, CPC, c->dma_ch);
	else
		c->plat->dma_write(0, CDAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val &= ~(1 << 14);

		val |= c->dma_ch | CLNK_CTRL_ENABLE_LNK;

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	} else if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		c->plat->dma_write(c->dma_ch, CLNK_CTRL, c->dma_ch);

	omap_dma_clear_csr(c);

	/* Enable interrupts */
	c->plat->dma_write(d->cicr, CICR, c->dma_ch);

	/* Enable channel */
	val = c->plat->dma_read(CCR, c->dma_ch);
	val |= CCR_ENABLE;
	c->plat->dma_write(val, CCR, c->dma_ch);
}
static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	c->plat->dma_write(0, CICR, c->dma_ch);

	omap_dma_clear_csr(c);

	val = c->plat->dma_read(CCR, c->dma_ch);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = c->plat->dma_read(OCP_SYSCONFIG, c->dma_ch);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		c->plat->dma_write(val, OCP_SYSCONFIG, c->dma_ch);

		val = c->plat->dma_read(CCR, c->dma_ch);
		val &= ~CCR_ENABLE;
		c->plat->dma_write(val, CCR, c->dma_ch);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = c->plat->dma_read(CCR, c->dma_ch);
			if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		c->plat->dma_write(sysconfig, OCP_SYSCONFIG, c->dma_ch);
	} else {
		val &= ~CCR_ENABLE;
		c->plat->dma_write(val, CCR, c->dma_ch);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
	}
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM) {
		c->plat->dma_write(sg->addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(0, CDFI, c->dma_ch);
	} else {
		c->plat->dma_write(sg->addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(0, CSFI, c->dma_ch);
	}

	c->plat->dma_write(sg->en, CEN, c->dma_ch);
	c->plat->dma_write(sg->fn, CFN, c->dma_ch);

	omap_dma_start(c, d);
}
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	c->plat->dma_write(d->ccr, CCR, c->dma_ch);
	if (dma_omap1())
		c->plat->dma_write(d->ccr >> 16, CCR2, c->dma_ch);

	if (d->dir == DMA_DEV_TO_MEM) {
		c->plat->dma_write(d->dev_addr, CSSA, c->dma_ch);
		c->plat->dma_write(0, CSEI, c->dma_ch);
		c->plat->dma_write(d->fi, CSFI, c->dma_ch);
	} else {
		c->plat->dma_write(d->dev_addr, CDSA, c->dma_ch);
		c->plat->dma_write(0, CDEI, c->dma_ch);
		c->plat->dma_write(d->fi, CDFI, c->dma_ch);
	}

	c->plat->dma_write(d->csdp, CSDP, c->dma_ch);

	omap_dma_start_sg(c, d, 0);
}
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'.
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}
static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}
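/*
 * Worked example (illustrative values, not from the original source):
 * a descriptor with two sg entries of en = 8 elements and fn = 32 frames
 * each, using 16-bit elements (es = CSDP_DATA_TYPE_16, 2 bytes), has a
 * total size of (8 * 32 + 8 * 32) * 2 = 1024 bytes.  omap_dma_desc_size()
 * reports this full size as the residue for descriptors that have not
 * started yet; omap_dma_desc_size_pos() below refines it using the
 * current hardware position for the descriptor that is in flight.
 */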
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}
static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (od->plat->errata & DMA_ERRATA_3_3 && addr == 0)
		addr = c->plat->dma_read(CSAC, c->dma_ch);

	if (!__dma_omap15xx(od->plat->dma_attr)) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (c->plat->dma_read(CDAC, c->dma_ch))
			addr = c->plat->dma_read(CSAC, c->dma_ch);
		else
			addr = c->plat->dma_read(CSSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CSSA, c->dma_ch) & 0xffff0000;

	return addr;
}
static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr))
		addr = c->plat->dma_read(CPC, c->dma_ch);
	else
		addr = c->plat->dma_read(CDAC, c->dma_ch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!__dma_omap15xx(od->plat->dma_attr) && addr == 0) {
		addr = c->plat->dma_read(CDAC, c->dma_ch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (addr == 0)
			addr = c->plat->dma_read(CDSA, c->dma_ch);
	}

	if (dma_omap1())
		addr |= c->plat->dma_read(CDSA, c->dma_ch) & 0xffff0000;

	return addr;
}
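/*
 * Summary of the position read-back above (a description of the existing
 * code, not new behaviour): OMAP15xx only provides the channel progress
 * counter (CPC), while later parts expose the source/destination active
 * address registers (CSAC/CDAC).  On OMAP1 the read-back yields only the
 * low 16 bits, so the upper half of the address is taken from the
 * programmed start address (CSSA/CDSA) instead.
 */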
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio, and in that case the DMA
		 * needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);

			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	d->ccr = CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp = es;

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			d->ccr |= CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			d->ccr |= c->dma_ch + 1;
		} else {
			d->ccr |= c->dma_sig & 0x1f;
		}

		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		d->ccr |= (c->dma_sig & ~0x1f) << 14;
		d->ccr |= c->dma_sig & 0x1f;

		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		d->ccr |= CCR_BUFFERING_DISABLE;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: the DMA engine API defines burst to be the number of
	 * dev-width transfers.  See the worked example after this function.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
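/*
 * Worked example for the EN/FN arithmetic above (illustrative values,
 * not taken from the original source): a peripheral configured with
 * dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES and dst_maxburst = 8 gives
 * es_bytes[es] = 2, en = 8 and frame_bytes = 16.  A 512-byte scatterlist
 * segment is then programmed as en = 8 elements per frame and
 * fn = 512 / 16 = 32 frames, i.e. ES * EN * FN = 2 * 8 * 32 = 512 bytes,
 * with a frame-synchronised hardware request every 8 elements.
 */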
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = 0;
	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr = CCR_AUTO_INIT | CCR_REPEAT;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			d->ccr |= CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			d->ccr |= c->dma_ch + 1;
		} else {
			d->ccr |= c->dma_sig & 0x1f;
		}

		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		d->ccr |= (c->dma_sig & ~0x1f) << 14;
		d->ccr |= c->dma_sig & 0x1f;

		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		d->ccr |= CCR_BUFFERING_DISABLE;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
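/*
 * Worked example for the cyclic case above (illustrative values, not
 * taken from the original source): an 8 KiB audio ring buffer split into
 * 2 KiB periods with 32-bit samples gives en = 2048 / 4 = 512 elements
 * per frame and fn = 8192 / 2048 = 4 frames.  One frame then corresponds
 * to one period, so CICR_FRAME_IE raises an interrupt (and hence a
 * vchan_cyclic_callback() from omap_dma_callback()) once per period.
 */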
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}
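/*
 * Illustrative client-side configuration (example values only; the FIFO
 * address and burst size below are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * This reaches omap_dma_slave_config() via the DMA_SLAVE_CONFIG case of
 * omap_dma_control() below.  8-byte bus widths are rejected because the
 * sDMA element size only supports 8, 16 and 32 bits.
 */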
static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_dma_stop(c);
		c->paused = true;
	}

	return 0;
}
static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_dma_start(c, c->desc);
		c->paused = false;
	}

	return 0;
}
static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->plat = od->plat;
	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}
static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}
static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}
static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);
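/*
 * Illustrative device-tree usage (node names and request numbers are
 * examples only): of_dma_simple_xlate() expects a single specifier cell
 * holding the sDMA request line, which ends up as the filter parameter
 * handed to omap_dma_filter_fn() below.
 *
 *	uart3: serial@48020000 {
 *		...
 *		dmas = <&sdma 53>, <&sdma 54>;
 *		dma-names = "tx", "rx";
 *	};
 */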
static struct platform_driver omap_dma_driver = {
	.probe = omap_dma_probe,
	.remove = omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
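/*
 * Illustrative non-DT usage (the request number 53 is an example only):
 * a client driver would normally hand this filter, together with its
 * sDMA request line, to dma_request_channel():
 *
 *	dma_cap_mask_t mask;
 *	unsigned sig = 53;	(example request line)
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */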
static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);
static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");