/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

#define OMAP_SDMA_REQUESTS	127
#define OMAP_SDMA_CHANNELS	32
struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	bool legacy;
	bool ll123_supported;
	struct dma_pool *desc_pool;
	unsigned dma_requests;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan **lch_map;
};

struct omap_chan {
	struct virt_dma_chan vc;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;
	bool running;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

#define DESC_NXT_SV_REFRESH	(0x1 << 24)
#define DESC_NXT_SV_REUSE	(0x2 << 24)
#define DESC_NXT_DV_REFRESH	(0x1 << 26)
#define DESC_NXT_DV_REUSE	(0x2 << 26)
#define DESC_NTYPE_TYPE2	(0x2 << 29)

/* Type 2 descriptor with Source or Destination address update */
struct omap_type2_desc {
	uint32_t next_desc;
	uint32_t en;
	uint32_t addr;		/* src or dst */
	uint16_t fn;
	uint16_t cicr;
	int16_t cdei;
	int16_t csei;
	int32_t cdfi;
	int32_t csfi;
} __packed;

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
	int32_t fi;		/* for double indexing */
	int16_t ei;		/* for double indexing */

	/* Linked list */
	struct omap_type2_desc *t2_desc;
	dma_addr_t t2_desc_paddr;
};

struct omap_desc {
	struct virt_dma_desc vd;
	bool using_ll;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int32_t fi;		/* for OMAP_DMA_SYNC_PACKET / double indexing */
	int16_t ei;		/* for double indexing */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};
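
/*
 * The sg[] member above is a flexible array: a descriptor covering
 * sglen entries is allocated in one piece, as the prep functions below
 * do with kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC).
 */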
enum {
	CAPS_0_SUPPORT_LL123	= BIT(20),	/* Linked List type1/2/3 */
	CAPS_0_SUPPORT_LL4	= BIT(21),	/* Linked List type4 */

	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2,	/* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9,	/* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),

	CDP_DST_VALID_INC	= 0 << 0,
	CDP_DST_VALID_RELOAD	= 1 << 0,
	CDP_DST_VALID_REUSE	= 2 << 0,
	CDP_SRC_VALID_INC	= 0 << 2,
	CDP_SRC_VALID_RELOAD	= 1 << 2,
	CDP_SRC_VALID_REUSE	= 2 << 2,
	CDP_NTYPE_TYPE1		= 1 << 4,
	CDP_NTYPE_TYPE2		= 2 << 4,
	CDP_NTYPE_TYPE3		= 3 << 4,
	CDP_TMODE_NORMAL	= 0 << 8,
	CDP_TMODE_LLIST		= 1 << 8,
	CDP_FAST		= BIT(10),
};

static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};
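
/*
 * A transfer moves ES * EN * FN bytes; e.g. CSDP_DATA_TYPE_16
 * (2 bytes per element) with EN = 512 elements and FN = 4 frames is
 * 2 * 512 * 4 = 4096 bytes in total.
 */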
static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	struct omap_desc *d = to_omap_dma_desc(&vd->tx);

	if (d->using_ll) {
		struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
		int i;

		for (i = 0; i < d->sglen; i++) {
			if (d->sg[i].t2_desc)
				dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
					      d->sg[i].t2_desc_paddr);
		}
	}

	kfree(d);
}

static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
				     enum dma_transfer_direction dir, bool last)
{
	struct omap_sg *sg = &d->sg[idx];
	struct omap_type2_desc *t2_desc = sg->t2_desc;

	if (idx)
		d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
	if (last)
		t2_desc->next_desc = 0xfffffffc;
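
	/*
	 * The fields below mirror the channel registers (EN, FN, CICR and
	 * the element/frame index pairs); in linked-list mode the
	 * controller reloads them from each descriptor in turn.
	 */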
	t2_desc->en = sg->en;
	t2_desc->addr = sg->addr;
	t2_desc->fn = sg->fn & 0xffff;
	t2_desc->cicr = d->cicr;
	if (!last)
		t2_desc->cicr &= ~CICR_BLOCK_IE;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		t2_desc->cdei = sg->ei;
		t2_desc->csei = d->ei;
		t2_desc->cdfi = sg->fi;
		t2_desc->csfi = d->fi;

		t2_desc->en |= DESC_NXT_DV_REFRESH;
		t2_desc->en |= DESC_NXT_SV_REUSE;
		break;
	case DMA_MEM_TO_DEV:
		t2_desc->cdei = d->ei;
		t2_desc->csei = sg->ei;
		t2_desc->cdfi = d->fi;
		t2_desc->csfi = sg->fi;

		t2_desc->en |= DESC_NXT_SV_REFRESH;
		t2_desc->en |= DESC_NXT_DV_REUSE;
		break;
	default:
		break;
	}

	t2_desc->en |= DESC_NTYPE_TYPE2;
}

static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}

static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}

static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}
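
/*
 * On OMAP1 the CSR register is read-to-clear, while on OMAP2+ the
 * status bits are cleared by writing ones back, hence the two paths
 * above.
 */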
static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
	unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}

static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint16_t cicr = d->cicr;

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	if (d->using_ll) {
		uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;

		if (d->dir == DMA_DEV_TO_MEM)
			cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
		else
			cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
		omap_dma_chan_write(c, CDP, cdp);

		omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
		omap_dma_chan_write(c, CCDN, 0);
		omap_dma_chan_write(c, CCFN, 0xffff);
		omap_dma_chan_write(c, CCEN, 0xffffff);
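
		/*
		 * In linked-list mode each type 2 descriptor carries its
		 * own EN/FN, so the channel counters above are primed
		 * with their maximum values (24-bit EN, 16-bit FN).
		 */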
		cicr &= ~CICR_BLOCK_IE;
	} else if (od->ll123_supported) {
		omap_dma_chan_write(c, CDP, 0);
	}

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);

	c->running = true;
}

static void omap_dma_drain_chan(struct omap_chan *c)
{
	int i;
	u32 val;

	/* Wait for sDMA FIFO to drain */
	for (i = 0; ; i++) {
		val = omap_dma_chan_read(c, CCR);
		if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
			break;

		if (i > 100)
			break;

		udelay(5);
	}

	if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
		dev_err(c->vc.chan.device->dev,
			"DMA drain did not complete on lch %d\n",
			c->dma_ch);
}

static int omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		if (!(val & CCR_ENABLE))
			return -EINVAL;

		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
	c->running = false;
	return 0;
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_sg *sg = d->sg + c->sgidx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, sg->ei);
	omap_dma_chan_write(c, cxfi, sg->fi);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
	c->sgidx++;
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, d->ei);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (c->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else if (d->using_ll || c->sgidx == d->sglen) {
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		} else {
			omap_dma_start_sg(c, d);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct device *dev = od->ddev.dev;
	int ret;

	if (od->legacy) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
				       &c->dma_ch);
	}

	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!od->legacy) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!od->legacy) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
		c->dma_sig);
	c->dma_sig = 0;
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far). Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!c->paused && c->running) {
		uint32_t ccr = omap_dma_chan_read(c, CCR);
		/*
		 * The channel is no longer active, set the return value
		 * accordingly.
		 */
		if (!(ccr & CCR_ENABLE))
			ret = DMA_COMPLETE;
	}

	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	if (ret == DMA_IN_PROGRESS && c->paused)
		ret = DMA_PAUSED;
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		omap_dma_start_desc(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, es, en, frame_bytes;
	bool ll_failed = false;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM) {
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
	} else {
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
	}

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp |= es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN). Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
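
	/*
	 * Example: a 4-byte dev_width with maxburst 16 gives en = 16 and
	 * frame_bytes = 64, so a 4096-byte segment below maps to fn = 64.
	 */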

	d->using_ll = od->ll123_supported;

	for_each_sg(sgl, sgent, sglen, i) {
		struct omap_sg *osg = &d->sg[i];

		osg->addr = sg_dma_address(sgent);
		osg->en = en;
		osg->fn = sg_dma_len(sgent) / frame_bytes;

		if (d->using_ll) {
			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
						      &osg->t2_desc_paddr);
			if (!osg->t2_desc) {
				dev_err(chan->device->dev,
					"t2_desc[%d] allocation failed\n", i);
				ll_failed = true;
				d->using_ll = false;
				continue;
			}

			omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
		}
	}

	d->sglen = sglen;

	/* Release the dma_pool entries if one allocation failed */
	if (ll_failed) {
		for (i = 0; i < d->sglen; i++) {
			struct omap_sg *osg = &d->sg[i];

			if (osg->t2_desc) {
				dma_pool_free(od->desc_pool, osg->t2_desc,
					      osg->t2_desc_paddr);
				osg->t2_desc = NULL;
			}
		}
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
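
	/*
	 * Example: buf_len = 4096 and period_len = 1024 with 16-bit
	 * elements give en = 512 elements per period and fn = 4 periods
	 * per buffer.
	 */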

	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM) {
			d->ccr |= CCR_TRIGGER_SRC;
			d->csdp |= CSDP_DST_PACKED;
		} else {
			d->csdp |= CSDP_SRC_PACKED;
		}

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	uint8_t data_type;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((src | dest | len));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;
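
	/*
	 * __ffs(src | dest | len) picks the largest element size all
	 * three are aligned to: e.g. src = 0x1000, dest = 0x2004 and
	 * len = 0x100 yield bit 2, i.e. CSDP_DATA_TYPE_32.
	 */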

	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = src;
	d->fi = 0;
	d->es = data_type;
	d->sg[0].en = len / BIT(data_type);
	d->sg[0].fn = 1;
	d->sg[0].addr = dest;
	d->sglen = 1;
	d->ccr = c->ccr;
	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	struct omap_sg *sg;
	uint8_t data_type;
	size_t src_icg, dst_icg;

	/* Slave mode is not supported */
	if (is_slave_direction(xt->dir))
		return NULL;

	if (xt->frame_size != 1 || xt->numf == 0)
		return NULL;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	sg = &d->sg[0];
	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = xt->src_start;
	d->es = data_type;
	sg->en = xt->sgl[0].size / BIT(data_type);
	sg->fn = xt->numf;
	sg->addr = xt->dst_start;
	d->sglen = 1;
	d->ccr = c->ccr;

	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	if (src_icg) {
		d->ccr |= CCR_SRC_AMODE_DBLIDX;
		d->ei = 1;
		d->fi = src_icg + 1;
	} else if (xt->src_inc) {
		d->ccr |= CCR_SRC_AMODE_POSTINC;
		d->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: SRC constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	if (dst_icg) {
		d->ccr |= CCR_DST_AMODE_DBLIDX;
		sg->ei = 1;
		sg->fi = dst_icg + 1;
	} else if (xt->dst_inc) {
		d->ccr |= CCR_DST_AMODE_POSTINC;
		sg->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: DST constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}
static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		omap_dma_desc_free(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	c->cyclic = false;
	c->paused = false;

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
static void omap_dma_synchronize(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_synchronize(&c->vc);
}

static int omap_dma_pause(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;
	bool can_pause = false;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (!c->desc)
		goto out;

	if (c->cyclic)
		can_pause = true;

	/*
	 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
	 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
	 * "When a channel is disabled during a transfer, the channel undergoes
	 * an abort, unless it is hardware-source-synchronized …".
	 * A source-synchronised channel is one where the fetching of data is
	 * under control of the device. In other words, a device-to-memory
	 * transfer. So, a destination-synchronised channel (which would be a
	 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
	 * bit is cleared.
	 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
	 * aborts immediately after completion of current read/write
	 * transactions and then the FIFO is cleaned up." The term "cleaned up"
	 * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
	 * are both clear _before_ disabling the channel, otherwise data loss
	 * will occur.
	 * The problem is that if the channel is active, then device activity
	 * can result in DMA activity starting between reading those as both
	 * clear and the write to DMA_CCR to clear the enable bit hitting the
	 * hardware. If the DMA hardware can't drain the data in its FIFO to the
	 * destination, then data loss "might" occur (say if we write to a UART
	 * and the UART is not accepting any further data).
	 */
	else if (c->desc->dir == DMA_DEV_TO_MEM)
		can_pause = true;

	if (can_pause && !c->paused) {
		ret = omap_dma_stop(c);
		if (!ret)
			c->paused = true;
	}
out:
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}
static int omap_dma_resume(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (c->paused && c->desc) {
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
		ret = 0;
	}
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
	od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
	od->ddev.device_config = omap_dma_slave_config;
	od->ddev.device_pause = omap_dma_pause;
	od->ddev.device_resume = omap_dma_resume;
	od->ddev.device_terminate_all = omap_dma_terminate_all;
	od->ddev.device_synchronize = omap_dma_synchronize;
	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	if (!pdev->dev.of_node) {
		od->dma_requests = od->plat->dma_attr->lch_count;
		if (unlikely(!od->dma_requests))
			od->dma_requests = OMAP_SDMA_REQUESTS;
	} else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
					&od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
		od->dma_requests = OMAP_SDMA_REQUESTS;
	}

	od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
				   sizeof(*od->lch_map), GFP_KERNEL);
	if (!od->lch_map)
		return -ENOMEM;

	for (i = 0; i < od->dma_requests; i++) {
		rc = omap_dma_chan_init(od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc)
			return rc;
	}

	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
		od->ll123_supported = true;

	od->ddev.filter.map = od->plat->slave_map;
	od->ddev.filter.mapcnt = od->plat->slavecnt;
	od->ddev.filter.fn = omap_dma_filter_fn;

	if (od->ll123_supported) {
		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
						&pdev->dev,
						sizeof(struct omap_type2_desc),
						4, 0);
		if (!od->desc_pool) {
			dev_err(&pdev->dev,
				"unable to allocate descriptor pool\n");
			od->ll123_supported = false;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
						of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");

	return rc;
}
static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);
	int irq;

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	irq = platform_get_irq(pdev, 1);
	devm_free_irq(&pdev->dev, irq, od);

	dma_async_device_unregister(&od->ddev);

	if (!od->legacy) {
		/* Disable all interrupts */
		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
	}

	if (od->ll123_supported)
		dma_pool_destroy(od->desc_pool);

	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);
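
/*
 * Illustrative DT usage (hypothetical node names and request numbers,
 * not taken from this file): a client references the controller via
 * the generic dmas binding, e.g.
 *
 *	&mcbsp2 {
 *		dmas = <&sdma 33>, <&sdma 34>;
 *		dma-names = "tx", "rx";
 *	};
 */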
static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		if (req <= od->dma_requests) {
			c->dma_sig = req;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
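
/*
 * Sketch of legacy (non-DT) channel allocation using the filter above;
 * the request number is board-specific and shown only for illustration:
 *
 *	dma_cap_mask_t mask;
 *	unsigned sig = 17;	(board-specific sDMA request line)
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */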

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");