dmaengine: omap-dma: consolidate clearing channel status register
drivers/dma/omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
        struct dma_device ddev;
        spinlock_t lock;
        struct tasklet_struct task;
        struct list_head pending;
        struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
        struct virt_dma_chan vc;
        struct list_head node;
        struct omap_system_dma_plat_info *plat;

        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;
        bool paused;

        int dma_ch;
        struct omap_desc *desc;
        unsigned sgidx;
};

struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
};

struct omap_desc {
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;

        int16_t fi;             /* for OMAP_DMA_SYNC_PACKET */
        uint8_t es;             /* CSDP_DATA_TYPE_xxx */
        uint32_t ccr;           /* CCR value */
        uint16_t cicr;          /* CICR value */
        uint32_t csdp;          /* CSDP value */

        unsigned sglen;
        struct omap_sg sg[0];
};

enum {
        CCR_FS                  = BIT(5),
        CCR_READ_PRIORITY       = BIT(6),
        CCR_ENABLE              = BIT(7),
        CCR_AUTO_INIT           = BIT(8),       /* OMAP1 only */
        CCR_REPEAT              = BIT(9),       /* OMAP1 only */
        CCR_OMAP31_DISABLE      = BIT(10),      /* OMAP1 only */
        CCR_SUSPEND_SENSITIVE   = BIT(8),       /* OMAP2+ only */
        CCR_RD_ACTIVE           = BIT(9),       /* OMAP2+ only */
        CCR_WR_ACTIVE           = BIT(10),      /* OMAP2+ only */
        CCR_SRC_AMODE_CONSTANT  = 0 << 12,
        CCR_SRC_AMODE_POSTINC   = 1 << 12,
        CCR_SRC_AMODE_SGLIDX    = 2 << 12,
        CCR_SRC_AMODE_DBLIDX    = 3 << 12,
        CCR_DST_AMODE_CONSTANT  = 0 << 14,
        CCR_DST_AMODE_POSTINC   = 1 << 14,
        CCR_DST_AMODE_SGLIDX    = 2 << 14,
        CCR_DST_AMODE_DBLIDX    = 3 << 14,
        CCR_CONSTANT_FILL       = BIT(16),
        CCR_TRANSPARENT_COPY    = BIT(17),
        CCR_BS                  = BIT(18),
        CCR_SUPERVISOR          = BIT(22),
        CCR_PREFETCH            = BIT(23),
        CCR_TRIGGER_SRC         = BIT(24),
        CCR_BUFFERING_DISABLE   = BIT(25),
        CCR_WRITE_PRIORITY      = BIT(26),
        CCR_SYNC_ELEMENT        = 0,
        CCR_SYNC_FRAME          = CCR_FS,
        CCR_SYNC_BLOCK          = CCR_BS,
        CCR_SYNC_PACKET         = CCR_BS | CCR_FS,

        CSDP_DATA_TYPE_8        = 0,
        CSDP_DATA_TYPE_16       = 1,
        CSDP_DATA_TYPE_32       = 2,
        CSDP_SRC_PORT_EMIFF     = 0 << 2, /* OMAP1 only */
        CSDP_SRC_PORT_EMIFS     = 1 << 2, /* OMAP1 only */
        CSDP_SRC_PORT_OCP_T1    = 2 << 2, /* OMAP1 only */
        CSDP_SRC_PORT_TIPB      = 3 << 2, /* OMAP1 only */
        CSDP_SRC_PORT_OCP_T2    = 4 << 2, /* OMAP1 only */
        CSDP_SRC_PORT_MPUI      = 5 << 2, /* OMAP1 only */
        CSDP_SRC_PACKED         = BIT(6),
        CSDP_SRC_BURST_1        = 0 << 7,
        CSDP_SRC_BURST_16       = 1 << 7,
        CSDP_SRC_BURST_32       = 2 << 7,
        CSDP_SRC_BURST_64       = 3 << 7,
        CSDP_DST_PORT_EMIFF     = 0 << 9, /* OMAP1 only */
        CSDP_DST_PORT_EMIFS     = 1 << 9, /* OMAP1 only */
        CSDP_DST_PORT_OCP_T1    = 2 << 9, /* OMAP1 only */
        CSDP_DST_PORT_TIPB      = 3 << 9, /* OMAP1 only */
        CSDP_DST_PORT_OCP_T2    = 4 << 9, /* OMAP1 only */
        CSDP_DST_PORT_MPUI      = 5 << 9, /* OMAP1 only */
        CSDP_DST_PACKED         = BIT(13),
        CSDP_DST_BURST_1        = 0 << 14,
        CSDP_DST_BURST_16       = 1 << 14,
        CSDP_DST_BURST_32       = 2 << 14,
        CSDP_DST_BURST_64       = 3 << 14,

        CICR_TOUT_IE            = BIT(0),       /* OMAP1 only */
        CICR_DROP_IE            = BIT(1),
        CICR_HALF_IE            = BIT(2),
        CICR_FRAME_IE           = BIT(3),
        CICR_LAST_IE            = BIT(4),
        CICR_BLOCK_IE           = BIT(5),
        CICR_PKT_IE             = BIT(7),       /* OMAP2+ only */
        CICR_TRANS_ERR_IE       = BIT(8),       /* OMAP2+ only */
        CICR_SUPERVISOR_ERR_IE  = BIT(10),      /* OMAP2+ only */
        CICR_MISALIGNED_ERR_IE  = BIT(11),      /* OMAP2+ only */
        CICR_DRAIN_IE           = BIT(12),      /* OMAP2+ only */
        CICR_SUPER_BLOCK_IE     = BIT(14),      /* OMAP2+ only */

        CLNK_CTRL_STOP_LNK      = BIT(14),      /* OMAP1 only */
        CLNK_CTRL_ENABLE_LNK    = BIT(15),
};

static const unsigned es_bytes[] = {
        [CSDP_DATA_TYPE_8] = 1,
        [CSDP_DATA_TYPE_16] = 2,
        [CSDP_DATA_TYPE_32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
        .filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
        return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct omap_desc, vd));
}

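/*
 * Clear the channel status register (CSR), consolidated here for both
 * the start and stop paths.  On OMAP1 the CSR is clear-on-read, so a
 * read is sufficient; on OMAP2+ the status bits are write-1-to-clear,
 * so write all ones.
 */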
static void omap_dma_clear_csr(struct omap_chan *c)
{
        if (dma_omap1())
                c->plat->dma_read(CSR, c->dma_ch);
        else
                c->plat->dma_write(~0, CSR, c->dma_ch);
}

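/*
 * Start a transfer: reset the progress counter, set up channel linking
 * (a cyclic channel is linked to itself so the hardware restarts it at
 * the end of each block), clear stale status, enable interrupts, and
 * finally set CCR_ENABLE.
 */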
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        uint32_t val;

        if (__dma_omap15xx(od->plat->dma_attr))
                c->plat->dma_write(0, CPC, c->dma_ch);
        else
                c->plat->dma_write(0, CDAC, c->dma_ch);

        if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
                val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

                if (dma_omap1())
                        val &= ~CLNK_CTRL_STOP_LNK;

                val |= c->dma_ch | CLNK_CTRL_ENABLE_LNK;

                c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
        } else if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS) {
                c->plat->dma_write(c->dma_ch, CLNK_CTRL, c->dma_ch);
        }

        omap_dma_clear_csr(c);

        /* Enable interrupts */
        c->plat->dma_write(d->cicr, CICR, c->dma_ch);

        val = c->plat->dma_read(CCR, c->dma_ch);
        val |= CCR_ENABLE;
        mb();
        c->plat->dma_write(val, CCR, c->dma_ch);
}

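/*
 * Stop a transfer.  For errata i541, a source-triggered channel must
 * have its sDMA FIFO drained, with the controller held out of idle
 * (MIDLEMODE forced to no-idle), before the disable is complete.
 */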
static void omap_dma_stop(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        uint32_t val;

        /* disable irq */
        c->plat->dma_write(0, CICR, c->dma_ch);

        omap_dma_clear_csr(c);

        val = c->plat->dma_read(CCR, c->dma_ch);
        if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
                uint32_t sysconfig;
                unsigned i;

                sysconfig = c->plat->dma_read(OCP_SYSCONFIG, c->dma_ch);
                val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
                val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
                c->plat->dma_write(val, OCP_SYSCONFIG, c->dma_ch);

                val = c->plat->dma_read(CCR, c->dma_ch);
                val &= ~CCR_ENABLE;
                c->plat->dma_write(val, CCR, c->dma_ch);

                /* Wait for sDMA FIFO to drain */
                for (i = 0; ; i++) {
                        val = c->plat->dma_read(CCR, c->dma_ch);
                        if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
                                break;

                        if (i > 100)
                                break;

                        udelay(5);
                }

                if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
                        dev_err(c->vc.chan.device->dev,
                                "DMA drain did not complete on lch %d\n",
                                c->dma_ch);

                c->plat->dma_write(sysconfig, OCP_SYSCONFIG, c->dma_ch);
        } else {
                val &= ~CCR_ENABLE;
                c->plat->dma_write(val, CCR, c->dma_ch);
        }

        mb();

        if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
                val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);

                if (dma_omap1())
                        val |= CLNK_CTRL_STOP_LNK;
                else
                        val &= ~CLNK_CTRL_ENABLE_LNK;

                c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
        }
}

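/*
 * Program the memory-side address and the element/frame counts for one
 * scatterlist segment, then (re)start the channel for that segment.
 */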
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
        unsigned idx)
{
        struct omap_sg *sg = d->sg + idx;

        if (d->dir == DMA_DEV_TO_MEM) {
                c->plat->dma_write(sg->addr, CDSA, c->dma_ch);
                c->plat->dma_write(0, CDEI, c->dma_ch);
                c->plat->dma_write(0, CDFI, c->dma_ch);
        } else {
                c->plat->dma_write(sg->addr, CSSA, c->dma_ch);
                c->plat->dma_write(0, CSEI, c->dma_ch);
                c->plat->dma_write(0, CSFI, c->dma_ch);
        }

        c->plat->dma_write(sg->en, CEN, c->dma_ch);
        c->plat->dma_write(sg->fn, CFN, c->dma_ch);

        omap_dma_start(c, d);
}

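/*
 * Take the next queued descriptor, program the registers that are
 * constant for the whole transfer (CCR, device-side address, CSDP),
 * and start its first scatterlist segment.
 */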
static void omap_dma_start_desc(struct omap_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct omap_desc *d;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_omap_dma_desc(&vd->tx);
        c->sgidx = 0;

        c->plat->dma_write(d->ccr, CCR, c->dma_ch);
        if (dma_omap1())
                c->plat->dma_write(d->ccr >> 16, CCR2, c->dma_ch);

        if (d->dir == DMA_DEV_TO_MEM) {
                c->plat->dma_write(d->dev_addr, CSSA, c->dma_ch);
                c->plat->dma_write(0, CSEI, c->dma_ch);
                c->plat->dma_write(d->fi, CSFI, c->dma_ch);
        } else {
                c->plat->dma_write(d->dev_addr, CDSA, c->dma_ch);
                c->plat->dma_write(0, CDEI, c->dma_ch);
                c->plat->dma_write(d->fi, CDFI, c->dma_ch);
        }

        c->plat->dma_write(d->csdp, CSDP, c->dma_ch);

        omap_dma_start_sg(c, d, 0);
}

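/*
 * Per-channel interrupt callback (registered via omap_request_dma).
 * Advances to the next scatterlist segment, completes the descriptor,
 * or, for cyclic transfers, signals the period callback.
 */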
static void omap_dma_callback(int ch, u16 status, void *data)
{
        struct omap_chan *c = data;
        struct omap_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        d = c->desc;
        if (d) {
                if (!c->cyclic) {
                        if (++c->sgidx < d->sglen) {
                                omap_dma_start_sg(c, d, c->sgidx);
                        } else {
                                omap_dma_start_desc(c);
                                vchan_cookie_complete(&d->vd);
                        }
                } else {
                        vchan_cyclic_callback(&d->vd);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
        struct omap_dmadev *d = (struct omap_dmadev *)data;
        LIST_HEAD(head);

        spin_lock_irq(&d->lock);
        list_splice_tail_init(&d->pending, &head);
        spin_unlock_irq(&d->lock);

        while (!list_empty(&head)) {
                struct omap_chan *c = list_first_entry(&head,
                        struct omap_chan, node);

                spin_lock_irq(&c->vc.lock);
                list_del_init(&c->node);
                omap_dma_start_desc(c);
                spin_unlock_irq(&c->vc.lock);
        }
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

        return omap_request_dma(c->dma_sig, "DMA engine",
                omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);

        dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
        return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
        unsigned i;
        size_t size;

        for (size = i = 0; i < d->sglen; i++)
                size += omap_dma_sg_size(&d->sg[i]);

        return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
        unsigned i;
        size_t size, es_size = es_bytes[d->es];

        for (size = i = 0; i < d->sglen; i++) {
                size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

                if (size)
                        size += this_size;
                else if (addr >= d->sg[i].addr &&
                         addr < d->sg[i].addr + this_size)
                        size += d->sg[i].addr + this_size - addr;
        }
        return size;
}

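/*
 * The two position helpers below read the current source/destination
 * address from the hardware for residue calculation, working around
 * per-SoC quirks in the CSAC/CDAC progress counters.
 */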
static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        dma_addr_t addr;

        if (__dma_omap15xx(od->plat->dma_attr))
                addr = c->plat->dma_read(CPC, c->dma_ch);
        else
                addr = c->plat->dma_read(CSAC, c->dma_ch);

        if (od->plat->errata & DMA_ERRATA_3_3 && addr == 0)
                addr = c->plat->dma_read(CSAC, c->dma_ch);

        if (!__dma_omap15xx(od->plat->dma_attr)) {
                /*
                 * CDAC == 0 indicates that the DMA transfer on the channel has
                 * not been started (no data has been transferred so far).
                 * Return the programmed source start address in this case.
                 */
                if (c->plat->dma_read(CDAC, c->dma_ch))
                        addr = c->plat->dma_read(CSAC, c->dma_ch);
                else
                        addr = c->plat->dma_read(CSSA, c->dma_ch);
        }

        if (dma_omap1())
                addr |= c->plat->dma_read(CSSA, c->dma_ch) & 0xffff0000;

        return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        dma_addr_t addr;

        if (__dma_omap15xx(od->plat->dma_attr))
                addr = c->plat->dma_read(CPC, c->dma_ch);
        else
                addr = c->plat->dma_read(CDAC, c->dma_ch);

        /*
         * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
         * read before the DMA controller finished disabling the channel.
         */
        if (!__dma_omap15xx(od->plat->dma_attr) && addr == 0) {
                addr = c->plat->dma_read(CDAC, c->dma_ch);
                /*
                 * CDAC == 0 indicates that the DMA transfer on the channel has
                 * not been started (no data has been transferred so far).
                 * Return the programmed destination start address in this case.
                 */
                if (addr == 0)
                        addr = c->plat->dma_read(CDSA, c->dma_ch);
        }

        if (dma_omap1())
                addr |= c->plat->dma_read(CDSA, c->dma_ch) & 0xffff0000;

        return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct omap_desc *d = c->desc;
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = omap_dma_get_src_pos(c);
                else if (d->dir == DMA_DEV_TO_MEM)
                        pos = omap_dma_get_dst_pos(c);
                else
                        pos = 0;

                txstate->residue = omap_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc) {
                /*
                 * c->cyclic is used only by audio, and in that case the DMA
                 * needs to be started without delay.
                 */
                if (!c->cyclic) {
                        struct omap_dmadev *d = to_omap_dma_dev(chan->device);

                        spin_lock(&d->lock);
                        if (list_empty(&c->node))
                                list_add_tail(&c->node, &d->pending);
                        spin_unlock(&d->lock);
                        tasklet_schedule(&d->task);
                } else {
                        omap_dma_start_desc(c);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

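/*
 * Prepare a slave scatter-gather transaction.  Each sg entry becomes
 * one frame-synchronised transfer of EN elements per frame and FN
 * frames, where EN is the configured burst size.
 */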
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct scatterlist *sgent;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned i, j = 0, es, en, frame_bytes;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = CSDP_DATA_TYPE_8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = CSDP_DATA_TYPE_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = CSDP_DATA_TYPE_32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;

        d->ccr = CCR_SYNC_FRAME;
        if (dir == DMA_DEV_TO_MEM)
                d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
        else
                d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

        d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
        d->csdp = es;

        if (dma_omap1()) {
                if (__dma_omap16xx(od->plat->dma_attr)) {
                        d->ccr |= CCR_OMAP31_DISABLE;
                        /* Duplicate what plat-omap/dma.c does */
                        d->ccr |= c->dma_ch + 1;
                } else {
                        d->ccr |= c->dma_sig & 0x1f;
                }

                d->cicr |= CICR_TOUT_IE;

                if (dir == DMA_DEV_TO_MEM)
                        d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
                else
                        d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
        } else {
                d->ccr |= (c->dma_sig & ~0x1f) << 14;
                d->ccr |= c->dma_sig & 0x1f;

                if (dir == DMA_DEV_TO_MEM)
                        d->ccr |= CCR_TRIGGER_SRC;

                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
        }
        if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
                d->ccr |= CCR_BUFFERING_DISABLE;

        /*
         * Build our scatterlist entries: each contains the address,
         * the number of elements (EN) in each frame, and the number of
         * frames (FN).  Number of bytes for this entry = ES * EN * FN.
         *
         * Burst size translates to number of elements with frame sync.
         * Note: DMA engine defines burst to be the number of dev-width
         * transfers.
         */
        en = burst;
        frame_bytes = es_bytes[es] * en;
        for_each_sg(sgl, sgent, sglen, i) {
                d->sg[j].addr = sg_dma_address(sgent);
                d->sg[j].en = en;
                d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
                j++;
        }

        d->sglen = j;

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

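/*
 * Prepare a cyclic (audio-style) transaction: a single segment where
 * one frame corresponds to one period, restarted in hardware via
 * channel self-linking (or AUTO_INIT/REPEAT on OMAP15xx).
 */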
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
        void *context)
{
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned es;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = CSDP_DATA_TYPE_8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = CSDP_DATA_TYPE_16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = CSDP_DATA_TYPE_32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->fi = burst;
        d->es = es;
        d->sg[0].addr = buf_addr;
        d->sg[0].en = period_len / es_bytes[es];
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;

        d->ccr = 0;
        if (__dma_omap15xx(od->plat->dma_attr))
                d->ccr = CCR_AUTO_INIT | CCR_REPEAT;
        if (dir == DMA_DEV_TO_MEM)
                d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
        else
                d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

        d->cicr = CICR_DROP_IE;
        if (flags & DMA_PREP_INTERRUPT)
                d->cicr |= CICR_FRAME_IE;

        d->csdp = es;

        if (dma_omap1()) {
                if (__dma_omap16xx(od->plat->dma_attr)) {
                        d->ccr |= CCR_OMAP31_DISABLE;
                        /* Duplicate what plat-omap/dma.c does */
                        d->ccr |= c->dma_ch + 1;
                } else {
                        d->ccr |= c->dma_sig & 0x1f;
                }

                d->cicr |= CICR_TOUT_IE;

                if (dir == DMA_DEV_TO_MEM)
                        d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
                else
                        d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
        } else {
                d->ccr |= (c->dma_sig & ~0x1f) << 14;
                d->ccr |= c->dma_sig & 0x1f;

                if (burst)
                        d->ccr |= CCR_SYNC_PACKET;
                else
                        d->ccr |= CCR_SYNC_ELEMENT;

                if (dir == DMA_DEV_TO_MEM)
                        d->ccr |= CCR_TRIGGER_SRC;

                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

                d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
        }
        if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
                d->ccr |= CCR_BUFFERING_DISABLE;

        c->cyclic = true;

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&c->cfg, cfg, sizeof(c->cfg));

        return 0;
}

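/*
 * Abort the channel: stop the hardware (unless already paused), drop
 * the in-flight descriptor, and free everything still queued.
 */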
static int omap_dma_terminate_all(struct omap_chan *c)
{
        struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after omap_dma_stop() returns (even if it does, it will see
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
                c->desc = NULL;
                /* Avoid stopping the dma twice */
                if (!c->paused)
                        omap_dma_stop(c);
        }

        if (c->cyclic) {
                c->cyclic = false;
                c->paused = false;
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (!c->paused) {
                omap_dma_stop(c);
                c->paused = true;
        }

        return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (c->paused) {
                omap_dma_start(c, c->desc);
                c->paused = false;
        }

        return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        int ret;

        switch (cmd) {
        case DMA_SLAVE_CONFIG:
                ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
                break;

        case DMA_TERMINATE_ALL:
                ret = omap_dma_terminate_all(c);
                break;

        case DMA_PAUSE:
                ret = omap_dma_pause(c);
                break;

        case DMA_RESUME:
                ret = omap_dma_resume(c);
                break;

        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
        struct omap_chan *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->plat = od->plat;
        c->dma_sig = dma_sig;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);
        INIT_LIST_HEAD(&c->node);

        od->ddev.chancnt++;

        return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
        tasklet_kill(&od->task);
        while (!list_empty(&od->ddev.channels)) {
                struct omap_chan *c = list_first_entry(&od->ddev.channels,
                        struct omap_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
}

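/*
 * Probe: all 127 logical channels are registered up front; the real
 * hardware channel is only requested when a client allocates one.
 */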
static int omap_dma_probe(struct platform_device *pdev)
{
        struct omap_dmadev *od;
        int rc, i;

        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        od->plat = omap_get_plat_info();
        if (!od->plat)
                return -EPROBE_DEFER;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_control = omap_dma_control;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
        spin_lock_init(&od->lock);

        tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

        for (i = 0; i < 127; i++) {
                rc = omap_dma_chan_init(od, i);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                        rc);
                omap_dma_free(od);
                return rc;
        }

        platform_set_drvdata(pdev, od);

        if (pdev->dev.of_node) {
                omap_dma_info.dma_cap = od->ddev.cap_mask;

                /* Device-tree DMA controller registration */
                rc = of_dma_controller_register(pdev->dev.of_node,
                                of_dma_simple_xlate, &omap_dma_info);
                if (rc) {
                        pr_warn("OMAP-DMA: failed to register DMA controller\n");
                        dma_async_device_unregister(&od->ddev);
                        omap_dma_free(od);
                }
        }

        dev_info(&pdev->dev, "OMAP DMA engine driver\n");

        return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
        struct omap_dmadev *od = platform_get_drvdata(pdev);

        if (pdev->dev.of_node)
                of_dma_controller_free(pdev->dev.of_node);

        dma_async_device_unregister(&od->ddev);
        omap_dma_free(od);

        return 0;
}

static const struct of_device_id omap_dma_match[] = {
        { .compatible = "ti,omap2420-sdma", },
        { .compatible = "ti,omap2430-sdma", },
        { .compatible = "ti,omap3430-sdma", },
        { .compatible = "ti,omap3630-sdma", },
        { .compatible = "ti,omap4430-sdma", },
        {},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
        .probe  = omap_dma_probe,
        .remove = omap_dma_remove,
        .driver = {
                .name = "omap-dma-engine",
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(omap_dma_match),
        },
};

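/*
 * Filter function for dma_request_channel(): matches a channel by the
 * DMA request (signal) number passed in @param.
 */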
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &omap_dma_driver.driver) {
                struct omap_chan *c = to_omap_dma_chan(chan);
                unsigned req = *(unsigned *)param;

                return req == c->dma_sig;
        }
        return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
        return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
        platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");