dw_dmac: fill optional encoded parameters in register structure
[linux-2.6-block.git] / drivers / dma / dw_dmac.c
/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
		int _dms = __slave ? __slave->dst_master : 0;	\
		int _sms = __slave ? __slave->src_master : 1;	\
		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})
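
/*
 * Callers OR their per-transfer fields on top of these defaults; for
 * example, dwc_prep_dma_memcpy() below builds its control word as
 *
 *	ctllo = DWC_DEFAULT_CTLLO(chan)
 *		| DWC_CTLL_DST_WIDTH(dst_width)
 *		| DWC_CTLL_SRC_WIDTH(src_width)
 *		| DWC_CTLL_DST_INC
 *		| DWC_CTLL_SRC_INC
 *		| DWC_CTLL_FC_M2M;
 */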

/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	} else {
		if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
		else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}
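
/*
 * Example: for src = 0x1000, dest = 0x2000 and len = 64, the OR of the
 * three values has its low three bits clear, so dwc_fast_fls() returns 3
 * and the transfer can use the widest (2^3 = 8 byte) bus width.
 */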

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
			(unsigned long long)llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
			(unsigned long long)dest, (unsigned long long)src,
			len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	src_width = dst_width = dwc_fast_fls(src | dest | len);

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = dwc_fast_fls(mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = dwc_fast_fls(mem | len);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding the most significant bit set: fls(n) - 2.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
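
/*
 * Worked example of the mapping above: for maxburst = 16, fls(16) = 5,
 * so the register encoding becomes 5 - 2 = 3; likewise 8 -> 2 and 4 -> 1,
 * while a maxburst of 1 (or 0) falls through to the encoding 0.
 */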

static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
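
/*
 * Minimal usage sketch (not part of this driver): a slave client would
 * typically fill a struct dma_slave_config and hand it to this hook through
 * the dmaengine core via dmaengine_slave_config(). The FIFO address below is
 * a hypothetical placeholder.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= tx_fifo_phys_addr,	(hypothetical)
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */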

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	u32 cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		dwc_chan_disable(dw, dwc);

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, dwc_first_active(dwc)->len);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	int i;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
			"period %zu periods %d\n", (unsigned long long)buf_addr,
			buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
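
/*
 * Typical call sequence for the cyclic extensions above (sketch):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_DEV_TO_MEM);
 *	if (!IS_ERR(cdesc))
 *		dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */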

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

static int __devinit dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	int irq;
	int err;
	int i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->regs = devm_request_and_ioremap(&pdev->dev, io);
	if (!dw->regs)
		return -EBUSY;

	dw->clk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk))
		return PTR_ERR(dw->clk);
	clk_prepare_enable(dw->clk);

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* force dma off, just in case */
	dw_dma_off(dw);

	/* disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
			"dw_dmac", dw);
	if (err)
		return err;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;
}

static int __devexit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif

static struct platform_driver dw_driver = {
	.remove = __devexit_p(dw_remove),
	.shutdown = dw_shutdown,
	.driver = {
		.name = "dw_dmac",
		.pm = &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_id_table),
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");