/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */

#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;				\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;				\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dwc->dst_master)		\
		 | DWC_CTLL_SMS(_dwc->src_master));		\
	})
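
/*
 * Note: DWC_DEFAULT_CTLLO() only composes the burst-size (MSIZE), LLP-update
 * and master-select bits of CTL_LO; callers OR in the transfer widths,
 * address-increment modes and flow control themselves, e.g.
 * DWC_DEFAULT_CTLLO(chan) | DWC_CTLL_DST_WIDTH(w) | DWC_CTLL_FC_M2M
 * (see dwc_prep_dma_memcpy() and dwc_prep_slave_sg() below).
 */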

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
	} else {
		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

static inline unsigned int dwc_fast_ffs(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}
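
/*
 * Worked example: dwc_prep_dma_memcpy() below passes (src | dest | len) here
 * to find the widest power-of-two alignment shared by both addresses and the
 * length. For src = 0x1000, dest = 0x2004, len = 0x40 the OR is 0x3044:
 * not 8-byte aligned but 4-byte aligned, so this returns 2 and the transfer
 * uses 32-bit (1 << 2 byte) beats.
 */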

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, desc->lli.sar);
	channel_writel(dwc, DAR, desc->lli.dar);
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}
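
/*
 * Soft LLP flow: the block programmed above raises an XFER interrupt when it
 * completes; dw_dma_tasklet() then re-enters dwc_scan_descriptors(), which
 * submits the node now pointed to by tx_node_active, until the whole chain
 * has been transferred.
 */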

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		dwc->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback	callback = NULL;
	void			*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc		*child;
	unsigned long		flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	dma_descriptor_unmap(txd);
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}
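
/*
 * Per the DW_ahb_dmac register layout, CTL_HI.BLOCK_TS counts source beats
 * already done and bits 6:4 of CTL_LO hold SRC_TR_WIDTH, the beat-width
 * exponent. Example: BLOCK_TS = 16 with a 32-bit source width
 * (SRC_TR_WIDTH = 2) means 16 * (1 << 2) = 64 bytes were fetched.
 */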

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update desc to reflect last sent one */
				if (active != head->next)
					desc = to_dw_desc(active->prev);

				dwc->residue -= desc->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		dwc->residue = 0;

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		dwc->residue = 0;
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		dwc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			dwc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		dwc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (child->lli.llp == llp) {
				/* Currently in progress */
				dwc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			dwc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan),
			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
			status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* Make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
	list_add_tail(&desc->desc_node, &dwc->queue);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
			   dw->data_width[dwc->dst_master]);

	src_width = dst_width = min_t(unsigned int, data_width,
				      dwc_fast_ffs(src | dest | len));

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;
		desc->len = xfer_count << src_width;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		data_width = dw->data_width[dwc->src_master];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_ffs(mem | len));

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		data_width = dw->data_width[dwc->dst_master];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_ffs(mem | len));

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}

bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (!dws || dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */

	dwc->src_id = dws->src_id;
	dwc->dst_id = dws->dst_id;

	dwc->src_master = dws->src_master;
	dwc->dst_master = dws->dst_master;

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
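
/*
 * Usage sketch (platform code; assumes a populated struct dw_dma_slave dws
 * whose dma_dev points at this controller):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &dws);
 */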

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding the most significant bit set: fls(n) - 2.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
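
/*
 * Example: a client asking for src_maxburst = 8 ends up with the register
 * encoding fls(8) - 2 = 2 (an 8-item burst); maxburst = 1 maps to 0.
 */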

static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}

static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;
	unsigned int count = 20;	/* timeout iterations */
	u32 cfglo;

	spin_lock_irqsave(&dwc->lock, flags);

	cfglo = channel_readl(dwc, CFG_LO);
	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	dwc->paused = true;

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	dwc->paused = false;
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	if (!dwc->paused)
		return 0;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_resume(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}

static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
{
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	residue = dwc->residue;
	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
		residue -= dwc_get_sent(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}
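
/*
 * In soft LLP mode dwc->residue is only decremented block by block from the
 * tasklet, so the bytes already sent out of the current block (reported by
 * dwc_get_sent()) are subtracted here to refine the estimate.
 */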

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	enum dma_status		ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE)
		dma_set_residue(txstate, dwc_get_residue(dwc));

	if (dwc->paused && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}
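
/*
 * Typical client flow (sketch, using the generic dmaengine API): descriptors
 * returned by the prep functions above are queued via dmaengine_submit()
 * and only start once dma_async_issue_pending() is called:
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV, flags);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */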

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

static void dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/* Enable controller here if needed */
	if (!dw->in_use)
		dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		dma_addr_t phys;

		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
		if (!desc)
			goto err_desc_alloc;

		memset(desc, 0, sizeof(struct dw_desc));

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;

		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;

err_desc_alloc:
	dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was a last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		dw_dma_off(dw);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* Enable interrupts to perform cyclic transfer */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);

	dwc_dostart(dwc, dwc->cdesc->desc[0]);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (unlikely(!is_slave_direction(direction)))
		goto out_err;

	dwc->direction = direction;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last)
			last->lli.llp = desc->txd.phys;

		last = desc;
	}

	/* Let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;

	dev_dbg(chan2dev(&dwc->chan),
			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
			&buf_addr, buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
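
/*
 * Usage sketch (assumes the channel was configured via
 * dmaengine_slave_config() first; my_period_cb and my_ctx are hypothetical):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (!IS_ERR(cdesc)) {
 *		cdesc->period_callback = my_period_cb;
 *		cdesc->period_callback_param = my_ctx;
 *		dw_dma_cyclic_start(chan);
 *	}
 */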
1460 | ||
1461 | /** | |
1462 | * dw_dma_cyclic_free - free a prepared cyclic DMA transfer | |
1463 | * @chan: the DMA channel to free | |
1464 | */ | |
1465 | void dw_dma_cyclic_free(struct dma_chan *chan) | |
1466 | { | |
1467 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | |
1468 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | |
1469 | struct dw_cyclic_desc *cdesc = dwc->cdesc; | |
1470 | int i; | |
69cea5a0 | 1471 | unsigned long flags; |
d9de4519 | 1472 | |
2e4c364e | 1473 | dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); |
d9de4519 HCE |
1474 | |
1475 | if (!cdesc) | |
1476 | return; | |
1477 | ||
69cea5a0 | 1478 | spin_lock_irqsave(&dwc->lock, flags); |
d9de4519 | 1479 | |
3f936207 | 1480 | dwc_chan_disable(dw, dwc); |
d9de4519 | 1481 | |
2895b2ca | 1482 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
d9de4519 HCE |
1483 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1484 | dma_writel(dw, CLEAR.XFER, dwc->mask); | |
1485 | ||
69cea5a0 | 1486 | spin_unlock_irqrestore(&dwc->lock, flags); |
d9de4519 HCE |
1487 | |
1488 | for (i = 0; i < cdesc->periods; i++) | |
1489 | dwc_desc_put(dwc, cdesc->desc[i]); | |
1490 | ||
1491 | kfree(cdesc->desc); | |
1492 | kfree(cdesc); | |
1493 | ||
1494 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | |
1495 | } | |
1496 | EXPORT_SYMBOL(dw_dma_cyclic_free); | |
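/*
 * Matching teardown sketch (illustrative): stop the ring before freeing
 * it. Note that dw_dma_cyclic_free() above also disables the channel
 * itself, so the explicit stop is belt and braces; "my_chan" is
 * hypothetical.
 */
static void example_stop_cyclic(struct dma_chan *my_chan)
{
	dw_dma_cyclic_stop(my_chan);
	dw_dma_cyclic_free(my_chan);
}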
1497 | ||
3bfb1d20 HS |
1498 | /*----------------------------------------------------------------------*/ |
1499 | ||
9cade1a4 | 1500 | int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) |
a9ddb575 | 1501 | { |
3bfb1d20 | 1502 | struct dw_dma *dw; |
30cb2639 | 1503 | bool autocfg = false; |
482c67ea | 1504 | unsigned int dw_params; |
4a63a8b3 | 1505 | unsigned int max_blk_size = 0; |
3bfb1d20 HS |
1506 | int err; |
1507 | int i; | |
1508 | ||
000871ce AS |
1509 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); |
1510 | if (!dw) | |
1511 | return -ENOMEM; | |
1512 | ||
1513 | dw->regs = chip->regs; | |
1514 | chip->dw = dw; | |
1515 | ||
bb32baf7 AS |
1516 | pm_runtime_get_sync(chip->dev); |
1517 | ||
30cb2639 AS |
1518 | if (!pdata) { |
1519 | dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); | |
1520 | dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); | |
482c67ea | 1521 | |
30cb2639 AS |
1522 | autocfg = dw_params >> DW_PARAMS_EN & 1; |
1523 | if (!autocfg) { | |
1524 | err = -EINVAL; | |
1525 | goto err_pdata; | |
1526 | } | |
123de543 | 1527 | |
9cade1a4 | 1528 | pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); |
8be4f523 AS |
1529 | if (!pdata) { |
1530 | err = -ENOMEM; | |
1531 | goto err_pdata; | |
1532 | } | |
123de543 | 1533 | |
30cb2639 AS |
1534 | /* Get hardware configuration parameters */ |
1535 | pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; | |
1536 | pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; | |
1537 | for (i = 0; i < pdata->nr_masters; i++) { | |
1538 | pdata->data_width[i] = | |
1539 | (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; | |
1540 | } | |
1541 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); | |
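/*
 * The 3-bit NR_CHAN field thus encodes 1..8 channels and the 2-bit
 * NR_MASTER field 1..4 AHB masters; MAX_BLK_SIZE packs one 4-bit
 * block-size code per channel, decoded further below.
 */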
1542 | ||
123de543 AS |
1543 | /* Fill platform data with the default values */ |
1544 | pdata->is_private = true; | |
df5c7386 | 1545 | pdata->is_memcpy = true; |
123de543 AS |
1546 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; |
1547 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | |
30cb2639 | 1548 | } else if (pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { |
8be4f523 AS |
1549 | err = -EINVAL; |
1550 | goto err_pdata; | |
1551 | } | |
123de543 | 1552 | |
30cb2639 | 1553 | dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), |
000871ce | 1554 | GFP_KERNEL); |
8be4f523 AS |
1555 | if (!dw->chan) { |
1556 | err = -ENOMEM; | |
1557 | goto err_pdata; | |
1558 | } | |
3bfb1d20 | 1559 | |
75c61225 | 1560 | /* Copy hardware configuration parameters from platform data */ | |
30cb2639 AS |
1561 | dw->nr_masters = pdata->nr_masters; |
1562 | for (i = 0; i < dw->nr_masters; i++) | |
1563 | dw->data_width[i] = pdata->data_width[i]; | |
a0982004 | 1564 | |
11f932ec | 1565 | /* Calculate the all-channel mask before DMA setup */ | |
30cb2639 | 1566 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
11f932ec | 1567 | |
75c61225 | 1568 | /* Force dma off, just in case */ |
3bfb1d20 HS |
1569 | dw_dma_off(dw); |
1570 | ||
75c61225 | 1571 | /* Create a pool of consistent memory blocks for hardware descriptors */ |
9cade1a4 | 1572 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, |
f8122a82 AS |
1573 | sizeof(struct dw_desc), 4, 0); |
1574 | if (!dw->desc_pool) { | |
9cade1a4 | 1575 | dev_err(chip->dev, "No memory for descriptor DMA pool\n"); | |
8be4f523 AS |
1576 | err = -ENOMEM; |
1577 | goto err_pdata; | |
f8122a82 AS |
1578 | } |
1579 | ||
3bfb1d20 HS |
1580 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
1581 | ||
97977f75 AS |
1582 | err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, |
1583 | "dw_dmac", dw); | |
1584 | if (err) | |
8be4f523 | 1585 | goto err_pdata; |
97977f75 | 1586 | |
3bfb1d20 | 1587 | INIT_LIST_HEAD(&dw->dma.channels); |
30cb2639 | 1588 | for (i = 0; i < pdata->nr_channels; i++) { |
3bfb1d20 HS |
1589 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1590 | ||
1591 | dwc->chan.device = &dw->dma; | |
d3ee98cd | 1592 | dma_cookie_init(&dwc->chan); |
b0c3130d VK |
1593 | if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) |
1594 | list_add_tail(&dwc->chan.device_node, | |
1595 | &dw->dma.channels); | |
1596 | else | |
1597 | list_add(&dwc->chan.device_node, &dw->dma.channels); | |
3bfb1d20 | 1598 | |
93317e8e VK |
1599 | /* 7 is the highest priority and 0 the lowest. */ | |
1600 | if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) | |
30cb2639 | 1601 | dwc->priority = pdata->nr_channels - i - 1; |
93317e8e VK |
1602 | else |
1603 | dwc->priority = i; | |
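/*
 * Example: with 8 channels and CHAN_PRIORITY_ASCENDING, channel 0 ends
 * up with priority 7 (the highest) and channel 7 with priority 0.
 */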
1604 | ||
3bfb1d20 HS |
1605 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1606 | spin_lock_init(&dwc->lock); | |
1607 | dwc->mask = 1 << i; | |
1608 | ||
1609 | INIT_LIST_HEAD(&dwc->active_list); | |
1610 | INIT_LIST_HEAD(&dwc->queue); | |
1611 | INIT_LIST_HEAD(&dwc->free_list); | |
1612 | ||
1613 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
4a63a8b3 | 1614 | |
0fdb567f | 1615 | dwc->direction = DMA_TRANS_NONE; |
a0982004 | 1616 | |
75c61225 | 1617 | /* Hardware configuration */ |
fed2574b AS |
1618 | if (autocfg) { |
1619 | unsigned int dwc_params; | |
6bea0f6d | 1620 | unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; |
9cade1a4 | 1621 | void __iomem *addr = chip->regs + r * sizeof(u32); |
fed2574b | 1622 | |
9cade1a4 | 1623 | dwc_params = dma_read_byaddr(addr, DWC_PARAMS); |
fed2574b | 1624 | |
9cade1a4 AS |
1625 | dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i, |
1626 | dwc_params); | |
985a6c7d | 1627 | |
1d566f11 AS |
1628 | /* |
1629 | * Decode the maximum block size for the given channel. The
4a63a8b3 | 1630 | * stored 4-bit value represents blocks from 0x00 for 3
1d566f11 AS |
1631 | * up to 0x0a for 4095. |
1632 | */ | |
4a63a8b3 AS |
1633 | dwc->block_size = |
1634 | (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; | |
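/* e.g. a stored code of 0x0a yields (4 << 10) - 1 == 4095 */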
fed2574b AS |
1635 | dwc->nollp = |
1636 | (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; | |
1637 | } else { | |
4a63a8b3 | 1638 | dwc->block_size = pdata->block_size; |
fed2574b AS |
1639 | |
1640 | /* Check if the channel supports multi-block transfers */ | |
1641 | channel_writel(dwc, LLP, 0xfffffffc); | |
1642 | dwc->nollp = | |
1643 | (channel_readl(dwc, LLP) & 0xfffffffc) == 0; | |
1644 | channel_writel(dwc, LLP, 0); | |
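/*
 * On a channel synthesized without multi-block support, LLP is
 * hardwired to zero, so the test pattern written above cannot stick.
 */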
1645 | } | |
3bfb1d20 HS |
1646 | } |
1647 | ||
11f932ec | 1648 | /* Clear all interrupts on all channels. */ |
3bfb1d20 | 1649 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
236b106f | 1650 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); |
3bfb1d20 HS |
1651 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1652 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | |
1653 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | |
1654 | ||
df5c7386 | 1655 | /* Set capabilities */ |
3bfb1d20 | 1656 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
95ea759e JI |
1657 | if (pdata->is_private) |
1658 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | |
df5c7386 AS |
1659 | if (pdata->is_memcpy) |
1660 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | |
1661 | ||
9cade1a4 | 1662 | dw->dma.dev = chip->dev; |
3bfb1d20 HS |
1663 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
1664 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | |
1665 | ||
1666 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | |
3bfb1d20 | 1667 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; |
029a40e9 | 1668 | |
a4b0d348 MR |
1669 | dw->dma.device_config = dwc_config; |
1670 | dw->dma.device_pause = dwc_pause; | |
1671 | dw->dma.device_resume = dwc_resume; | |
1672 | dw->dma.device_terminate_all = dwc_terminate_all; | |
3bfb1d20 | 1673 | |
07934481 | 1674 | dw->dma.device_tx_status = dwc_tx_status; |
3bfb1d20 HS |
1675 | dw->dma.device_issue_pending = dwc_issue_pending; |
1676 | ||
029a40e9 AS |
1677 | /* DMA capabilities */ |
1678 | dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; | |
1679 | dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; | |
1680 | dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | | |
1681 | BIT(DMA_MEM_TO_MEM); | |
1682 | dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | |
1683 | ||
1222934e AS |
1684 | err = dma_async_device_register(&dw->dma); |
1685 | if (err) | |
1686 | goto err_dma_register; | |
1687 | ||
9cade1a4 | 1688 | dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", |
30cb2639 | 1689 | pdata->nr_channels); |
3bfb1d20 | 1690 | |
bb32baf7 AS |
1691 | pm_runtime_put_sync_suspend(chip->dev); |
1692 | ||
3bfb1d20 | 1693 | return 0; |
8be4f523 | 1694 | |
1222934e AS |
1695 | err_dma_register: |
1696 | free_irq(chip->irq, dw); | |
8be4f523 | 1697 | err_pdata: |
bb32baf7 | 1698 | pm_runtime_put_sync_suspend(chip->dev); |
8be4f523 | 1699 | return err; |
3bfb1d20 | 1700 | } |
9cade1a4 | 1701 | EXPORT_SYMBOL_GPL(dw_dma_probe); |
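/*
 * Glue-driver sketch (illustrative, not part of this file): how a bus
 * driver might hand its resources to dw_dma_probe(). Only the
 * dw_dma_chip fields used above (dev, irq, regs) are filled in;
 * "my_pdev" and the resource lookup are hypothetical, and
 * <linux/platform_device.h> is assumed. The matching removal path would
 * call dw_dma_remove(chip).
 */
static int example_glue_probe(struct platform_device *my_pdev)
{
	struct dw_dma_chip *chip;
	struct resource *mem;

	chip = devm_kzalloc(&my_pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &my_pdev->dev;

	chip->irq = platform_get_irq(my_pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(my_pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(&my_pdev->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	platform_set_drvdata(my_pdev, chip);

	/* NULL pdata selects the DW_PARAMS autoconfiguration path above. */
	return dw_dma_probe(chip, NULL);
}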
3bfb1d20 | 1702 | |
9cade1a4 | 1703 | int dw_dma_remove(struct dw_dma_chip *chip) |
3bfb1d20 | 1704 | { |
9cade1a4 | 1705 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1706 | struct dw_dma_chan *dwc, *_dwc; |
3bfb1d20 | 1707 | |
bb32baf7 AS |
1708 | pm_runtime_get_sync(chip->dev); |
1709 | ||
3bfb1d20 HS |
1710 | dw_dma_off(dw); |
1711 | dma_async_device_unregister(&dw->dma); | |
1712 | ||
97977f75 | 1713 | free_irq(chip->irq, dw); |
3bfb1d20 HS |
1714 | tasklet_kill(&dw->tasklet); |
1715 | ||
1716 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | |
1717 | chan.device_node) { | |
1718 | list_del(&dwc->chan.device_node); | |
1719 | channel_clear_bit(dw, CH_EN, dwc->mask); | |
1720 | } | |
1721 | ||
bb32baf7 | 1722 | pm_runtime_put_sync_suspend(chip->dev); |
3bfb1d20 HS |
1723 | return 0; |
1724 | } | |
9cade1a4 | 1725 | EXPORT_SYMBOL_GPL(dw_dma_remove); |
3bfb1d20 | 1726 | |
2540f74b | 1727 | int dw_dma_disable(struct dw_dma_chip *chip) |
3bfb1d20 | 1728 | { |
9cade1a4 | 1729 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1730 | |
6168d567 | 1731 | dw_dma_off(dw); |
3bfb1d20 HS |
1732 | return 0; |
1733 | } | |
2540f74b | 1734 | EXPORT_SYMBOL_GPL(dw_dma_disable); |
3bfb1d20 | 1735 | |
2540f74b | 1736 | int dw_dma_enable(struct dw_dma_chip *chip) |
3bfb1d20 | 1737 | { |
9cade1a4 | 1738 | struct dw_dma *dw = chip->dw; |
3bfb1d20 | 1739 | |
7a83c045 | 1740 | dw_dma_on(dw); |
3bfb1d20 | 1741 | return 0; |
3bfb1d20 | 1742 | } |
2540f74b | 1743 | EXPORT_SYMBOL_GPL(dw_dma_enable); |
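/*
 * Power-management sketch (illustrative): the intended pairing of
 * dw_dma_disable()/dw_dma_enable() from a glue driver's system
 * suspend/resume callbacks. Fetching the chip via dev_get_drvdata()
 * assumes the hypothetical probe above stored it with
 * platform_set_drvdata().
 */
static int example_glue_suspend(struct device *dev)
{
	struct dw_dma_chip *my_chip = dev_get_drvdata(dev);

	return dw_dma_disable(my_chip);
}

static int example_glue_resume(struct device *dev)
{
	struct dw_dma_chip *my_chip = dev_get_drvdata(dev);

	return dw_dma_enable(my_chip);
}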
3bfb1d20 HS |
1744 | |
1745 | MODULE_LICENSE("GPL v2"); | |
9cade1a4 | 1746 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); |
e05503ef | 1747 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
da89947b | 1748 | MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>"); |