// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the TXx9 SoC DMA Controller
 *
 * Copyright (C) 2009 Atsushi Nemoto
 */
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

#include "dmaengine.h"
#include "txx9dmac.h"

static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct txx9dmac_chan, chan);
}

static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

static struct txx9dmac_cregs32 __iomem *__dma_regs32(
	const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

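/*
 * The channel register block is mapped once via dc->ch_regs and then
 * viewed either as the 64-bit layout (txx9dmac_cregs) or the 32-bit
 * layout (txx9dmac_cregs32); is_dmac64() picks the view at run time in
 * the channel_* accessors below.
 */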
#define channel64_readq(dc, name) \
	__raw_readq(&(__dma_regs(dc)->name))
#define channel64_writeq(dc, name, val) \
	__raw_writeq((val), &(__dma_regs(dc)->name))
#define channel64_readl(dc, name) \
	__raw_readl(&(__dma_regs(dc)->name))
#define channel64_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs(dc)->name))

#define channel32_readl(dc, name) \
	__raw_readl(&(__dma_regs32(dc)->name))
#define channel32_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs32(dc)->name))

#define channel_readq(dc, name) channel64_readq(dc, name)
#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
#define channel_readl(dc, name) \
	(is_dmac64(dc) ? \
	channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
	(is_dmac64(dc) ? \
	channel64_writel(dc, name, val) : channel32_writel(dc, name, val))

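/*
 * Note: on the 64-bit DMAC the CHAR register is 64 bits wide, but the
 * width of the CHAR field in txx9dmac_cregs follows dma_addr_t (see
 * txx9dmac.h), so the helpers below choose the matching access size at
 * compile time via sizeof().
 */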
static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		return channel64_readq(dc, CHAR);
	else
		return channel64_readl(dc, CHAR);
}

static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		channel64_writeq(dc, CHAR, val);
	else
		channel64_writel(dc, CHAR, val);
}

static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
	channel64_writel(dc, CHAR, 0);
	channel64_writel(dc, __pad_CHAR, 0);
#else
	channel64_writeq(dc, CHAR, 0);
#endif
}

static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		return channel64_read_CHAR(dc);
	else
		return channel32_readl(dc, CHAR);
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (is_dmac64(dc))
		channel64_write_CHAR(dc, val);
	else
		channel32_writel(dc, CHAR, val);
}

static struct txx9dmac_regs __iomem *__txx9dmac_regs(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

#define dma64_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs(ddev)->name))
#define dma64_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs(ddev)->name))

#define dma32_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs32(ddev)->name))
#define dma32_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs32(ddev)->name))

#define dma_readl(ddev, name) \
	(__is_dmac64(ddev) ? \
	dma64_readl(ddev, name) : dma32_readl(ddev, name))
#define dma_writel(ddev, name, val) \
	(__is_dmac64(ddev) ? \
	dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct txx9dmac_desc *
txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct txx9dmac_desc, txd);
}

static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
				 const struct txx9dmac_desc *desc)
{
	return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
}

static void desc_write_CHAR(const struct txx9dmac_chan *dc,
			    struct txx9dmac_desc *desc, dma_addr_t val)
{
	if (is_dmac64(dc))
		desc->hwdesc.CHAR = val;
	else
		desc->hwdesc32.CHAR = val;
}

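/*
 * TXX9_DMA_MAX_COUNT caps the byte count programmed into one hardware
 * descriptor (longer memcpy requests are split into a chain), and
 * TXX9_DMA_INITIAL_DESC_COUNT is how many descriptors are preallocated
 * per channel in alloc_chan_resources().
 */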
#define TXX9_DMA_MAX_COUNT	0x04000000

#define TXX9_DMA_INITIAL_DESC_COUNT	64

static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.next,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.prev,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
{
	return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
	if (!list_empty(&desc->tx_list))
		desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
	return desc;
}

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);

static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
						 gfp_t flags)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc)
		return NULL;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
	desc->txd.tx_submit = txx9dmac_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
					ddev->descsize, DMA_TO_DEVICE);
	return desc;
}

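/*
 * Get a descriptor for a new transaction: reuse the first free-list
 * entry whose previous user has ACKed it, or fall back to allocating a
 * fresh one with GFP_ATOMIC since prep functions may run in atomic
 * context.
 */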
static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dc->lock);
	list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dc->lock);

	dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
		 i);
	if (!ret) {
		ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&dc->lock);
			dc->descs_allocated++;
			spin_unlock_bh(&dc->lock);
		} else
			dev_err(chan2dev(&dc->chan),
				"not enough descriptors available\n");
	}
	return ret;
}

static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
				       struct txx9dmac_desc *desc)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dc->chan),
					child->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dc->chan),
				desc->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
			      struct txx9dmac_desc *desc)
{
	if (desc) {
		struct txx9dmac_desc *child;

		txx9dmac_sync_desc_for_cpu(dc, desc);

		spin_lock_bh(&dc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dc->chan),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &dc->free_list);
		dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
			 desc);
		list_add(&desc->desc_node, &dc->free_list);
		spin_unlock_bh(&dc->lock);
	}
}

/*----------------------------------------------------------------------*/

static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			(u64)channel64_read_CHAR(dc),
			channel64_readq(dc, SAR),
			channel64_readq(dc, DAR),
			channel64_readl(dc, CNTR),
			channel64_readl(dc, SAIR),
			channel64_readl(dc, DAIR),
			channel64_readl(dc, CCR),
			channel64_readl(dc, CSR));
	else
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			channel32_readl(dc, CHAR),
			channel32_readl(dc, SAR),
			channel32_readl(dc, DAR),
			channel32_readl(dc, CNTR),
			channel32_readl(dc, SAIR),
			channel32_readl(dc, DAIR),
			channel32_readl(dc, CCR),
			channel32_readl(dc, CSR));
}

static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
{
	channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
	if (is_dmac64(dc)) {
		channel64_clear_CHAR(dc);
		channel_writeq(dc, SAR, 0);
		channel_writeq(dc, DAR, 0);
	} else {
		channel_writel(dc, CHAR, 0);
		channel_writel(dc, SAR, 0);
		channel_writel(dc, DAR, 0);
	}
	channel_writel(dc, CNTR, 0);
	channel_writel(dc, SAIR, 0);
	channel_writel(dc, DAIR, 0);
	channel_writel(dc, CCR, 0);
}

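/*
 * Kick off a transfer. With simple chain support (SMPCHN), writing a
 * non-zero descriptor address to CHAR asserts XFACT and starts the
 * channel, so CCR must be programmed before CHAR; the non-SMPCHN path
 * below uses the opposite write order.
 */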
/* Called with dc->lock held and bh disabled */
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *first)
{
	struct txx9dmac_slave *ds = dc->chan.private;
	u32 sai, dai;

	dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
		 first->txd.cookie, first);
	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_err(chan2dev(&dc->chan),
			"BUG: Attempted to start non-idle channel\n");
		txx9dmac_dump_regs(dc);
		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (is_dmac64(dc)) {
		channel64_writel(dc, CNTR, 0);
		channel64_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 8;
			dai = 8;
		}
		channel64_writel(dc, SAIR, sai);
		channel64_writel(dc, DAIR, dai);
		/* All 64-bit DMAC supports SMPCHN */
		channel64_writel(dc, CCR, dc->ccr);
		/* Writing a non zero value to CHAR will assert XFACT */
		channel64_write_CHAR(dc, first->txd.phys);
	} else {
		channel32_writel(dc, CNTR, 0);
		channel32_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 4;
			dai = 4;
		}
		channel32_writel(dc, SAIR, sai);
		channel32_writel(dc, DAIR, dai);
		if (txx9_dma_have_SMPCHN()) {
			channel32_writel(dc, CCR, dc->ccr);
			/* Writing a non zero value to CHAR will assert XFACT */
			channel32_writel(dc, CHAR, first->txd.phys);
		} else {
			channel32_writel(dc, CHAR, first->txd.phys);
			channel32_writel(dc, CCR, dc->ccr);
		}
	}
}

/*----------------------------------------------------------------------*/

static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *desc)
{
	struct dmaengine_desc_callback cb;
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
		 txd->cookie, desc);

	dma_cookie_complete(txd);
	dmaengine_desc_get_callback(txd, &cb);

	txx9dmac_sync_desc_for_cpu(dc, desc);
	list_splice_init(&desc->tx_list, &dc->free_list);
	list_move(&desc->desc_node, &dc->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	dmaengine_desc_callback_invoke(&cb, NULL);
	dma_run_dependencies(txd);
}

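/*
 * Move descriptors from dc->queue onto @list, hardware-linking
 * consecutive transactions by writing each descriptor's bus address into
 * the previous one's CHAR field and syncing it back to memory.
 */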
static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *prev = NULL;

	BUG_ON(!list_empty(list));
	do {
		desc = txx9dmac_first_queued(dc);
		if (prev) {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
				prev->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
		}
		prev = txx9dmac_last_child(desc);
		list_move_tail(&desc->desc_node, list);
		/* Make chain-completion interrupt happen */
		if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
		    !txx9dmac_chan_INTENT(dc))
			break;
	} while (!list_empty(&dc->queue));
}

static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dc->active_list, &list);
	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}

static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
			       struct txx9dmac_hwdesc *desc)
{
	if (is_dmac64(dc)) {
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
			 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
#endif
	} else {
		struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR,
			 d->SAIR, d->DAIR, d->CCR, d->CSR);
#endif
	}
}

static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
	struct txx9dmac_desc *bad_desc;
	struct txx9dmac_desc *child;
	u32 errors;

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
	txx9dmac_dump_regs(dc);

	bad_desc = txx9dmac_first_active(dc);
	list_del_init(&bad_desc->desc_node);

	/* Clear all error flags and try to restart the controller */
	errors = csr & (TXX9_DMA_CSR_ABCHC |
			TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
			TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
	channel_writel(dc, CSR, errors);

	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
		txx9dmac_dequeue(dc, &dc->active_list);
	if (!list_empty(&dc->active_list))
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));

	dev_crit(chan2dev(&dc->chan),
		 "Bad descriptor submitted for DMA! (cookie: %d)\n",
		 bad_desc->txd.cookie);
	txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		txx9dmac_dump_desc(dc, &child->hwdesc);
	/* Pretend the descriptor completed successfully */
	txx9dmac_descriptor_complete(dc, bad_desc);
}

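/*
 * Retire every active descriptor the hardware has moved past: the
 * channel's current CHAR is compared against each in-flight
 * descriptor's CHAR to locate the one still in progress.
 */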
static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
	dma_addr_t chain;
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *child;
	u32 csr;

	if (is_dmac64(dc)) {
		chain = channel64_read_CHAR(dc);
		csr = channel64_readl(dc, CSR);
		channel64_writel(dc, CSR, csr);
	} else {
		chain = channel32_readl(dc, CHAR);
		csr = channel32_readl(dc, CSR);
		channel32_writel(dc, CSR, csr);
	}
	/* For dynamic chain, we should look at XFACT instead of NCHNC */
	if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
		/* Everything we've submitted is done */
		txx9dmac_complete_all(dc);
		return;
	}
	if (!(csr & TXX9_DMA_CSR_CHNEN))
		chain = 0;	/* last descriptor of this chain */

	dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
		 (u64)chain);

	list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
		if (desc_read_CHAR(dc, desc) == chain) {
			/* This one is currently in progress */
			if (csr & TXX9_DMA_CSR_ABCHC)
				goto scan_done;
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (desc_read_CHAR(dc, child) == chain) {
				/* Currently in progress */
				if (csr & TXX9_DMA_CSR_ABCHC)
					goto scan_done;
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		txx9dmac_descriptor_complete(dc, desc);
	}
scan_done:
	if (csr & TXX9_DMA_CSR_ABCHC) {
		txx9dmac_handle_error(dc, csr);
		return;
	}

	dev_err(chan2dev(&dc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	txx9dmac_reset_chan(dc);

	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}
}

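/*
 * Per-channel interrupt handling: the hard IRQ handler below only
 * schedules this tasklet and masks the line; the tasklet scans the
 * descriptor lists and re-enables the IRQ when done.
 */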
static void txx9dmac_chan_tasklet(struct tasklet_struct *t)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	dc = from_tasklet(dc, t, tasklet);
	csr = channel_readl(dc, CSR);
	dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

	spin_lock(&dc->lock);
	if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
		   TXX9_DMA_CSR_NTRNFC))
		txx9dmac_scan_descriptors(dc);
	spin_unlock(&dc->lock);
	irq = dc->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_chan *dc = dev_id;

	dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
		 channel_readl(dc, CSR));

	tasklet_schedule(&dc->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static void txx9dmac_tasklet(struct tasklet_struct *t)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	struct txx9dmac_dev *ddev = from_tasklet(ddev, t, tasklet);
	u32 mcr;
	int i;

	mcr = dma_readl(ddev, MCR);
	dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
	for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
		if ((mcr >> (24 + i)) & 0x11) {
			dc = ddev->chan[i];
			csr = channel_readl(dc, CSR);
			dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
				 csr);
			spin_lock(&dc->lock);
			if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
				   TXX9_DMA_CSR_NTRNFC))
				txx9dmac_scan_descriptors(dc);
			spin_unlock(&dc->lock);
		}
	}
	irq = ddev->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_dev *ddev = dev_id;

	dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
		 dma_readl(ddev, MCR));

	tasklet_schedule(&ddev->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

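/*
 * tx_submit only assigns a cookie and appends the descriptor to the
 * software queue; the hardware is not touched until the client calls
 * dma_async_issue_pending() (see txx9dmac_issue_pending()).
 */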
static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
	struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dc->lock);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
		 desc->txd.cookie, desc);

	list_add_tail(&desc->desc_node, &dc->queue);
	spin_unlock_bh(&dc->lock);

	return cookie;
}

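/*
 * Build a memcpy transaction as a chain of hardware descriptors, each
 * moving at most TXX9_DMA_MAX_COUNT bytes; the chain is terminated by
 * storing 0 in the last descriptor's CHAR field.
 */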
static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *first;
	struct txx9dmac_desc *prev;
	size_t xfer_count;
	size_t offset;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
		 (u64)dest, (u64)src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
		/*
		 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
		 * ERT-TX49H4-016 (slightly conservative)
		 */
		if (__is_dmac64(ddev)) {
			if (xfer_count > 0x100 &&
			    (xfer_count & 0xff) >= 0xfa &&
			    (xfer_count & 0xff) <= 0xff)
				xfer_count -= 0x20;
		} else {
			if (xfer_count > 0x80 &&
			    (xfer_count & 0x7f) >= 0x7e &&
			    (xfer_count & 0x7f) <= 0x7f)
				xfer_count -= 0x20;
		}

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		if (__is_dmac64(ddev)) {
			desc->hwdesc.SAR = src + offset;
			desc->hwdesc.DAR = dest + offset;
			desc->hwdesc.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		} else {
			desc->hwdesc32.SAR = src + offset;
			desc->hwdesc32.DAR = dest + offset;
			desc->hwdesc32.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		}

		/*
		 * The descriptors on tx_list are not reachable from
		 * the dc->queue list or dc->active_list after a
		 * submit. If we put all descriptors on active_list,
		 * calling of callback on the completion will be more
		 * complex.
		 */
		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;
}

static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *prev;
	struct txx9dmac_desc *first;
	unsigned int i;
	struct scatterlist *sg;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	BUG_ON(!ds || !ds->reg_width);
	if (ds->tx_reg)
		BUG_ON(direction != DMA_MEM_TO_DEV);
	else
		BUG_ON(direction != DMA_DEV_TO_MEM);
	if (unlikely(!sg_len))
		return NULL;

	prev = first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct txx9dmac_desc *desc;
		dma_addr_t mem;
		u32 sai, dai;

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		mem = sg_dma_address(sg);

		if (__is_dmac64(ddev)) {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc.SAR = mem;
				desc->hwdesc.DAR = ds->tx_reg;
			} else {
				desc->hwdesc.SAR = ds->rx_reg;
				desc->hwdesc.DAR = mem;
			}
			desc->hwdesc.CNTR = sg_dma_len(sg);
		} else {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc32.SAR = mem;
				desc->hwdesc32.DAR = ds->tx_reg;
			} else {
				desc->hwdesc32.SAR = ds->rx_reg;
				desc->hwdesc32.DAR = mem;
			}
			desc->hwdesc32.CNTR = sg_dma_len(sg);
		}
		if (direction == DMA_MEM_TO_DEV) {
			sai = ds->reg_width;
			dai = 0;
		} else {
			sai = 0;
			dai = ds->reg_width;
		}
		txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
					dc->ccr | TXX9_DMA_CCR_XFACT);

		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys,
					ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = 0;

	return &first->txd;
}

static int txx9dmac_terminate_all(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "terminate_all\n");
	spin_lock_bh(&dc->lock);

	txx9dmac_reset_chan(dc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dc->queue, &list);
	list_splice_init(&dc->active_list, &list);

	spin_unlock_bh(&dc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);

	return 0;
}

static enum dma_status
txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return DMA_COMPLETE;

	spin_lock_bh(&dc->lock);
	txx9dmac_scan_descriptors(dc);
	spin_unlock_bh(&dc->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

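/*
 * Append newly queued work to a possibly still-running chain: link the
 * new head after the last active descriptor, then, if the hardware has
 * already stopped on exactly that descriptor (CHNEN clear and CHAR still
 * pointing at it), restart the chain by writing the new head to CHAR.
 */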
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
				   struct txx9dmac_desc *prev)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	LIST_HEAD(list);

	prev = txx9dmac_last_child(prev);
	txx9dmac_dequeue(dc, &list);
	desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
	desc_write_CHAR(dc, prev, desc->txd.phys);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);
	if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
	    channel_read_CHAR(dc) == prev->txd.phys)
		/* Restart chain DMA */
		channel_write_CHAR(dc, desc->txd.phys);
	list_splice_tail(&list, &dc->active_list);
}

static void txx9dmac_issue_pending(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);

	spin_lock_bh(&dc->lock);

	if (!list_empty(&dc->active_list))
		txx9dmac_scan_descriptors(dc);
	if (!list_empty(&dc->queue)) {
		if (list_empty(&dc->active_list)) {
			txx9dmac_dequeue(dc, &dc->active_list);
			txx9dmac_dostart(dc, txx9dmac_first_active(dc));
		} else if (txx9_dma_have_SMPCHN()) {
			struct txx9dmac_desc *prev = txx9dmac_last_active(dc);

			if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
			    txx9dmac_chan_INTENT(dc))
				txx9dmac_chain_dynamic(dc, prev);
		}
	}

	spin_unlock_bh(&dc->lock);
}

static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *desc;
	int i;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
	txx9dmac_chan_set_SMPCHN(dc);
	if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
		dc->ccr |= TXX9_DMA_CCR_INTENC;
	if (chan->device->device_prep_dma_memcpy) {
		if (ds)
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
	} else {
		if (!ds ||
		    (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_EXTRQ |
			TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
		txx9dmac_chan_set_INTENT(dc);
	}

	spin_lock_bh(&dc->lock);
	i = dc->descs_allocated;
	while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
		spin_unlock_bh(&dc->lock);

		desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dc->lock);
			break;
		}
		txx9dmac_desc_put(dc, desc);

		spin_lock_bh(&dc->lock);
		i = ++dc->descs_allocated;
	}
	spin_unlock_bh(&dc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void txx9dmac_free_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
		dc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dc->active_list));
	BUG_ON(!list_empty(&dc->queue));
	BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);

	spin_lock_bh(&dc->lock);
	list_splice_init(&dc->free_list, &list);
	dc->descs_allocated = 0;
	spin_unlock_bh(&dc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				 ddev->descsize, DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/*----------------------------------------------------------------------*/

static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
	dma_writel(ddev, MCR, 0);
}

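/*
 * The hardware is modelled as two platform drivers: "txx9dmac" probes
 * the controller (register block and, optionally, a shared IRQ), while
 * one "txx9dmac-chan" device per channel registers a dmaengine device.
 * A channel requests its own IRQ only when the controller has no shared
 * one (dc->ddev->irq < 0).
 */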
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
	struct txx9dmac_chan_platform_data *cpdata =
		dev_get_platdata(&pdev->dev);
	struct platform_device *dmac_dev = cpdata->dmac_dev;
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
	struct txx9dmac_chan *dc;
	int err;
	int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
	int irq;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->dma.dev = &pdev->dev;
	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
	dc->dma.device_terminate_all = txx9dmac_terminate_all;
	dc->dma.device_tx_status = txx9dmac_tx_status;
	dc->dma.device_issue_pending = txx9dmac_issue_pending;
	if (pdata && pdata->memcpy_chan == ch) {
		dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
		dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
	} else {
		dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
		dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
		dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
	}

	INIT_LIST_HEAD(&dc->dma.channels);
	dc->ddev = platform_get_drvdata(dmac_dev);
	if (dc->ddev->irq < 0) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		tasklet_setup(&dc->tasklet, txx9dmac_chan_tasklet);
		dc->irq = irq;
		err = devm_request_irq(&pdev->dev, dc->irq,
			txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
		if (err)
			return err;
	} else
		dc->irq = -1;
	dc->ddev->chan[ch] = dc;
	dc->chan.device = &dc->dma;
	list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
	dma_cookie_init(&dc->chan);

	if (is_dmac64(dc))
		dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
	else
		dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
	spin_lock_init(&dc->lock);

	INIT_LIST_HEAD(&dc->active_list);
	INIT_LIST_HEAD(&dc->queue);
	INIT_LIST_HEAD(&dc->free_list);

	txx9dmac_reset_chan(dc);

	platform_set_drvdata(pdev, dc);

	err = dma_async_device_register(&dc->dma);
	if (err)
		return err;
	dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
		dc->dma.dev_id,
		dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
		dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");

	return 0;
}

static int txx9dmac_chan_remove(struct platform_device *pdev)
{
	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dc->dma);
	if (dc->irq >= 0) {
		devm_free_irq(&pdev->dev, dc->irq, dc);
		tasklet_kill(&dc->tasklet);
	}
	dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
	return 0;
}

static int __init txx9dmac_probe(struct platform_device *pdev)
{
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *io;
	struct txx9dmac_dev *ddev;
	u32 mcr;
	int err;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
				     dev_name(&pdev->dev)))
		return -EBUSY;

	ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
	if (!ddev->regs)
		return -ENOMEM;
	ddev->have_64bit_regs = pdata->have_64bit_regs;
	if (__is_dmac64(ddev))
		ddev->descsize = sizeof(struct txx9dmac_hwdesc);
	else
		ddev->descsize = sizeof(struct txx9dmac_hwdesc32);

	/* force dma off, just in case */
	txx9dmac_off(ddev);

	ddev->irq = platform_get_irq(pdev, 0);
	if (ddev->irq >= 0) {
		tasklet_setup(&ddev->tasklet, txx9dmac_tasklet);
		err = devm_request_irq(&pdev->dev, ddev->irq,
			txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
		if (err)
			return err;
	}

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);

	platform_set_drvdata(pdev, ddev);
	return 0;
}

static int txx9dmac_remove(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	if (ddev->irq >= 0) {
		devm_free_irq(&pdev->dev, ddev->irq, ddev);
		tasklet_kill(&ddev->tasklet);
	}
	return 0;
}

static void txx9dmac_shutdown(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
}

static int txx9dmac_suspend_noirq(struct device *dev)
{
	struct txx9dmac_dev *ddev = dev_get_drvdata(dev);

	txx9dmac_off(ddev);
	return 0;
}

static int txx9dmac_resume_noirq(struct device *dev)
{
	struct txx9dmac_dev *ddev = dev_get_drvdata(dev);
	struct txx9dmac_platform_data *pdata = dev_get_platdata(dev);
	u32 mcr;

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);
	return 0;
}

static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
	.suspend_noirq = txx9dmac_suspend_noirq,
	.resume_noirq = txx9dmac_resume_noirq,
};

static struct platform_driver txx9dmac_chan_driver = {
	.remove		= txx9dmac_chan_remove,
	.driver = {
		.name	= "txx9dmac-chan",
	},
};

static struct platform_driver txx9dmac_driver = {
	.remove		= txx9dmac_remove,
	.shutdown	= txx9dmac_shutdown,
	.driver = {
		.name	= "txx9dmac",
		.pm	= &txx9dmac_dev_pm_ops,
	},
};

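/*
 * Register the controller driver before the per-channel driver so that
 * platform_get_drvdata(dmac_dev) is valid by the time
 * txx9dmac_chan_probe() runs; unwind if channel registration fails.
 */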
static int __init txx9dmac_init(void)
{
	int rc;

	rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
	if (!rc) {
		rc = platform_driver_probe(&txx9dmac_chan_driver,
					   txx9dmac_chan_probe);
		if (rc)
			platform_driver_unregister(&txx9dmac_driver);
	}
	return rc;
}
module_init(txx9dmac_init);

static void __exit txx9dmac_exit(void)
{
	platform_driver_unregister(&txx9dmac_chan_driver);
	platform_driver_unregister(&txx9dmac_driver);
}
module_exit(txx9dmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 DMA Controller driver");
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_ALIAS("platform:txx9dmac");
MODULE_ALIAS("platform:txx9dmac-chan");