// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/dma/edma.h>
#include <linux/pci.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

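/*
 * Descriptor memory layout: each dw_edma_desc is built as a list of
 * dw_edma_chunk elements, and each chunk in turn holds a list of
 * dw_edma_burst elements. A burst corresponds to one linked-list entry
 * (source, destination, size), while a chunk corresponds to one hardware
 * linked list, consumed by the engine in a single run.
 */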
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma *dw = chan->chip->dw;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/*
	 * Toggle the change bit (CB) on each chunk. This is the mechanism
	 * that tells the eDMA HW block a new linked list is ready to be
	 * consumed:
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
	chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

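/*
 * Note: in the free helpers below, list_for_each_entry_safe() never visits
 * the element that embeds the list head (chunk->burst / desc->chunk), so
 * that head element is freed separately after the loop.
 */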
static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

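/*
 * Pop the first pending chunk off the active descriptor, hand it to the
 * v0 core to be programmed and started, and account its size towards the
 * descriptor's transferred total. The "first" flag (!desc->xfer_sz) tells
 * the core this is the first chunk of the descriptor.
 */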
static void dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return;

	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

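/*
 * Pause and stop requests are not applied to the hardware immediately:
 * they are recorded in chan->request and honoured by
 * dw_edma_done_interrupt() once the chunk currently in flight completes.
 */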
static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;
	LIST_HEAD(head);

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state; it probably missed
		 * or lost an interrupt, so treat it as idle.
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (chan->configured && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

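/*
 * Residue is reported at descriptor granularity: the bytes still owed on
 * a descriptor are its allocated size minus what has already been handed
 * to the hardware (alloc_sz - xfer_sz).
 */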
static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}

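/*
 * Common preparation path for both scatter-gather and cyclic transfers:
 * validate the request against the channel direction and configuration,
 * then translate it into the chunk/burst lists consumed by the hardware,
 * opening a fresh chunk whenever the current one reaches ll_max bursts.
 */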
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction direction = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt;
	int i;

	if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) ||
	    (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ))
		return NULL;

	if (xfer->cyclic) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	}

	if (!chan->configured)
		return NULL;

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	src_addr = chan->config.src_addr;
	dst_addr = chan->config.dst_addr;

	if (xfer->cyclic) {
		cnt = xfer->xfer.cyclic.cnt;
	} else {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	}

	for (i = 0; i < cnt; i++) {
		if (!xfer->cyclic && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->cyclic)
			burst->sz = xfer->xfer.cyclic.len;
		else
			burst->sz = sg_dma_len(sg);

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (direction == DMA_DEV_TO_MEM) {
			burst->sar = src_addr;
			if (xfer->cyclic) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else {
				burst->dar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption made by
				 * other drivers/IPs, the peripheral memory
				 * here is not a FIFO but linear memory,
				 * which is why the source and destination
				 * addresses are advanced by the same amount
				 * (the data length).
				 */
				src_addr += sg_dma_len(sg);
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->cyclic) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else {
				burst->sar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption made by
				 * other drivers/IPs, the peripheral memory
				 * here is not a FIFO but linear memory,
				 * which is why the source and destination
				 * addresses are advanced by the same amount
				 * (the data length).
				 */
				dst_addr += sg_dma_len(sg);
			}
		}

		if (!xfer->cyclic)
			sg = sg_next(sg);
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.cyclic = false;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.cyclic = true;

	return dw_edma_device_transfer(&xfer);
}

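/*
 * "Done" interrupt handling: depending on any pending request, either
 * kick the next chunk of the current descriptor, complete the cookie,
 * or transition the channel into the paused/idle state.
 */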
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

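/*
 * One IRQ line can serve several channels. The per-IRQ wr_mask/rd_mask
 * select which channels this vector owns; write channels occupy slots
 * [0, wr_ch_cnt) of dw->chan[] and read channels follow at offset
 * wr_ch_cnt, hence the "off" adjustment below.
 */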
static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
					      EDMA_DIR_WRITE :
					      EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
					       EDMA_DIR_WRITE :
					       EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->chip->dev);
}

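/*
 * Carve the shared linked-list and data regions into per-channel chunks,
 * bind every channel to an IRQ vector, and register one dma_device per
 * direction (write and read) with the dmaengine core.
 */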
static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	struct dw_edma_chan *chan;
	size_t ll_chunk, dt_chunk;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 i, j, cnt, ch_cnt;
	u32 alloc, off_alloc;
	int err = 0;
	u32 pos;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
	ll_chunk = dw->ll_region.sz;
	dt_chunk = dw->dt_region.sz;

	/* Calculate linked list chunk for each channel */
	ll_chunk /= roundup_pow_of_two(ch_cnt);

	/* Calculate data chunk for each channel */
	dt_chunk /= roundup_pow_of_two(ch_cnt);

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->chip = chip;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		chan->ll_off = (ll_chunk * i);
		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;

		chan->dt_off = (dt_chunk * i);

		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
			 write ? "write" : "read", j,
			 chan->ll_off, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
		dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
		dt_region->sz = dt_chunk;

		dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
			 write ? "write" : "read", j, chan->dt_off);

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma->chancnt = cnt;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}

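/*
 * IRQ vector distribution helpers: dw_edma_dec_irq_alloc() hands out one
 * vector at a time, alternating between write and read, until either the
 * vectors or the channels run out; dw_edma_add_irq_mask() then computes
 * how many channels have to share each vector (e.g. 8 channels on 3
 * vectors yields a mask of 3, i.e. up to 3 channels per vector).
 */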
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}

static int dw_edma_irq_request(struct dw_edma_chip *chip,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (dw->nr_irqs < 1)
		return -EINVAL;

	if (dw->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		err = request_irq(pci_irq_vector(to_pci_dev(dev), 0),
				  dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0),
				   &dw->irq[0].msi);
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = dw->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			err = request_irq(pci_irq_vector(to_pci_dev(dev), i),
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				dw->nr_irqs = i;
				return err;
			}

			get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i),
					   &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}

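/*
 * Bring-up sequence: discover the channel counts from the hardware,
 * allocate the channel table, quiesce the engine, request IRQ vectors,
 * and register the write and read DMA devices before enabling runtime PM
 * and debugfs.
 */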
int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	raw_spin_lock_init(&dw->lock);

	/* Find out how many write channels are supported by hardware */
	dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
	if (!dw->wr_ch_cnt)
		return -EINVAL;

	/* Find out how many read channels are supported by hardware */
	dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
	if (!dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable the eDMA engine to establish known initial conditions */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Set up write channels */
	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Set up read channels */
	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Power management */
	pm_runtime_enable(dev);

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(chip);

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);

	dw->nr_irqs = 0;

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);

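/*
 * Teardown mirrors probe in reverse: quiesce the engine, release IRQ
 * vectors, disable runtime PM, drain both channel lists, and finally
 * unregister the two DMA devices.
 */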
int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free IRQs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);

	/* Power management */
	pm_runtime_disable(dev);

	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	dma_async_device_unregister(&dw->rd_edma);

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off();

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");