/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
        DESC_IDLE,
        DESC_PREPARED,
        DESC_SUBMITTED,
        DESC_COMPLETED, /* completed, have to call callback */
        DESC_WAITING,   /* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can use only a finite number of channels.
 * We use slave channel IDs to make sure that no slave channel ID is allocated
 * more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;
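
/*
 * Illustrative sketch only (not part of this driver): a client typically
 * binds a channel to a slave by handing a struct shdma_slave to the channel
 * via chan->private from a dmaengine filter function. my_filter and
 * MY_SLAVE_ID below are hypothetical names:
 *
 *        static bool my_filter(struct dma_chan *chan, void *arg)
 *        {
 *                chan->private = arg;
 *                return true;
 *        }
 *
 *        struct shdma_slave slave = { .slave_id = MY_SLAVE_ID };
 *        dma_cap_mask_t mask;
 *
 *        dma_cap_zero(mask);
 *        dma_cap_set(DMA_SLAVE, mask);
 *        chan = dma_request_channel(mask, my_filter, &slave);
 *
 * shdma_alloc_chan_resources() below then validates slave_id against
 * slave_num and marks it busy in shdma_slave_used.
 */
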
/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
        struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
        const struct shdma_ops *ops = sdev->ops;
        struct shdma_desc *sdesc;

        /* DMA work check */
        if (ops->channel_busy(schan))
                return;

        /* Find the first not transferred descriptor */
        list_for_each_entry(sdesc, &schan->ld_queue, node)
                if (sdesc->mark == DESC_SUBMITTED) {
                        ops->start_xfer(schan, sdesc);
                        break;
                }
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct shdma_desc *chunk, *c, *desc =
                container_of(tx, struct shdma_desc, async_tx),
                *last = desc;
        struct shdma_chan *schan = to_shdma_chan(tx->chan);
        struct shdma_slave *slave = schan->slave;
        dma_async_tx_callback callback = tx->callback;
        dma_cookie_t cookie;
        bool power_up;

        spin_lock_irq(&schan->chan_lock);

        power_up = list_empty(&schan->ld_queue);

        cookie = dma_cookie_assign(tx);

        /* Mark all chunks of this descriptor as submitted, move to the queue */
        list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
                /*
                 * All chunks are on the global ld_free, so we have to find
                 * the end of the chain ourselves
                 */
                if (chunk != desc && (chunk->mark == DESC_IDLE ||
                                      chunk->async_tx.cookie > 0 ||
                                      chunk->async_tx.cookie == -EBUSY ||
                                      &chunk->node == &schan->ld_free))
                        break;
                chunk->mark = DESC_SUBMITTED;
                /* Callback goes to the last chunk */
                chunk->async_tx.callback = NULL;
                chunk->cookie = cookie;
                list_move_tail(&chunk->node, &schan->ld_queue);
                last = chunk;

                dev_dbg(schan->dev, "submit #%d@%p on %d\n",
                        tx->cookie, &last->async_tx, schan->id);
        }

        last->async_tx.callback = callback;
        last->async_tx.callback_param = tx->callback_param;

        if (power_up) {
                int ret;
                schan->pm_state = SHDMA_PM_BUSY;

                ret = pm_runtime_get(schan->dev);

                spin_unlock_irq(&schan->chan_lock);
                if (ret < 0)
                        dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

                pm_runtime_barrier(schan->dev);

                spin_lock_irq(&schan->chan_lock);

                /* Have we been reset while waiting? */
                if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
                        struct shdma_dev *sdev =
                                to_shdma_dev(schan->dma_chan.device);
                        const struct shdma_ops *ops = sdev->ops;
                        dev_dbg(schan->dev, "Bring up channel %d\n",
                                schan->id);
                        /*
                         * TODO: .xfer_setup() might fail on some platforms.
                         * Make it int then, on error remove chunks from the
                         * queue again
                         */
                        ops->setup_xfer(schan, slave);

                        if (schan->pm_state == SHDMA_PM_PENDING)
                                shdma_chan_xfer_ld_queue(schan);
                        schan->pm_state = SHDMA_PM_ESTABLISHED;
                }
        } else {
                /*
                 * Tell .device_issue_pending() not to run the queue, interrupts
                 * will do it anyway
                 */
                schan->pm_state = SHDMA_PM_PENDING;
        }

        spin_unlock_irq(&schan->chan_lock);

        return cookie;
}
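
/*
 * Illustrative sketch only: clients never call shdma_tx_submit() directly;
 * it is reached through the generic dmaengine helpers. For a descriptor "tx"
 * obtained from one of the prep methods below (my_done and my_data are
 * hypothetical):
 *
 *        tx->callback = my_done;
 *        tx->callback_param = my_data;
 *        cookie = dmaengine_submit(tx);
 *        dma_async_issue_pending(chan);
 *
 * dmaengine_submit() ends up here, assigning the cookie, moving all chunks of
 * the chain onto ld_queue, and powering the channel up if it was idle.
 */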

/* Called with schan->chan_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
        struct shdma_desc *sdesc;

        list_for_each_entry(sdesc, &schan->ld_free, node)
                if (sdesc->mark != DESC_PREPARED) {
                        BUG_ON(sdesc->mark != DESC_IDLE);
                        list_del(&sdesc->node);
                        return sdesc;
                }

        return NULL;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
        struct shdma_chan *schan = to_shdma_chan(chan);
        struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
        const struct shdma_ops *ops = sdev->ops;
        struct shdma_desc *desc;
        struct shdma_slave *slave = chan->private;
        int ret, i;

        /*
         * This relies on the guarantee from dmaengine that alloc_chan_resources
         * never runs concurrently with itself or free_chan_resources.
         */
        if (slave) {
                if (slave->slave_id >= slave_num) {
                        ret = -EINVAL;
                        goto evalid;
                }

                if (test_and_set_bit(slave->slave_id, shdma_slave_used)) {
                        ret = -EBUSY;
                        goto etestused;
                }

                ret = ops->set_slave(schan, slave);
                if (ret < 0)
                        goto esetslave;
        }

        schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
                              sdev->desc_size, GFP_KERNEL);
        if (!schan->desc) {
                ret = -ENOMEM;
                goto edescalloc;
        }
        schan->desc_num = NR_DESCS_PER_CHANNEL;
        schan->slave = slave;

        for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
                desc = ops->embedded_desc(schan->desc, i);
                dma_async_tx_descriptor_init(&desc->async_tx,
                                             &schan->dma_chan);
                desc->async_tx.tx_submit = shdma_tx_submit;
                desc->mark = DESC_IDLE;

                list_add(&desc->node, &schan->ld_free);
        }

        return NR_DESCS_PER_CHANNEL;

edescalloc:
        if (slave)
esetslave:
                clear_bit(slave->slave_id, shdma_slave_used);
etestused:
evalid:
        chan->private = NULL;
        return ret;
}
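
/*
 * Illustrative sketch only: the ops->embedded_desc() call above lets a glue
 * driver keep struct shdma_desc embedded inside its own hardware-specific
 * descriptor. Assuming a hypothetical glue driver (my_desc, my_embedded_desc
 * and the register fields are made-up names):
 *
 *        struct my_desc {
 *                struct shdma_desc shdma_desc;
 *                u32 sar, dar, tcr;
 *        };
 *
 *        static struct shdma_desc *my_embedded_desc(void *buf, int i)
 *        {
 *                return &((struct my_desc *)buf)[i].shdma_desc;
 *        }
 *
 * with sdev->desc_size set to sizeof(struct my_desc), so that the kcalloc()
 * above sizes the per-channel descriptor array correctly.
 */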

static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
        struct shdma_desc *desc, *_desc;
        /* Is the "exposed" head of a chain acked? */
        bool head_acked = false;
        dma_cookie_t cookie = 0;
        dma_async_tx_callback callback = NULL;
        void *param = NULL;
        unsigned long flags;

        spin_lock_irqsave(&schan->chan_lock, flags);
        list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
                struct dma_async_tx_descriptor *tx = &desc->async_tx;

                BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
                BUG_ON(desc->mark != DESC_SUBMITTED &&
                       desc->mark != DESC_COMPLETED &&
                       desc->mark != DESC_WAITING);

                /*
                 * queue is ordered, and we use this loop to (1) clean up all
                 * completed descriptors, and to (2) update descriptor flags of
                 * any chunks in a (partially) completed chain
                 */
                if (!all && desc->mark == DESC_SUBMITTED &&
                    desc->cookie != cookie)
                        break;

                if (tx->cookie > 0)
                        cookie = tx->cookie;

                if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
                        if (schan->dma_chan.completed_cookie != desc->cookie - 1)
                                dev_dbg(schan->dev,
                                        "Completing cookie %d, expected %d\n",
                                        desc->cookie,
                                        schan->dma_chan.completed_cookie + 1);
                        schan->dma_chan.completed_cookie = desc->cookie;
                }

                /* Call callback on the last chunk */
                if (desc->mark == DESC_COMPLETED && tx->callback) {
                        desc->mark = DESC_WAITING;
                        callback = tx->callback;
                        param = tx->callback_param;
                        dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
                                tx->cookie, tx, schan->id);
                        BUG_ON(desc->chunks != 1);
                        break;
                }

                if (tx->cookie > 0 || tx->cookie == -EBUSY) {
                        if (desc->mark == DESC_COMPLETED) {
                                BUG_ON(tx->cookie < 0);
                                desc->mark = DESC_WAITING;
                        }
                        head_acked = async_tx_test_ack(tx);
                } else {
                        switch (desc->mark) {
                        case DESC_COMPLETED:
                                desc->mark = DESC_WAITING;
                                /* Fall through */
                        case DESC_WAITING:
                                if (head_acked)
                                        async_tx_ack(&desc->async_tx);
                        }
                }

                dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
                        tx, tx->cookie);

                if (((desc->mark == DESC_COMPLETED ||
                      desc->mark == DESC_WAITING) &&
                     async_tx_test_ack(&desc->async_tx)) || all) {
                        /* Remove from ld_queue list */
                        desc->mark = DESC_IDLE;

                        list_move(&desc->node, &schan->ld_free);

                        if (list_empty(&schan->ld_queue)) {
                                dev_dbg(schan->dev, "Bring down channel %d\n",
                                        schan->id);
                                pm_runtime_put(schan->dev);
                                schan->pm_state = SHDMA_PM_ESTABLISHED;
                        }
                }
        }

        if (all && !callback)
                /*
                 * Terminating and the loop completed normally: forgive
                 * uncompleted cookies
                 */
                schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

        spin_unlock_irqrestore(&schan->chan_lock, flags);

        if (callback)
                callback(param);

        return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
        while (__ld_cleanup(schan, all))
                ;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
        struct shdma_chan *schan = to_shdma_chan(chan);
        struct shdma_dev *sdev = to_shdma_dev(chan->device);
        const struct shdma_ops *ops = sdev->ops;
        LIST_HEAD(list);

        /* Protect against ISR */
        spin_lock_irq(&schan->chan_lock);
        ops->halt_channel(schan);
        spin_unlock_irq(&schan->chan_lock);

        /* Now no new interrupts will occur */

        /* Prepared and not submitted descriptors can still be on the queue */
        if (!list_empty(&schan->ld_queue))
                shdma_chan_ld_cleanup(schan, true);

        if (schan->slave) {
                /* The caller is holding dma_list_mutex */
                struct shdma_slave *slave = schan->slave;
                clear_bit(slave->slave_id, shdma_slave_used);
                chan->private = NULL;
        }

        spin_lock_irq(&schan->chan_lock);

        list_splice_init(&schan->ld_free, &list);
        schan->desc_num = 0;

        spin_unlock_irq(&schan->chan_lock);

        kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan: DMA channel
 * @flags: DMA transfer flags
 * @dst: destination DMA address, incremented when direction equals
 *       DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src: source DMA address, incremented when direction equals
 *       DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len: DMA transfer length
 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction: needed for slave DMA to decide which address to keep constant,
 *             equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor or NULL in case of an error
 * Locks: called with schan->chan_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
        unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
        struct shdma_desc **first, enum dma_transfer_direction direction)
{
        struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
        const struct shdma_ops *ops = sdev->ops;
        struct shdma_desc *new;
        size_t copy_size = *len;

        if (!copy_size)
                return NULL;

        /* Allocate the link descriptor from the free list */
        new = shdma_get_desc(schan);
        if (!new) {
                dev_err(schan->dev, "No free link descriptor available\n");
                return NULL;
        }

        ops->desc_setup(schan, new, *src, *dst, &copy_size);

        if (!*first) {
                /* First desc */
                new->async_tx.cookie = -EBUSY;
                *first = new;
        } else {
                /* Other desc - invisible to the user */
                new->async_tx.cookie = -EINVAL;
        }

        dev_dbg(schan->dev,
                "chaining (%zu/%zu)@%llx -> %llx with %p, cookie %d\n",
                copy_size, *len, (unsigned long long)*src,
                (unsigned long long)*dst, &new->async_tx,
                new->async_tx.cookie);

        new->mark = DESC_PREPARED;
        new->async_tx.flags = flags;
        new->direction = direction;

        *len -= copy_size;
        if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
                *src += copy_size;
        if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
                *dst += copy_size;

        return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM, and the SG list contains only one element, pointing at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
        struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
        enum dma_transfer_direction direction, unsigned long flags)
{
        struct scatterlist *sg;
        struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
        LIST_HEAD(tx_list);
        int chunks = 0;
        unsigned long irq_flags;
        int i;

        for_each_sg(sgl, sg, sg_len, i)
                chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

        /* Have to lock the whole loop to protect against concurrent release */
        spin_lock_irqsave(&schan->chan_lock, irq_flags);

        /*
         * Chaining:
         * the first descriptor is what the user is dealing with in all API
         * calls, its cookie is at first set to -EBUSY, at tx-submit to a
         * positive number;
         * if more than one chunk is needed, further chunks have cookie = -EINVAL;
         * the last chunk, if not equal to the first, has cookie = -ENOSPC;
         * all chunks are linked onto the tx_list head with their .node heads
         * only during this function, then they are immediately spliced back
         * onto the free list in form of a chain.
         */
        for_each_sg(sgl, sg, sg_len, i) {
                dma_addr_t sg_addr = sg_dma_address(sg);
                size_t len = sg_dma_len(sg);

                if (!len)
                        goto err_get_desc;

                do {
                        dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %llx\n",
                                i, sg, len, (unsigned long long)sg_addr);

                        if (direction == DMA_DEV_TO_MEM)
                                new = shdma_add_desc(schan, flags,
                                                &sg_addr, addr, &len, &first,
                                                direction);
                        else
                                new = shdma_add_desc(schan, flags,
                                                addr, &sg_addr, &len, &first,
                                                direction);
                        if (!new)
                                goto err_get_desc;

                        new->chunks = chunks--;
                        list_add_tail(&new->node, &tx_list);
                } while (len);
        }

        if (new != first)
                new->async_tx.cookie = -ENOSPC;

        /* Put them back on the free list, so they don't get lost */
        list_splice_tail(&tx_list, &schan->ld_free);

        spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

        return &first->async_tx;

err_get_desc:
        list_for_each_entry(new, &tx_list, node)
                new->mark = DESC_IDLE;
        list_splice(&tx_list, &schan->ld_free);

        spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

        return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
        struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
        size_t len, unsigned long flags)
{
        struct shdma_chan *schan = to_shdma_chan(chan);
        struct scatterlist sg;

        if (!chan || !len)
                return NULL;

        BUG_ON(!schan->desc_num);

        sg_init_table(&sg, 1);
        sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
                    offset_in_page(dma_src));
        sg_dma_address(&sg) = dma_src;
        sg_dma_len(&sg) = len;

        return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
}
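
/*
 * Illustrative sketch only: dma_dest and dma_src above must already be DMA
 * addresses. A minimal client-side sketch, assuming "dev" is the DMA device
 * and src_buf/dst_buf are kmalloc()ed buffers (all names hypothetical):
 *
 *        dma_addr_t src = dma_map_single(dev, src_buf, len, DMA_TO_DEVICE);
 *        dma_addr_t dst = dma_map_single(dev, dst_buf, len, DMA_FROM_DEVICE);
 *        struct dma_async_tx_descriptor *tx;
 *
 *        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *                                                  DMA_PREP_INTERRUPT);
 *
 * followed by dmaengine_submit() and dma_async_issue_pending() as usual, and
 * dma_unmap_single() once the completion callback has run.
 */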

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction direction, unsigned long flags,
        void *context)
{
        struct shdma_chan *schan = to_shdma_chan(chan);
        struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
        const struct shdma_ops *ops = sdev->ops;
        struct shdma_slave *slave = schan->slave;
        dma_addr_t slave_addr;

        if (!chan)
                return NULL;

        BUG_ON(!schan->desc_num);

        /* Someone calling slave DMA on a generic channel? */
        if (!slave || !sg_len) {
                dev_warn(schan->dev, "%s: bad parameter: %p, %d, %d\n",
                         __func__, slave, sg_len, slave ? slave->slave_id : -1);
                return NULL;
        }

        slave_addr = ops->slave_addr(schan);

        return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
                             direction, flags);
}
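
/*
 * Illustrative sketch only: a peripheral driver reaches the above through
 * the generic slave API, with an SG list already mapped via dma_map_sg()
 * (nents is its return value) on a channel bound to a slave:
 *
 *        struct dma_async_tx_descriptor *tx;
 *
 *        tx = chan->device->device_prep_slave_sg(chan, sgl, nents,
 *                                DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT, NULL);
 *
 * Note that the slave address (e.g. the peripheral FIFO register) is not
 * passed in here: it is obtained from ops->slave_addr() on the channel's
 * bound slave.
 */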

static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                         unsigned long arg)
{
        struct shdma_chan *schan = to_shdma_chan(chan);
        const struct shdma_ops *ops;
        unsigned long flags;

        if (!chan)
                return -EINVAL;

        /* Only supports DMA_TERMINATE_ALL */
        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        ops = to_shdma_dev(chan->device)->ops;

        spin_lock_irqsave(&schan->chan_lock, flags);

        ops->halt_channel(schan);

        spin_unlock_irqrestore(&schan->chan_lock, flags);

        shdma_chan_ld_cleanup(schan, true);

        return 0;
}
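
/*
 * Illustrative sketch only: clients reach the above through the
 * device_control entry point, e.g. to abort all outstanding transfers
 * on a timeout:
 *
 *        chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 *
 * (or via the dmaengine_terminate_all() wrapper, where available). All
 * queued descriptors are returned to ld_free and their cookies forgiven.
 */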

static void shdma_issue_pending(struct dma_chan *chan)
{
        struct shdma_chan *schan = to_shdma_chan(chan);

        spin_lock_irq(&schan->chan_lock);
        if (schan->pm_state == SHDMA_PM_ESTABLISHED)
                shdma_chan_xfer_ld_queue(schan);
        else
                schan->pm_state = SHDMA_PM_PENDING;
        spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
                                       dma_cookie_t cookie,
                                       struct dma_tx_state *txstate)
{
        struct shdma_chan *schan = to_shdma_chan(chan);
        enum dma_status status;
        unsigned long flags;

        shdma_chan_ld_cleanup(schan, false);

        spin_lock_irqsave(&schan->chan_lock, flags);

        status = dma_cookie_status(chan, cookie, txstate);

        /*
         * If we don't find the cookie on the queue, it has been aborted and
         * we have to report an error
         */
        if (status != DMA_SUCCESS) {
                struct shdma_desc *sdesc;
                status = DMA_ERROR;
                list_for_each_entry(sdesc, &schan->ld_queue, node)
                        if (sdesc->cookie == cookie) {
                                status = DMA_IN_PROGRESS;
                                break;
                        }
        }

        spin_unlock_irqrestore(&schan->chan_lock, flags);

        return status;
}
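
/*
 * Illustrative sketch only: a client polling for completion instead of
 * using a callback could use the generic helper, which lands in
 * shdma_tx_status() above:
 *
 *        enum dma_status status;
 *        dma_cookie_t last, used;
 *
 *        do {
 *                status = dma_async_is_tx_complete(chan, cookie,
 *                                                  &last, &used);
 *                cpu_relax();
 *        } while (status == DMA_IN_PROGRESS);
 *
 * DMA_ERROR here means the cookie is neither completed nor on ld_queue,
 * i.e. the transfer has been aborted.
 */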

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
        const struct shdma_ops *ops = sdev->ops;
        struct shdma_chan *schan;
        unsigned int handled = 0;
        int i;

        /* Reset all channels */
        shdma_for_each_chan(schan, sdev, i) {
                struct shdma_desc *sdesc;
                LIST_HEAD(dl);

                if (!schan)
                        continue;

                spin_lock(&schan->chan_lock);

                /* Stop the channel */
                ops->halt_channel(schan);

                list_splice_init(&schan->ld_queue, &dl);

                if (!list_empty(&dl)) {
                        dev_dbg(schan->dev, "Bring down channel %d\n",
                                schan->id);
                        pm_runtime_put(schan->dev);
                }
                schan->pm_state = SHDMA_PM_ESTABLISHED;

                spin_unlock(&schan->chan_lock);

                /* Complete all */
                list_for_each_entry(sdesc, &dl, node) {
                        struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
                        sdesc->mark = DESC_IDLE;
                        if (tx->callback)
                                tx->callback(tx->callback_param);
                }

                spin_lock(&schan->chan_lock);
                list_splice(&dl, &schan->ld_free);
                spin_unlock(&schan->chan_lock);

                handled++;
        }

        return !!handled;
}
EXPORT_SYMBOL(shdma_reset);
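
/*
 * Illustrative sketch only: a glue driver would typically call shdma_reset()
 * from its error interrupt handler (my_err_irq is a hypothetical name, with
 * the struct shdma_dev passed as the IRQ cookie):
 *
 *        static irqreturn_t my_err_irq(int irq, void *data)
 *        {
 *                struct shdma_dev *sdev = data;
 *
 *                return IRQ_RETVAL(shdma_reset(sdev));
 *        }
 */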

static irqreturn_t chan_irq(int irq, void *dev)
{
        struct shdma_chan *schan = dev;
        const struct shdma_ops *ops =
                to_shdma_dev(schan->dma_chan.device)->ops;
        irqreturn_t ret;

        spin_lock(&schan->chan_lock);

        ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

        spin_unlock(&schan->chan_lock);

        return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
        struct shdma_chan *schan = dev;
        const struct shdma_ops *ops =
                to_shdma_dev(schan->dma_chan.device)->ops;
        struct shdma_desc *sdesc;

        spin_lock_irq(&schan->chan_lock);
        list_for_each_entry(sdesc, &schan->ld_queue, node) {
                if (sdesc->mark == DESC_SUBMITTED &&
                    ops->desc_completed(schan, sdesc)) {
                        dev_dbg(schan->dev, "done #%d@%p\n",
                                sdesc->async_tx.cookie, &sdesc->async_tx);
                        sdesc->mark = DESC_COMPLETED;
                        break;
                }
        }
        /* Next desc */
        shdma_chan_xfer_ld_queue(schan);
        spin_unlock_irq(&schan->chan_lock);

        shdma_chan_ld_cleanup(schan, false);

        return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
                      unsigned long flags, const char *name)
{
        int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
                                       flags, name, schan);

        schan->irq = ret < 0 ? ret : irq;

        return ret;
}
EXPORT_SYMBOL(shdma_request_irq);
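
/*
 * Illustrative sketch only: a glue driver's channel-probe path requests the
 * per-channel interrupt like this ("my-dma-chan" and err_no_irq are made-up
 * names); this installs the chan_irq/chan_irqt pair above, whose threaded
 * half completes descriptors and kicks the next transfer outside of
 * hard-IRQ context:
 *
 *        ret = shdma_request_irq(schan, irq, IRQF_SHARED, "my-dma-chan");
 *        if (ret < 0)
 *                goto err_no_irq;
 */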

void shdma_free_irq(struct shdma_chan *schan)
{
        if (schan->irq >= 0)
                free_irq(schan->irq, schan);
}
EXPORT_SYMBOL(shdma_free_irq);

void shdma_chan_probe(struct shdma_dev *sdev,
                      struct shdma_chan *schan, int id)
{
        schan->pm_state = SHDMA_PM_ESTABLISHED;

        /* reference struct dma_device */
        schan->dma_chan.device = &sdev->dma_dev;
        dma_cookie_init(&schan->dma_chan);

        schan->dev = sdev->dma_dev.dev;
        schan->id = id;

        if (!schan->max_xfer_len)
                schan->max_xfer_len = PAGE_SIZE;

        spin_lock_init(&schan->chan_lock);

        /* Init descriptor management lists */
        INIT_LIST_HEAD(&schan->ld_queue);
        INIT_LIST_HEAD(&schan->ld_free);

        /* Add the channel to the DMA device channel list */
        list_add_tail(&schan->dma_chan.device_node,
                      &sdev->dma_dev.channels);
        sdev->schan[sdev->dma_dev.chancnt++] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);

void shdma_chan_remove(struct shdma_chan *schan)
{
        list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
               int chan_num)
{
        struct dma_device *dma_dev = &sdev->dma_dev;

        /*
         * Require all call-backs for now, they can trivially be made optional
         * later as required
         */
        if (!sdev->ops ||
            !sdev->desc_size ||
            !sdev->ops->embedded_desc ||
            !sdev->ops->start_xfer ||
            !sdev->ops->setup_xfer ||
            !sdev->ops->set_slave ||
            !sdev->ops->desc_setup ||
            !sdev->ops->slave_addr ||
            !sdev->ops->channel_busy ||
            !sdev->ops->halt_channel ||
            !sdev->ops->desc_completed)
                return -EINVAL;

        sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
        if (!sdev->schan)
                return -ENOMEM;

        INIT_LIST_HEAD(&dma_dev->channels);

        /* Common and MEMCPY operations */
        dma_dev->device_alloc_chan_resources
                = shdma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = shdma_free_chan_resources;
        dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
        dma_dev->device_tx_status = shdma_tx_status;
        dma_dev->device_issue_pending = shdma_issue_pending;

        /* Fields compulsory for DMA_SLAVE */
        dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
        dma_dev->device_control = shdma_control;

        dma_dev->dev = dev;

        return 0;
}
EXPORT_SYMBOL(shdma_init);
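
/*
 * Illustrative sketch only: a minimal probe path for a glue driver built on
 * this library. All my_* names, MY_NR_CHANNELS, my_shdma_ops, my_desc and
 * my_chans are hypothetical:
 *
 *        struct my_dev {
 *                struct shdma_dev shdma_dev;
 *        };
 *
 *        static int my_probe(struct platform_device *pdev)
 *        {
 *                struct my_dev *md;
 *                int err, i;
 *
 *                md = kzalloc(sizeof(*md), GFP_KERNEL);
 *                if (!md)
 *                        return -ENOMEM;
 *
 *                md->shdma_dev.ops = &my_shdma_ops;
 *                md->shdma_dev.desc_size = sizeof(struct my_desc);
 *
 *                err = shdma_init(&pdev->dev, &md->shdma_dev,
 *                                 MY_NR_CHANNELS);
 *                if (err < 0) {
 *                        kfree(md);
 *                        return err;
 *                }
 *
 *                dma_cap_set(DMA_MEMCPY, md->shdma_dev.dma_dev.cap_mask);
 *                dma_cap_set(DMA_SLAVE, md->shdma_dev.dma_dev.cap_mask);
 *
 *                for (i = 0; i < MY_NR_CHANNELS; i++)
 *                        shdma_chan_probe(&md->shdma_dev, &my_chans[i], i);
 *
 *                return dma_async_device_register(&md->shdma_dev.dma_dev);
 *        }
 */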

void shdma_cleanup(struct shdma_dev *sdev)
{
        kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
        shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
                                   sizeof(long), GFP_KERNEL);
        if (!shdma_slave_used)
                return -ENOMEM;
        return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
        kfree(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");