/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

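/*
 * Descriptor life cycle: shdma_get_desc() hands out DESC_IDLE descriptors
 * from ld_free, shdma_add_desc() marks them DESC_PREPARED,
 * shdma_tx_submit() marks all chunks DESC_SUBMITTED and moves them to
 * ld_queue, the threaded IRQ marks finished chunks DESC_COMPLETED, and
 * __ld_cleanup() runs the callback, keeps descriptors in DESC_WAITING
 * until they are acked, and finally returns them to ld_free as DESC_IDLE.
 */
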
#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;
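
/*
 * slave_num is read-only at runtime (0444), so a different bitmask size has
 * to be requested at boot or module load time, e.g. (assuming the usual
 * "shdma_base" parameter prefix derived from the object name):
 *
 *	shdma_base.slave_num=128
 */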

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .setup_xfer() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with schan->chan_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
			     dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;

		slave_id = schan->slave_id;
	} else {
		match = slave_id;
	}

	if (slave_id < 0 || slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = slave_id;

	return 0;
}

/*
 * This is the standard shdma filter function to be used as a replacement for
 * the "old" method, using the .private pointer. If for some reason you
 * allocate a channel without slave data, use something like ERR_PTR(-EINVAL)
 * as a filter parameter. If this filter is used, the slave driver, after
 * calling dma_request_channel(), will also have to call
 * dmaengine_slave_config() with .slave_id, .direction, and either .src_addr
 * or .dst_addr set.
 *
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers, using
 * these services, would have to provide their own filters, which first would
 * check the device driver, similar to how other DMAC drivers, e.g.,
 * sa11x0-dma.c, do this, and only then, in case of a match, call this common
 * filter.
 *
 * NOTE 2: This filter function is also used in the DT case by
 * shdma_of_xlate(). In that case the MID-RID value is used for slave channel
 * filtering and is passed to this function in the "arg" parameter.
 */
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int match = (long)arg;
	int ret;

	if (match < 0)
		/* No slave requested - arbitrary channel */
		return true;

	if (!schan->dev->of_node && match >= slave_num)
		return false;

	ret = ops->set_slave(schan, match, 0, true);
	if (ret < 0)
		return false;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);

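/*
 * A minimal consumer-side sketch of the legacy (non-DT) flow described
 * above; MY_SLAVE_ID and my_slave_fifo_phys are placeholders for whatever
 * the board/platform code provides:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_slave_config cfg = {
 *		.slave_id = MY_SLAVE_ID,
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = my_slave_fifo_phys,
 *	};
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(long)MY_SLAVE_ID);
 *	if (chan)
 *		dmaengine_slave_config(chan, &cfg);
 */
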
static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		ret = shdma_setup_slave(schan, slave->slave_id, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	if (callback)
		callback(param);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan: DMA channel
 * @flags: DMA transfer flags
 * @dst: destination DMA address, incremented when direction equals
 *	DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src: source DMA address, incremented when direction equals
 *	DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len: DMA transfer length
 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction: needed for slave DMA to decide which address to keep constant,
 *	equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor or NULL on error
 * Locks: called with schan->chan_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element, pointing at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what the user is dealing with in all API calls,
	 * its cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}

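/*
 * A rough client-side sketch of the MEMCPY path (chan, dst, src and len are
 * placeholders; error handling omitted):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		... wait, then poll dma_async_is_tx_complete() ...
 *	}
 */
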
static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist sgl[sg_len];
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	sg_init_table(sgl, sg_len);
	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);
}

static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	struct dma_slave_config *config;
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&schan->chan_lock, flags);
		ops->halt_channel(schan);

		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
			/* Record partial transfer */
			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
					struct shdma_desc, node);
			desc->partial = ops->get_partial(schan, desc);
		}

		spin_unlock_irqrestore(&schan->chan_lock, flags);

		shdma_chan_ld_cleanup(schan, true);
		break;
	case DMA_SLAVE_CONFIG:
		/*
		 * So far only .slave_id is used, but the slave drivers are
		 * encouraged to also set a transfer direction and an address.
		 */
		if (!arg)
			return -EINVAL;
		/*
		 * We could lock this, but you shouldn't be configuring the
		 * channel while using it...
		 */
		config = (struct dma_slave_config *)arg;
		ret = shdma_setup_slave(schan, config->slave_id,
					config->direction == DMA_DEV_TO_MEM ?
					config->src_addr : config->dst_addr);
		if (ret < 0)
			return ret;
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

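/*
 * Both commands are normally reached via the dmaengine wrappers: with this
 * driver registered, dmaengine_terminate_all(chan) ends up in
 * shdma_control(chan, DMA_TERMINATE_ALL, 0), and
 * dmaengine_slave_config(chan, &cfg) in
 * shdma_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&cfg).
 */
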
static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

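/*
 * pm_state interplay: shdma_tx_submit() powers the channel up on the first
 * submit and otherwise leaves pm_state at SHDMA_PM_PENDING, so the queue is
 * not kicked from process context; shdma_issue_pending() then either starts
 * the queue directly (SHDMA_PM_ESTABLISHED) or re-arms the pending state so
 * that the IRQ path starts it.
 */
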
static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error
	 */
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
			sdesc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);

static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
		      unsigned long flags, const char *name)
{
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return ret;
}
EXPORT_SYMBOL(shdma_request_irq);

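/*
 * A glue driver typically hooks its per-channel interrupt up from its own
 * probe path, along these lines (irq, irqflags and dev_id are placeholders
 * for values the glue driver owns):
 *
 *	err = shdma_request_irq(schan, irq, irqflags, dev_id);
 *	if (err < 0)
 *		goto chan_probe_err;
 *
 * The IRQ is devm-managed, so there is no matching free call.
 */
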
void shdma_chan_probe(struct shdma_dev *sdev,
		      struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[sdev->dma_dev.chancnt++] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);

void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
	       int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all callbacks for now; they can trivially be made optional
	 * later as required.
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Fields compulsory for DMA_SLAVE */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
	dma_dev->device_control = shdma_control;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);

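/*
 * A minimal sketch of how a hardware glue driver is expected to use the
 * library from its probe function (my_shdma_ops, my_dev, my_desc and
 * NR_CHANNELS are hypothetical names owned by the glue driver):
 *
 *	struct shdma_dev *sdev = &my_dev->shdma_dev;
 *	int err, i;
 *
 *	sdev->ops = &my_shdma_ops;
 *	sdev->desc_size = sizeof(struct my_desc);
 *	dma_cap_set(DMA_SLAVE, sdev->dma_dev.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
 *
 *	err = shdma_init(&pdev->dev, sdev, NR_CHANNELS);
 *	if (err < 0)
 *		return err;
 *	for (i = 0; i < NR_CHANNELS; i++)
 *		shdma_chan_probe(sdev, &my_dev->chan[i].shdma_chan, i);
 *	err = dma_async_device_register(&sdev->dma_dev);
 */
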
void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
	shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
				   sizeof(long), GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	kfree(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");