1/*
2 * Renesas R-Car Gen2 DMA Controller Driver
3 *
4 * Copyright (C) 2014 Renesas Electronics Inc.
5 *
6 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
7 *
8 * This is free software; you can redistribute it and/or modify
9 * it under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h>
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/module.h>
18#include <linux/mutex.h>
19#include <linux/of.h>
20#include <linux/of_dma.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/pm_runtime.h>
24#include <linux/slab.h>
25#include <linux/spinlock.h>
26
27#include "../dmaengine.h"
28
29/*
30 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
31 * @node: entry in the parent's chunks list
32 * @src_addr: device source address
33 * @dst_addr: device destination address
34 * @size: transfer size in bytes
35 */
36struct rcar_dmac_xfer_chunk {
37 struct list_head node;
38
39 dma_addr_t src_addr;
40 dma_addr_t dst_addr;
41 u32 size;
42};
43
44/*
45 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
46 * @sar: value of the SAR register (source address)
47 * @dar: value of the DAR register (destination address)
48 * @tcr: value of the TCR register (transfer count)
49 */
50struct rcar_dmac_hw_desc {
51 u32 sar;
52 u32 dar;
53 u32 tcr;
54 u32 reserved;
55} __attribute__((__packed__));
56
57/*
58 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
59 * @async_tx: base DMA asynchronous transaction descriptor
60 * @direction: direction of the DMA transfer
61 * @xfer_shift: log2 of the transfer size
62 * @chcr: value of the channel configuration register for this transfer
63 * @node: entry in the channel's descriptors lists
64 * @chunks: list of transfer chunks for this transfer
65 * @running: the transfer chunk being currently processed
66 * @nchunks: number of transfer chunks for this transfer
67 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
68 * @hwdescs.mem: hardware descriptors memory for the transfer
69 * @hwdescs.dma: device address of the hardware descriptors memory
70 * @hwdescs.size: size of the hardware descriptors in bytes
71 * @size: transfer size in bytes
72 * @cyclic: when set indicates that the DMA transfer is cyclic
73 */
74struct rcar_dmac_desc {
75 struct dma_async_tx_descriptor async_tx;
76 enum dma_transfer_direction direction;
77 unsigned int xfer_shift;
78 u32 chcr;
79
80 struct list_head node;
81 struct list_head chunks;
82 struct rcar_dmac_xfer_chunk *running;
83 unsigned int nchunks;
84
85 struct {
86 bool use;
87 struct rcar_dmac_hw_desc *mem;
88 dma_addr_t dma;
89 size_t size;
90 } hwdescs;
91
92 unsigned int size;
93 bool cyclic;
94};
95
96#define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
97
98/*
99 * struct rcar_dmac_desc_page - One page worth of descriptors
100 * @node: entry in the channel's pages list
101 * @descs: array of DMA descriptors
102 * @chunks: array of transfer chunk descriptors
103 */
104struct rcar_dmac_desc_page {
105 struct list_head node;
106
107 union {
108 struct rcar_dmac_desc descs[0];
109 struct rcar_dmac_xfer_chunk chunks[0];
110 };
111};
112
113#define RCAR_DMAC_DESCS_PER_PAGE \
114 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
115 sizeof(struct rcar_dmac_desc))
116#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \
117 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
118 sizeof(struct rcar_dmac_xfer_chunk))
119
120/*
121 * struct rcar_dmac_chan_slave - Slave configuration
122 * @slave_addr: slave memory address
123 * @xfer_size: size (in bytes) of hardware transfers
124 */
125struct rcar_dmac_chan_slave {
126 phys_addr_t slave_addr;
127 unsigned int xfer_size;
128};
129
130/*
131 * struct rcar_dmac_chan_map - Map of a slave device physical address to a DMA address
132 * @addr: slave dma address
133 * @dir: direction of mapping
134 * @slave: slave configuration that is mapped
135 */
136struct rcar_dmac_chan_map {
137 dma_addr_t addr;
138 enum dma_data_direction dir;
139 struct rcar_dmac_chan_slave slave;
140};
141
142/*
143 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
144 * @chan: base DMA channel object
145 * @iomem: channel I/O memory base
146 * @index: index of this channel in the controller
147 * @src: slave memory address and size on the source side
148 * @dst: slave memory address and size on the destination side
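 * @map: DMA address mapping of the slave device, set up when a transfer is prepared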
149 * @mid_rid: hardware MID/RID for the DMA client using this channel
150 * @lock: protects the channel CHCR register and the desc members
151 * @desc.free: list of free descriptors
152 * @desc.pending: list of pending descriptors (submitted with tx_submit)
153 * @desc.active: list of active descriptors (activated with issue_pending)
154 * @desc.done: list of completed descriptors
155 * @desc.wait: list of descriptors waiting for an ack
156 * @desc.running: the descriptor being processed (a member of the active list)
157 * @desc.chunks_free: list of free transfer chunk descriptors
158 * @desc.pages: list of pages used by allocated descriptors
159 */
160struct rcar_dmac_chan {
161 struct dma_chan chan;
162 void __iomem *iomem;
163 unsigned int index;
164
165 struct rcar_dmac_chan_slave src;
166 struct rcar_dmac_chan_slave dst;
167 struct rcar_dmac_chan_map map;
168 int mid_rid;
169
170 spinlock_t lock;
171
172 struct {
173 struct list_head free;
174 struct list_head pending;
175 struct list_head active;
176 struct list_head done;
177 struct list_head wait;
178 struct rcar_dmac_desc *running;
179
180 struct list_head chunks_free;
181
182 struct list_head pages;
183 } desc;
184};
185
186#define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan)
187
188/*
189 * struct rcar_dmac - R-Car Gen2 DMA Controller
190 * @engine: base DMA engine object
191 * @dev: the hardware device
192 * @iomem: remapped I/O memory base
193 * @n_channels: number of available channels
194 * @channels: array of DMAC channels
195 * @modules: bitmask of client modules in use
196 */
197struct rcar_dmac {
198 struct dma_device engine;
199 struct device *dev;
200 void __iomem *iomem;
201
202 unsigned int n_channels;
203 struct rcar_dmac_chan *channels;
204
205 DECLARE_BITMAP(modules, 256);
206};
207
208#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
209
210/* -----------------------------------------------------------------------------
211 * Registers
212 */
213
214#define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i))
215
216#define RCAR_DMAISTA 0x0020
217#define RCAR_DMASEC 0x0030
218#define RCAR_DMAOR 0x0060
219#define RCAR_DMAOR_PRI_FIXED (0 << 8)
220#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
221#define RCAR_DMAOR_AE (1 << 2)
222#define RCAR_DMAOR_DME (1 << 0)
223#define RCAR_DMACHCLR 0x0080
224#define RCAR_DMADPSEC 0x00a0
225
226#define RCAR_DMASAR 0x0000
227#define RCAR_DMADAR 0x0004
228#define RCAR_DMATCR 0x0008
229#define RCAR_DMATCR_MASK 0x00ffffff
230#define RCAR_DMATSR 0x0028
231#define RCAR_DMACHCR 0x000c
232#define RCAR_DMACHCR_CAE (1 << 31)
233#define RCAR_DMACHCR_CAIE (1 << 30)
234#define RCAR_DMACHCR_DPM_DISABLED (0 << 28)
235#define RCAR_DMACHCR_DPM_ENABLED (1 << 28)
236#define RCAR_DMACHCR_DPM_REPEAT (2 << 28)
237#define RCAR_DMACHCR_DPM_INFINITE (3 << 28)
238#define RCAR_DMACHCR_RPT_SAR (1 << 27)
239#define RCAR_DMACHCR_RPT_DAR (1 << 26)
240#define RCAR_DMACHCR_RPT_TCR (1 << 25)
241#define RCAR_DMACHCR_DPB (1 << 22)
242#define RCAR_DMACHCR_DSE (1 << 19)
243#define RCAR_DMACHCR_DSIE (1 << 18)
244#define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3))
245#define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3))
246#define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3))
247#define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3))
248#define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3))
249#define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3))
250#define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3))
251#define RCAR_DMACHCR_DM_FIXED (0 << 14)
252#define RCAR_DMACHCR_DM_INC (1 << 14)
253#define RCAR_DMACHCR_DM_DEC (2 << 14)
254#define RCAR_DMACHCR_SM_FIXED (0 << 12)
255#define RCAR_DMACHCR_SM_INC (1 << 12)
256#define RCAR_DMACHCR_SM_DEC (2 << 12)
257#define RCAR_DMACHCR_RS_AUTO (4 << 8)
258#define RCAR_DMACHCR_RS_DMARS (8 << 8)
259#define RCAR_DMACHCR_IE (1 << 2)
260#define RCAR_DMACHCR_TE (1 << 1)
261#define RCAR_DMACHCR_DE (1 << 0)
262#define RCAR_DMATCRB 0x0018
263#define RCAR_DMATSRB 0x0038
264#define RCAR_DMACHCRB 0x001c
265#define RCAR_DMACHCRB_DCNT(n) ((n) << 24)
266#define RCAR_DMACHCRB_DPTR_MASK (0xff << 16)
267#define RCAR_DMACHCRB_DPTR_SHIFT 16
268#define RCAR_DMACHCRB_DRST (1 << 15)
269#define RCAR_DMACHCRB_DTS (1 << 8)
270#define RCAR_DMACHCRB_SLM_NORMAL (0 << 4)
271#define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4)
272#define RCAR_DMACHCRB_PRI(n) ((n) << 0)
273#define RCAR_DMARS 0x0040
274#define RCAR_DMABUFCR 0x0048
275#define RCAR_DMABUFCR_MBU(n) ((n) << 16)
276#define RCAR_DMABUFCR_ULB(n) ((n) << 0)
277#define RCAR_DMADPBASE 0x0050
278#define RCAR_DMADPBASE_MASK 0xfffffff0
279#define RCAR_DMADPBASE_SEL (1 << 0)
280#define RCAR_DMADPCR 0x0054
281#define RCAR_DMADPCR_DIPT(n) ((n) << 24)
282#define RCAR_DMAFIXSAR 0x0010
283#define RCAR_DMAFIXDAR 0x0014
284#define RCAR_DMAFIXDPBASE 0x0060
285
286/* Hardcode the MEMCPY transfer size to 4 bytes. */
287#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
288
289/* -----------------------------------------------------------------------------
290 * Device access
291 */
292
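/* DMAOR is accessed as a 16-bit register; all other global registers are 32-bit wide. */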
293static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
294{
295 if (reg == RCAR_DMAOR)
296 writew(data, dmac->iomem + reg);
297 else
298 writel(data, dmac->iomem + reg);
299}
300
301static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
302{
303 if (reg == RCAR_DMAOR)
304 return readw(dmac->iomem + reg);
305 else
306 return readl(dmac->iomem + reg);
307}
308
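/* In the per-channel register block only DMARS is accessed as a 16-bit register. */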
309static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
310{
311 if (reg == RCAR_DMARS)
312 return readw(chan->iomem + reg);
313 else
314 return readl(chan->iomem + reg);
315}
316
317static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
318{
319 if (reg == RCAR_DMARS)
320 writew(data, chan->iomem + reg);
321 else
322 writel(data, chan->iomem + reg);
323}
324
325/* -----------------------------------------------------------------------------
326 * Initialization and configuration
327 */
328
329static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
330{
331 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
332
333 return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
334}
335
336static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
337{
338 struct rcar_dmac_desc *desc = chan->desc.running;
339 u32 chcr = desc->chcr;
340
341 WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
342
343 if (chan->mid_rid >= 0)
344 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
345
346 if (desc->hwdescs.use) {
347 struct rcar_dmac_xfer_chunk *chunk =
348 list_first_entry(&desc->chunks,
349 struct rcar_dmac_xfer_chunk, node);
350
351 dev_dbg(chan->chan.device->dev,
352 "chan%u: queue desc %p: %u@%pad\n",
353 chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
354
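		/*
		 * The FIXSAR/FIXDAR/FIXDPBASE registers hold the bits above
		 * bit 31 of the source, destination and descriptor base
		 * addresses.
		 */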
355#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
356 rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
357 chunk->src_addr >> 32);
358 rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
359 chunk->dst_addr >> 32);
360 rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
361 desc->hwdescs.dma >> 32);
362#endif
363 rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
364 (desc->hwdescs.dma & 0xfffffff0) |
365 RCAR_DMADPBASE_SEL);
366 rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
367 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
368 RCAR_DMACHCRB_DRST);
369
370 /*
371 * Errata: When descriptor memory is accessed through an IOMMU
372 * the DMADAR register isn't initialized automatically from the
373 * first descriptor at the beginning of the transfer by the DMAC as it
374 * should be. Initialize it manually with the destination address
375 * of the first chunk.
376 */
377 rcar_dmac_chan_write(chan, RCAR_DMADAR,
378 chunk->dst_addr & 0xffffffff);
379
380 /*
381 * Program the descriptor stage interrupt to occur after the end
382 * of the first stage.
383 */
384 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
385
386 chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
387 | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
388
389 /*
390 * If the descriptor isn't cyclic enable normal descriptor mode
391 * and the transfer completion interrupt.
392 */
393 if (!desc->cyclic)
394 chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
395 /*
396 * If the descriptor is cyclic and has a callback enable the
397 * descriptor stage interrupt in infinite repeat mode.
398 */
399 else if (desc->async_tx.callback)
400 chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
401 /*
402 * Otherwise just select infinite repeat mode without any
403 * interrupt.
404 */
405 else
406 chcr |= RCAR_DMACHCR_DPM_INFINITE;
407 } else {
408 struct rcar_dmac_xfer_chunk *chunk = desc->running;
409
410 dev_dbg(chan->chan.device->dev,
411 "chan%u: queue chunk %p: %u@%pad -> %pad\n",
412 chan->index, chunk, chunk->size, &chunk->src_addr,
413 &chunk->dst_addr);
414
415#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
416 rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
417 chunk->src_addr >> 32);
418 rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
419 chunk->dst_addr >> 32);
420#endif
421 rcar_dmac_chan_write(chan, RCAR_DMASAR,
422 chunk->src_addr & 0xffffffff);
423 rcar_dmac_chan_write(chan, RCAR_DMADAR,
424 chunk->dst_addr & 0xffffffff);
425 rcar_dmac_chan_write(chan, RCAR_DMATCR,
426 chunk->size >> desc->xfer_shift);
427
428 chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
429 }
430
431 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
432}
433
434static int rcar_dmac_init(struct rcar_dmac *dmac)
435{
436 u16 dmaor;
437
438 /* Clear all channels and enable the DMAC globally. */
439 rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
440 rcar_dmac_write(dmac, RCAR_DMAOR,
441 RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
442
443 dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
444 if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
445 dev_warn(dmac->dev, "DMAOR initialization failed.\n");
446 return -EIO;
447 }
448
449 return 0;
450}
451
452/* -----------------------------------------------------------------------------
453 * Descriptors submission
454 */
455
456static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
457{
458 struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
459 struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
460 unsigned long flags;
461 dma_cookie_t cookie;
462
463 spin_lock_irqsave(&chan->lock, flags);
464
465 cookie = dma_cookie_assign(tx);
466
467 dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
468 chan->index, tx->cookie, desc);
469
470 list_add_tail(&desc->node, &chan->desc.pending);
471 desc->running = list_first_entry(&desc->chunks,
472 struct rcar_dmac_xfer_chunk, node);
473
474 spin_unlock_irqrestore(&chan->lock, flags);
475
476 return cookie;
477}
478
479/* -----------------------------------------------------------------------------
480 * Descriptors allocation and free
481 */
482
483/*
484 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
485 * @chan: the DMA channel
486 * @gfp: allocation flags
487 */
488static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
489{
490 struct rcar_dmac_desc_page *page;
491 unsigned long flags;
492 LIST_HEAD(list);
493 unsigned int i;
494
495 page = (void *)get_zeroed_page(gfp);
496 if (!page)
497 return -ENOMEM;
498
499 for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
500 struct rcar_dmac_desc *desc = &page->descs[i];
501
502 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
503 desc->async_tx.tx_submit = rcar_dmac_tx_submit;
504 INIT_LIST_HEAD(&desc->chunks);
505
506 list_add_tail(&desc->node, &list);
507 }
508
509 spin_lock_irqsave(&chan->lock, flags);
510 list_splice_tail(&list, &chan->desc.free);
511 list_add_tail(&page->node, &chan->desc.pages);
512 spin_unlock_irqrestore(&chan->lock, flags);
513
514 return 0;
515}
516
517/*
518 * rcar_dmac_desc_put - Release a DMA transfer descriptor
519 * @chan: the DMA channel
520 * @desc: the descriptor
521 *
522 * Put the descriptor and its transfer chunk descriptors back in the channel's
523 * free descriptors lists. The descriptor's chunks list will be reinitialized to
524 * an empty list as a result.
525 *
526 * The descriptor must have been removed from the channel's lists before calling
527 * this function.
528 */
529static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
530 struct rcar_dmac_desc *desc)
531{
532 unsigned long flags;
533
534 spin_lock_irqsave(&chan->lock, flags);
535 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
536 list_add(&desc->node, &chan->desc.free);
537 spin_unlock_irqrestore(&chan->lock, flags);
538}
539
540static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
541{
542 struct rcar_dmac_desc *desc, *_desc;
543 unsigned long flags;
544 LIST_HEAD(list);
545
546 /*
547 * We have to temporarily move all descriptors from the wait list to a
548 * local list as iterating over the wait list, even with
549 * list_for_each_entry_safe, isn't safe if we release the channel lock
550 * around the rcar_dmac_desc_put() call.
551 */
552 spin_lock_irqsave(&chan->lock, flags);
553 list_splice_init(&chan->desc.wait, &list);
554 spin_unlock_irqrestore(&chan->lock, flags);
555
556 list_for_each_entry_safe(desc, _desc, &list, node) {
557 if (async_tx_test_ack(&desc->async_tx)) {
558 list_del(&desc->node);
559 rcar_dmac_desc_put(chan, desc);
560 }
561 }
562
563 if (list_empty(&list))
564 return;
565
566 /* Put the remaining descriptors back in the wait list. */
567 spin_lock_irqsave(&chan->lock, flags);
568 list_splice(&list, &chan->desc.wait);
569 spin_unlock_irqrestore(&chan->lock, flags);
570}
571
572/*
573 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
574 * @chan: the DMA channel
575 *
576 * Locking: This function must be called in a non-atomic context.
577 *
578 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
579 * be allocated.
580 */
581static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
582{
583 struct rcar_dmac_desc *desc;
584 unsigned long flags;
585 int ret;
586
587 /* Recycle acked descriptors before attempting allocation. */
588 rcar_dmac_desc_recycle_acked(chan);
589
590 spin_lock_irqsave(&chan->lock, flags);
591
592 while (list_empty(&chan->desc.free)) {
593 /*
594 * No free descriptors, allocate a page worth of them and try
595 * again, as someone else could race us to get the newly
596 * allocated descriptors. If the allocation fails return an
597 * error.
598 */
599 spin_unlock_irqrestore(&chan->lock, flags);
600 ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
601 if (ret < 0)
602 return NULL;
603 spin_lock_irqsave(&chan->lock, flags);
604 }
605
606 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
607 list_del(&desc->node);
608
609 spin_unlock_irqrestore(&chan->lock, flags);
610
611 return desc;
612}
613
614/*
615 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
616 * @chan: the DMA channel
617 * @gfp: allocation flags
618 */
619static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
620{
621 struct rcar_dmac_desc_page *page;
622 unsigned long flags;
623 LIST_HEAD(list);
624 unsigned int i;
625
626 page = (void *)get_zeroed_page(gfp);
627 if (!page)
628 return -ENOMEM;
629
630 for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
631 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
632
633 list_add_tail(&chunk->node, &list);
634 }
635
636 spin_lock_irqsave(&chan->lock, flags);
637 list_splice_tail(&list, &chan->desc.chunks_free);
638 list_add_tail(&page->node, &chan->desc.pages);
639 spin_unlock_irqrestore(&chan->lock, flags);
640
641 return 0;
642}
643
644/*
645 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
646 * @chan: the DMA channel
647 *
648 * Locking: This function must be called in a non-atomic context.
649 *
650 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
651 * descriptor can be allocated.
652 */
653static struct rcar_dmac_xfer_chunk *
654rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
655{
656 struct rcar_dmac_xfer_chunk *chunk;
657 unsigned long flags;
658 int ret;
659
660 spin_lock_irqsave(&chan->lock, flags);
661
662 while (list_empty(&chan->desc.chunks_free)) {
663 /*
664 * No free descriptors, allocate a page worth of them and try
665 * again, as someone else could race us to get the newly
666 * allocated descriptors. If the allocation fails return an
667 * error.
668 */
669 spin_unlock_irqrestore(&chan->lock, flags);
670 ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
671 if (ret < 0)
672 return NULL;
673 spin_lock_irqsave(&chan->lock, flags);
674 }
675
676 chunk = list_first_entry(&chan->desc.chunks_free,
677 struct rcar_dmac_xfer_chunk, node);
678 list_del(&chunk->node);
679
680 spin_unlock_irqrestore(&chan->lock, flags);
681
682 return chunk;
683}
684
685static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
686 struct rcar_dmac_desc *desc, size_t size)
687{
688 /*
689 * dma_alloc_coherent() allocates memory in page size increments. To
690 * avoid reallocating the hardware descriptors when the allocated size
691 * wouldn't change align the requested size to a multiple of the page
692 * size.
693 */
694 size = PAGE_ALIGN(size);
695
696 if (desc->hwdescs.size == size)
697 return;
698
699 if (desc->hwdescs.mem) {
700 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
701 desc->hwdescs.mem, desc->hwdescs.dma);
702 desc->hwdescs.mem = NULL;
703 desc->hwdescs.size = 0;
704 }
705
706 if (!size)
707 return;
708
709 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
710 &desc->hwdescs.dma, GFP_NOWAIT);
711 if (!desc->hwdescs.mem)
712 return;
713
714 desc->hwdescs.size = size;
715}
716
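/*
 * rcar_dmac_fill_hwdesc - Fill the hardware descriptor list of a transfer
 * @chan: the DMA channel
 * @desc: the transfer descriptor
 *
 * One hardware descriptor is generated per transfer chunk so that the DMAC can
 * fetch the chunks itself when descriptor mode is enabled.
 */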
717static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
718 struct rcar_dmac_desc *desc)
719{
720 struct rcar_dmac_xfer_chunk *chunk;
721 struct rcar_dmac_hw_desc *hwdesc;
722
723 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
724
725 hwdesc = desc->hwdescs.mem;
726 if (!hwdesc)
727 return -ENOMEM;
728
729 list_for_each_entry(chunk, &desc->chunks, node) {
730 hwdesc->sar = chunk->src_addr;
731 hwdesc->dar = chunk->dst_addr;
732 hwdesc->tcr = chunk->size >> desc->xfer_shift;
733 hwdesc++;
734 }
735
736 return 0;
737}
738
739/* -----------------------------------------------------------------------------
740 * Stop and reset
741 */
742
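/* Stop the channel by clearing the enable (DE) bit together with the interrupt enable and status bits. */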
743static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
744{
745 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
746
747 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
748 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
749 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
750}
751
752static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
753{
754 struct rcar_dmac_desc *desc, *_desc;
755 unsigned long flags;
756 LIST_HEAD(descs);
757
758 spin_lock_irqsave(&chan->lock, flags);
759
760 /* Move all non-free descriptors to the local lists. */
761 list_splice_init(&chan->desc.pending, &descs);
762 list_splice_init(&chan->desc.active, &descs);
763 list_splice_init(&chan->desc.done, &descs);
764 list_splice_init(&chan->desc.wait, &descs);
765
766 chan->desc.running = NULL;
767
768 spin_unlock_irqrestore(&chan->lock, flags);
769
770 list_for_each_entry_safe(desc, _desc, &descs, node) {
771 list_del(&desc->node);
772 rcar_dmac_desc_put(chan, desc);
773 }
774}
775
776static void rcar_dmac_stop(struct rcar_dmac *dmac)
777{
778 rcar_dmac_write(dmac, RCAR_DMAOR, 0);
779}
780
781static void rcar_dmac_abort(struct rcar_dmac *dmac)
782{
783 unsigned int i;
784
785 /* Stop all channels. */
786 for (i = 0; i < dmac->n_channels; ++i) {
787 struct rcar_dmac_chan *chan = &dmac->channels[i];
788
789 /* Stop and reinitialize the channel. */
790 spin_lock(&chan->lock);
791 rcar_dmac_chan_halt(chan);
792 spin_unlock(&chan->lock);
793
794 rcar_dmac_chan_reinit(chan);
795 }
796}
797
798/* -----------------------------------------------------------------------------
799 * Descriptors preparation
800 */
801
802static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
803 struct rcar_dmac_desc *desc)
804{
805 static const u32 chcr_ts[] = {
806 RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
807 RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
808 RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
809 RCAR_DMACHCR_TS_64B,
810 };
811
812 unsigned int xfer_size;
813 u32 chcr;
814
815 switch (desc->direction) {
816 case DMA_DEV_TO_MEM:
817 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
818 | RCAR_DMACHCR_RS_DMARS;
819 xfer_size = chan->src.xfer_size;
820 break;
821
822 case DMA_MEM_TO_DEV:
823 chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
824 | RCAR_DMACHCR_RS_DMARS;
825 xfer_size = chan->dst.xfer_size;
826 break;
827
828 case DMA_MEM_TO_MEM:
829 default:
830 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
831 | RCAR_DMACHCR_RS_AUTO;
832 xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
833 break;
834 }
835
836 desc->xfer_shift = ilog2(xfer_size);
837 desc->chcr = chcr | chcr_ts[desc->xfer_shift];
838}
839
840/*
841 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
842 *
843 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
844 * converted to scatter-gather to guarantee consistent locking and a correct
845 * list manipulation. For slave DMA, direction carries the usual meaning: the
846 * SG list describes RAM and the addr variable contains the slave address, e.g.
847 * the FIFO I/O register. For MEMCPY, direction equals DMA_MEM_TO_MEM, and the
848 * SG list contains a single element pointing at the source buffer.
849 */
850static struct dma_async_tx_descriptor *
851rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
852 unsigned int sg_len, dma_addr_t dev_addr,
853 enum dma_transfer_direction dir, unsigned long dma_flags,
854 bool cyclic)
855{
856 struct rcar_dmac_xfer_chunk *chunk;
857 struct rcar_dmac_desc *desc;
858 struct scatterlist *sg;
859 unsigned int nchunks = 0;
860 unsigned int max_chunk_size;
861 unsigned int full_size = 0;
862 bool cross_boundary = false;
863 unsigned int i;
864#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
865 u32 high_dev_addr;
866 u32 high_mem_addr;
867#endif
868
869 desc = rcar_dmac_desc_get(chan);
870 if (!desc)
871 return NULL;
872
873 desc->async_tx.flags = dma_flags;
874 desc->async_tx.cookie = -EBUSY;
875
876 desc->cyclic = cyclic;
877 desc->direction = dir;
878
879 rcar_dmac_chan_configure_desc(chan, desc);
880
881 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
882
883 /*
884 * Allocate and fill the transfer chunk descriptors. We own the only
885 * reference to the DMA descriptor, there's no need for locking.
886 */
887 for_each_sg(sgl, sg, sg_len, i) {
888 dma_addr_t mem_addr = sg_dma_address(sg);
889 unsigned int len = sg_dma_len(sg);
890
891 full_size += len;
892
893#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
894 if (i == 0) {
895 high_dev_addr = dev_addr >> 32;
896 high_mem_addr = mem_addr >> 32;
897 }
898
899 if ((dev_addr >> 32 != high_dev_addr) ||
900 (mem_addr >> 32 != high_mem_addr))
901 cross_boundary = true;
902#endif
903 while (len) {
904 unsigned int size = min(len, max_chunk_size);
905
906#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
907 /*
908 * Prevent individual transfers from crossing 4GB
909 * boundaries.
910 */
911 if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
912 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
913 cross_boundary = true;
914 }
915 if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
916 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
917 cross_boundary = true;
918 }
919#endif
920
921 chunk = rcar_dmac_xfer_chunk_get(chan);
922 if (!chunk) {
923 rcar_dmac_desc_put(chan, desc);
924 return NULL;
925 }
926
927 if (dir == DMA_DEV_TO_MEM) {
928 chunk->src_addr = dev_addr;
929 chunk->dst_addr = mem_addr;
930 } else {
931 chunk->src_addr = mem_addr;
932 chunk->dst_addr = dev_addr;
933 }
934
935 chunk->size = size;
936
937 dev_dbg(chan->chan.device->dev,
938 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
939 chan->index, chunk, desc, i, sg, size, len,
940 &chunk->src_addr, &chunk->dst_addr);
941
942 mem_addr += size;
943 if (dir == DMA_MEM_TO_MEM)
944 dev_addr += size;
945
946 len -= size;
947
948 list_add_tail(&chunk->node, &desc->chunks);
949 nchunks++;
950 }
951 }
952
953 desc->nchunks = nchunks;
954 desc->size = full_size;
955
956 /*
957 * Use hardware descriptor lists if possible when more than one chunk
958 * needs to be transferred (otherwise they don't make much sense).
959 *
960 * The source and destination addresses must all be located in the same
961 * 4 GiB region of the 40-bit address space to use hardware descriptors;
962 * cross_boundary tracks whether that condition holds.
963 */
964 desc->hwdescs.use = !cross_boundary && nchunks > 1;
965 if (desc->hwdescs.use) {
966 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
967 desc->hwdescs.use = false;
968 }
969
970 return &desc->async_tx;
971}
972
973/* -----------------------------------------------------------------------------
974 * DMA engine operations
975 */
976
977static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
978{
979 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
980 int ret;
981
982 INIT_LIST_HEAD(&rchan->desc.chunks_free);
983 INIT_LIST_HEAD(&rchan->desc.pages);
984
985 /* Preallocate descriptors. */
986 ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
987 if (ret < 0)
988 return -ENOMEM;
989
990 ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
991 if (ret < 0)
992 return -ENOMEM;
993
994 return pm_runtime_get_sync(chan->device->dev);
995}
996
997static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
998{
999 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1000 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
1001 struct rcar_dmac_chan_map *map = &rchan->map;
1002 struct rcar_dmac_desc_page *page, *_page;
1003 struct rcar_dmac_desc *desc;
1004 LIST_HEAD(list);
1005
1006 /* Protect against ISR */
1007 spin_lock_irq(&rchan->lock);
1008 rcar_dmac_chan_halt(rchan);
1009 spin_unlock_irq(&rchan->lock);
1010
1011 /* Now no new interrupts will occur */
1012
1013 if (rchan->mid_rid >= 0) {
1014 /* The caller is holding dma_list_mutex */
1015 clear_bit(rchan->mid_rid, dmac->modules);
1016 rchan->mid_rid = -EINVAL;
1017 }
1018
1019 list_splice_init(&rchan->desc.free, &list);
1020 list_splice_init(&rchan->desc.pending, &list);
1021 list_splice_init(&rchan->desc.active, &list);
1022 list_splice_init(&rchan->desc.done, &list);
1023 list_splice_init(&rchan->desc.wait, &list);
1024
1025 rchan->desc.running = NULL;
1026
1027 list_for_each_entry(desc, &list, node)
1028 rcar_dmac_realloc_hwdesc(rchan, desc, 0);
1029
1030 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
1031 list_del(&page->node);
1032 free_page((unsigned long)page);
1033 }
1034
1035 /* Remove slave mapping if present. */
1036 if (map->slave.xfer_size) {
1037 dma_unmap_resource(chan->device->dev, map->addr,
1038 map->slave.xfer_size, map->dir, 0);
1039 map->slave.xfer_size = 0;
1040 }
1041
1042 pm_runtime_put(chan->device->dev);
1043}
1044
1045static struct dma_async_tx_descriptor *
1046rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
1047 dma_addr_t dma_src, size_t len, unsigned long flags)
1048{
1049 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1050 struct scatterlist sgl;
1051
1052 if (!len)
1053 return NULL;
1054
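	/*
	 * Describe the source buffer with a single-entry scatterlist so that
	 * the common rcar_dmac_chan_prep_sg() path can be reused.
	 */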
1055 sg_init_table(&sgl, 1);
1056 sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
1057 offset_in_page(dma_src));
1058 sg_dma_address(&sgl) = dma_src;
1059 sg_dma_len(&sgl) = len;
1060
1061 return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
1062 DMA_MEM_TO_MEM, flags, false);
1063}
1064
1065static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
1066 enum dma_transfer_direction dir)
1067{
1068 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1069 struct rcar_dmac_chan_map *map = &rchan->map;
1070 phys_addr_t dev_addr;
1071 size_t dev_size;
1072 enum dma_data_direction dev_dir;
1073
1074 if (dir == DMA_DEV_TO_MEM) {
1075 dev_addr = rchan->src.slave_addr;
1076 dev_size = rchan->src.xfer_size;
1077 dev_dir = DMA_TO_DEVICE;
1078 } else {
1079 dev_addr = rchan->dst.slave_addr;
1080 dev_size = rchan->dst.xfer_size;
1081 dev_dir = DMA_FROM_DEVICE;
1082 }
1083
1084 /* Reuse current map if possible. */
1085 if (dev_addr == map->slave.slave_addr &&
1086 dev_size == map->slave.xfer_size &&
1087 dev_dir == map->dir)
1088 return 0;
1089
1090 /* Remove old mapping if present. */
1091 if (map->slave.xfer_size)
1092 dma_unmap_resource(chan->device->dev, map->addr,
1093 map->slave.xfer_size, map->dir, 0);
1094 map->slave.xfer_size = 0;
1095
1096 /* Create new slave address map. */
1097 map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
1098 dev_dir, 0);
1099
1100 if (dma_mapping_error(chan->device->dev, map->addr)) {
1101 dev_err(chan->device->dev,
1102 "chan%u: failed to map %zx@%pap", rchan->index,
1103 dev_size, &dev_addr);
1104 return -EIO;
1105 }
1106
1107 dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
1108 rchan->index, dev_size, &dev_addr, &map->addr,
1109 dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
1110
1111 map->slave.slave_addr = dev_addr;
1112 map->slave.xfer_size = dev_size;
1113 map->dir = dev_dir;
1114
1115 return 0;
1116}
1117
1118static struct dma_async_tx_descriptor *
1119rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1120 unsigned int sg_len, enum dma_transfer_direction dir,
1121 unsigned long flags, void *context)
1122{
1123 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1124
1125 /* Someone calling slave DMA on a generic channel? */
1126 if (rchan->mid_rid < 0 || !sg_len) {
1127 dev_warn(chan->device->dev,
1128 "%s: bad parameter: len=%d, id=%d\n",
1129 __func__, sg_len, rchan->mid_rid);
1130 return NULL;
1131 }
1132
1133 if (rcar_dmac_map_slave_addr(chan, dir))
1134 return NULL;
1135
1136 return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
1137 dir, flags, false);
1138}
1139
1140#define RCAR_DMAC_MAX_SG_LEN 32
1141
1142static struct dma_async_tx_descriptor *
1143rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
1144 size_t buf_len, size_t period_len,
1145 enum dma_transfer_direction dir, unsigned long flags)
1146{
1147 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1148 struct dma_async_tx_descriptor *desc;
1149 struct scatterlist *sgl;
1150 unsigned int sg_len;
1151 unsigned int i;
1152
1153 /* Someone calling slave DMA on a generic channel? */
1154 if (rchan->mid_rid < 0 || buf_len < period_len) {
1155 dev_warn(chan->device->dev,
1156 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
1157 __func__, buf_len, period_len, rchan->mid_rid);
1158 return NULL;
1159 }
1160
1161 if (rcar_dmac_map_slave_addr(chan, dir))
1162 return NULL;
1163
1164 sg_len = buf_len / period_len;
1165 if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
1166 dev_err(chan->device->dev,
1167 "chan%u: sg length %d exceeds limit %d",
1168 rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
1169 return NULL;
1170 }
1171
1172 /*
1173 * Allocate the sg list dynamically as it would consume too much stack
1174 * space.
1175 */
1176 sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
1177 if (!sgl)
1178 return NULL;
1179
1180 sg_init_table(sgl, sg_len);
1181
1182 for (i = 0; i < sg_len; ++i) {
1183 dma_addr_t src = buf_addr + (period_len * i);
1184
1185 sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
1186 offset_in_page(src));
1187 sg_dma_address(&sgl[i]) = src;
1188 sg_dma_len(&sgl[i]) = period_len;
1189 }
1190
1191 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
1192 dir, flags, true);
1193
1194 kfree(sgl);
1195 return desc;
1196}
1197
1198static int rcar_dmac_device_config(struct dma_chan *chan,
1199 struct dma_slave_config *cfg)
1200{
1201 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1202
1203 /*
1204 * We could lock this, but you shouldn't be configuring the
1205 * channel, while using it...
1206 */
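	/*
	 * The dma_slave_buswidth values are expressed in bytes, so the address
	 * widths can be stored directly as the hardware transfer sizes.
	 */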
1207 rchan->src.slave_addr = cfg->src_addr;
1208 rchan->dst.slave_addr = cfg->dst_addr;
1209 rchan->src.xfer_size = cfg->src_addr_width;
1210 rchan->dst.xfer_size = cfg->dst_addr_width;
1211
1212 return 0;
1213}
1214
1215static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
1216{
1217 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1218 unsigned long flags;
1219
1220 spin_lock_irqsave(&rchan->lock, flags);
1221 rcar_dmac_chan_halt(rchan);
1222 spin_unlock_irqrestore(&rchan->lock, flags);
1223
1224 /*
1225 * FIXME: No new interrupt can occur now, but the IRQ thread might still
1226 * be running.
1227 */
1228
1229 rcar_dmac_chan_reinit(rchan);
1230
1231 return 0;
1232}
1233
1234static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
1235 dma_cookie_t cookie)
1236{
1237 struct rcar_dmac_desc *desc = chan->desc.running;
1238 struct rcar_dmac_xfer_chunk *running = NULL;
1239 struct rcar_dmac_xfer_chunk *chunk;
1240 enum dma_status status;
1241 unsigned int residue = 0;
1242 unsigned int dptr = 0;
1243
1244 if (!desc)
1245 return 0;
1246
1247 /*
1248 * If the cookie corresponds to a descriptor that has been completed
1249 * there is no residue. The same check has already been performed by the
1250 * caller but without holding the channel lock, so the descriptor could
1251 * now be complete.
1252 */
1253 status = dma_cookie_status(&chan->chan, cookie, NULL);
1254 if (status == DMA_COMPLETE)
1255 return 0;
1256
1257 /*
1258 * If the cookie doesn't correspond to the currently running transfer
1259 * then the descriptor hasn't been processed yet, and the residue is
1260 * equal to the full descriptor size.
1261 */
1262 if (cookie != desc->async_tx.cookie) {
1263 list_for_each_entry(desc, &chan->desc.pending, node) {
1264 if (cookie == desc->async_tx.cookie)
1265 return desc->size;
1266 }
1267 list_for_each_entry(desc, &chan->desc.active, node) {
1268 if (cookie == desc->async_tx.cookie)
1269 return desc->size;
1270 }
1271
1272 /*
1273 * No descriptor found for the cookie, there's thus no residue.
1274 * This shouldn't happen if the calling driver passes a correct
1275 * cookie value.
1276 */
1277 WARN(1, "No descriptor for cookie!");
1278 return 0;
1279 }
1280
1281 /*
1282 * In descriptor mode the descriptor running pointer is not maintained
1283 * by the interrupt handler, find the running descriptor from the
1284 * descriptor pointer field in the CHCRB register. In non-descriptor
1285 * mode just use the running descriptor pointer.
1286 */
1287 if (desc->hwdescs.use) {
1288 dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1289 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1290 WARN_ON(dptr >= desc->nchunks);
1291 } else {
1292 running = desc->running;
1293 }
1294
1295 /* Compute the size of all chunks still to be transferred. */
1296 list_for_each_entry_reverse(chunk, &desc->chunks, node) {
1297 if (chunk == running || ++dptr == desc->nchunks)
1298 break;
1299
1300 residue += chunk->size;
1301 }
1302
1303 /* Add the residue for the current chunk. */
1304 residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
1305
1306 return residue;
1307}
1308
1309static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
1310 dma_cookie_t cookie,
1311 struct dma_tx_state *txstate)
1312{
1313 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1314 enum dma_status status;
1315 unsigned long flags;
1316 unsigned int residue;
1317
1318 status = dma_cookie_status(chan, cookie, txstate);
1319 if (status == DMA_COMPLETE || !txstate)
1320 return status;
1321
1322 spin_lock_irqsave(&rchan->lock, flags);
1323 residue = rcar_dmac_chan_get_residue(rchan, cookie);
1324 spin_unlock_irqrestore(&rchan->lock, flags);
1325
1326 /* if there's no residue, the cookie is complete */
1327 if (!residue)
1328 return DMA_COMPLETE;
1329
1330 dma_set_residue(txstate, residue);
1331
1332 return status;
1333}
1334
1335static void rcar_dmac_issue_pending(struct dma_chan *chan)
1336{
1337 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1338 unsigned long flags;
1339
1340 spin_lock_irqsave(&rchan->lock, flags);
1341
1342 if (list_empty(&rchan->desc.pending))
1343 goto done;
1344
1345 /* Append the pending list to the active list. */
1346 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
1347
1348 /*
1349 * If no transfer is running pick the first descriptor from the active
1350 * list and start the transfer.
1351 */
1352 if (!rchan->desc.running) {
1353 struct rcar_dmac_desc *desc;
1354
1355 desc = list_first_entry(&rchan->desc.active,
1356 struct rcar_dmac_desc, node);
1357 rchan->desc.running = desc;
1358
1359 rcar_dmac_chan_start_xfer(rchan);
1360 }
1361
1362done:
1363 spin_unlock_irqrestore(&rchan->lock, flags);
1364}
1365
1366/* -----------------------------------------------------------------------------
1367 * IRQ handling
1368 */
1369
1370static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
1371{
1372 struct rcar_dmac_desc *desc = chan->desc.running;
1373 unsigned int stage;
1374
1375 if (WARN_ON(!desc || !desc->cyclic)) {
1376 /*
1377 * This should never happen, there should always be a running
1378 * cyclic descriptor when a descriptor stage end interrupt is
1379 * triggered. Warn and return.
1380 */
1381 return IRQ_NONE;
1382 }
1383
1384 /* Program the interrupt pointer to the next stage. */
1385 stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1386 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1387 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
1388
1389 return IRQ_WAKE_THREAD;
1390}
1391
1392static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
1393{
1394 struct rcar_dmac_desc *desc = chan->desc.running;
1395 irqreturn_t ret = IRQ_WAKE_THREAD;
1396
1397 if (WARN_ON_ONCE(!desc)) {
1398 /*
1399 * This should never happen, there should always be a running
1400 * descriptor when a transfer end interrupt is triggered. Warn
1401 * and return.
1402 */
1403 return IRQ_NONE;
1404 }
1405
1406 /*
1407 * The transfer end interrupt isn't generated for each chunk when using
1408 * descriptor mode. Only update the running chunk pointer in
1409 * non-descriptor mode.
1410 */
1411 if (!desc->hwdescs.use) {
1412 /*
1413 * If we haven't completed the last transfer chunk simply move
1414 * to the next one. Only wake the IRQ thread if the transfer is
1415 * cyclic.
1416 */
1417 if (!list_is_last(&desc->running->node, &desc->chunks)) {
1418 desc->running = list_next_entry(desc->running, node);
1419 if (!desc->cyclic)
1420 ret = IRQ_HANDLED;
1421 goto done;
1422 }
1423
1424 /*
1425 * We've completed the last transfer chunk. If the transfer is
1426 * cyclic, move back to the first one.
1427 */
1428 if (desc->cyclic) {
1429 desc->running =
1430 list_first_entry(&desc->chunks,
1431 struct rcar_dmac_xfer_chunk,
1432 node);
1433 goto done;
1434 }
1435 }
1436
1437 /* The descriptor is complete, move it to the done list. */
1438 list_move_tail(&desc->node, &chan->desc.done);
1439
1440 /* Queue the next descriptor, if any. */
1441 if (!list_empty(&chan->desc.active))
1442 chan->desc.running = list_first_entry(&chan->desc.active,
1443 struct rcar_dmac_desc,
1444 node);
1445 else
1446 chan->desc.running = NULL;
1447
1448done:
1449 if (chan->desc.running)
1450 rcar_dmac_chan_start_xfer(chan);
1451
1452 return ret;
1453}
1454
1455static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
1456{
1457 u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
1458 struct rcar_dmac_chan *chan = dev;
1459 irqreturn_t ret = IRQ_NONE;
1460 u32 chcr;
1461
1462 spin_lock(&chan->lock);
1463
1464 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
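	/*
	 * Clear the interrupt flags. Also clear the DE bit when the transfer
	 * has completed (TE set) so that the channel can be restarted.
	 */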
1465 if (chcr & RCAR_DMACHCR_TE)
1466 mask |= RCAR_DMACHCR_DE;
1467 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
1468
1469 if (chcr & RCAR_DMACHCR_DSE)
1470 ret |= rcar_dmac_isr_desc_stage_end(chan);
1471
1472 if (chcr & RCAR_DMACHCR_TE)
1473 ret |= rcar_dmac_isr_transfer_end(chan);
1474
1475 spin_unlock(&chan->lock);
1476
1477 return ret;
1478}
1479
1480static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
1481{
1482 struct rcar_dmac_chan *chan = dev;
1483 struct rcar_dmac_desc *desc;
1484 struct dmaengine_desc_callback cb;
1485
1486 spin_lock_irq(&chan->lock);
1487
1488 /* For cyclic transfers notify the user after every chunk. */
1489 if (chan->desc.running && chan->desc.running->cyclic) {
1490 desc = chan->desc.running;
1491 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1492
1493 if (dmaengine_desc_callback_valid(&cb)) {
1494 spin_unlock_irq(&chan->lock);
1495 dmaengine_desc_callback_invoke(&cb, NULL);
1496 spin_lock_irq(&chan->lock);
1497 }
1498 }
1499
1500 /*
1501 * Call the callback function for all descriptors on the done list and
1502 * move them to the ack wait list.
1503 */
1504 while (!list_empty(&chan->desc.done)) {
1505 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
1506 node);
1507 dma_cookie_complete(&desc->async_tx);
1508 list_del(&desc->node);
1509
1510 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1511 if (dmaengine_desc_callback_valid(&cb)) {
1512 spin_unlock_irq(&chan->lock);
1513 /*
1514 * We own the only reference to this descriptor, we can
1515 * safely dereference it without holding the channel
1516 * lock.
1517 */
1518 dmaengine_desc_callback_invoke(&cb, NULL);
1519 spin_lock_irq(&chan->lock);
1520 }
1521
1522 list_add_tail(&desc->node, &chan->desc.wait);
1523 }
1524
1525 spin_unlock_irq(&chan->lock);
1526
1527 /* Recycle all acked descriptors. */
1528 rcar_dmac_desc_recycle_acked(chan);
1529
1530 return IRQ_HANDLED;
1531}
1532
1533static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
1534{
1535 struct rcar_dmac *dmac = data;
1536
1537 if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
1538 return IRQ_NONE;
1539
1540 /*
1541 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
1542 * abort transfers on all channels, and reinitialize the DMAC.
1543 */
1544 rcar_dmac_stop(dmac);
1545 rcar_dmac_abort(dmac);
1546 rcar_dmac_init(dmac);
1547
1548 return IRQ_HANDLED;
1549}
1550
1551/* -----------------------------------------------------------------------------
1552 * OF xlate and channel filter
1553 */
1554
1555static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
1556{
1557 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
1558 struct of_phandle_args *dma_spec = arg;
1559
1560 /*
1561 * FIXME: Using a filter on OF platforms is nonsense. The OF xlate
1562 * function knows which device it wants to allocate a channel from,
1563 * and would be perfectly capable of selecting the channel it wants.
1564 * Forcing it to call dma_request_channel() and iterate through all
1565 * channels from all controllers is just pointless.
1566 */
1567 if (chan->device->device_config != rcar_dmac_device_config ||
1568 dma_spec->np != chan->device->dev->of_node)
1569 return false;
1570
1571 return !test_and_set_bit(dma_spec->args[0], dmac->modules);
1572}
1573
1574static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
1575 struct of_dma *ofdma)
1576{
1577 struct rcar_dmac_chan *rchan;
1578 struct dma_chan *chan;
1579 dma_cap_mask_t mask;
1580
1581 if (dma_spec->args_count != 1)
1582 return NULL;
1583
1584 /* Only slave DMA channels can be allocated via DT */
1585 dma_cap_zero(mask);
1586 dma_cap_set(DMA_SLAVE, mask);
1587
1588 chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
1589 if (!chan)
1590 return NULL;
1591
1592 rchan = to_rcar_dmac_chan(chan);
1593 rchan->mid_rid = dma_spec->args[0];
1594
1595 return chan;
1596}
1597
1598/* -----------------------------------------------------------------------------
1599 * Power management
1600 */
1601
1602#ifdef CONFIG_PM_SLEEP
1603static int rcar_dmac_sleep_suspend(struct device *dev)
1604{
1605 /*
1606 * TODO: Wait for the current transfer to complete and stop the device.
1607 */
1608 return 0;
1609}
1610
1611static int rcar_dmac_sleep_resume(struct device *dev)
1612{
1613 /* TODO: Resume transfers, if any. */
1614 return 0;
1615}
1616#endif
1617
1618#ifdef CONFIG_PM
1619static int rcar_dmac_runtime_suspend(struct device *dev)
1620{
1621 return 0;
1622}
1623
1624static int rcar_dmac_runtime_resume(struct device *dev)
1625{
1626 struct rcar_dmac *dmac = dev_get_drvdata(dev);
1627
1628 return rcar_dmac_init(dmac);
1629}
1630#endif
1631
1632static const struct dev_pm_ops rcar_dmac_pm = {
1633 SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
1634 SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
1635 NULL)
1636};
1637
1638/* -----------------------------------------------------------------------------
1639 * Probe and remove
1640 */
1641
1642static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
1643 struct rcar_dmac_chan *rchan,
1644 unsigned int index)
1645{
1646 struct platform_device *pdev = to_platform_device(dmac->dev);
1647 struct dma_chan *chan = &rchan->chan;
1648 char pdev_irqname[5];
1649 char *irqname;
1650 int irq;
1651 int ret;
1652
1653 rchan->index = index;
1654 rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
1655 rchan->mid_rid = -EINVAL;
1656
1657 spin_lock_init(&rchan->lock);
1658
1659 INIT_LIST_HEAD(&rchan->desc.free);
1660 INIT_LIST_HEAD(&rchan->desc.pending);
1661 INIT_LIST_HEAD(&rchan->desc.active);
1662 INIT_LIST_HEAD(&rchan->desc.done);
1663 INIT_LIST_HEAD(&rchan->desc.wait);
1664
1665 /* Request the channel interrupt. */
1666 sprintf(pdev_irqname, "ch%u", index);
1667 irq = platform_get_irq_byname(pdev, pdev_irqname);
1668 if (irq < 0) {
1669 dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
1670 return -ENODEV;
1671 }
1672
1673 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
1674 dev_name(dmac->dev), index);
1675 if (!irqname)
1676 return -ENOMEM;
1677
1678 ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
1679 rcar_dmac_isr_channel_thread, 0,
1680 irqname, rchan);
1681 if (ret) {
1682 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
1683 return ret;
1684 }
1685
1686 /*
1687 * Initialize the DMA engine channel and add it to the DMA engine
1688 * channels list.
1689 */
1690 chan->device = &dmac->engine;
1691 dma_cookie_init(chan);
1692
1693 list_add_tail(&chan->device_node, &dmac->engine.channels);
1694
1695 return 0;
1696}
1697
1698static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1699{
1700 struct device_node *np = dev->of_node;
1701 int ret;
1702
1703 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
1704 if (ret < 0) {
1705 dev_err(dev, "unable to read dma-channels property\n");
1706 return ret;
1707 }
1708
1709 if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
1710 dev_err(dev, "invalid number of channels %u\n",
1711 dmac->n_channels);
1712 return -EINVAL;
1713 }
1714
1715 return 0;
1716}
1717
1718static int rcar_dmac_probe(struct platform_device *pdev)
1719{
1720 const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
1721 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
1722 DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
1723 DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
1724 unsigned int channels_offset = 0;
1725 struct dma_device *engine;
1726 struct rcar_dmac *dmac;
1727 struct resource *mem;
1728 unsigned int i;
1729 char *irqname;
1730 int irq;
1731 int ret;
1732
1733 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
1734 if (!dmac)
1735 return -ENOMEM;
1736
1737 dmac->dev = &pdev->dev;
1738 platform_set_drvdata(pdev, dmac);
1739 dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
1740
1741 ret = rcar_dmac_parse_of(&pdev->dev, dmac);
1742 if (ret < 0)
1743 return ret;
1744
1745 /*
1746 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be
1747 * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
1748 * is connected to microTLB 0 on currently supported platforms, so we
1749 * can't use it with the IPMMU. As the IOMMU API operates at the device
1750 * level we can't disable it selectively, so ignore channel 0 for now if
1751 * the device is part of an IOMMU group.
1752 */
1753 if (pdev->dev.iommu_group) {
1754 dmac->n_channels--;
1755 channels_offset = 1;
1756 }
1757
1758 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
1759 sizeof(*dmac->channels), GFP_KERNEL);
1760 if (!dmac->channels)
1761 return -ENOMEM;
1762
1763 /* Request resources. */
1764 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1765 dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
1766 if (IS_ERR(dmac->iomem))
1767 return PTR_ERR(dmac->iomem);
1768
1769 irq = platform_get_irq_byname(pdev, "error");
1770 if (irq < 0) {
1771 dev_err(&pdev->dev, "no error IRQ specified\n");
1772 return -ENODEV;
1773 }
1774
1775 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
1776 dev_name(dmac->dev));
1777 if (!irqname)
1778 return -ENOMEM;
1779
1780 ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
1781 irqname, dmac);
1782 if (ret) {
1783 dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
1784 irq, ret);
1785 return ret;
1786 }
1787
1788 /* Enable runtime PM and initialize the device. */
1789 pm_runtime_enable(&pdev->dev);
1790 ret = pm_runtime_get_sync(&pdev->dev);
1791 if (ret < 0) {
1792 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
1793 return ret;
1794 }
1795
1796 ret = rcar_dmac_init(dmac);
1797 pm_runtime_put(&pdev->dev);
1798
1799 if (ret) {
1800 dev_err(&pdev->dev, "failed to reset device\n");
1801 goto error;
1802 }
1803
1804 /* Initialize the channels. */
1805 INIT_LIST_HEAD(&dmac->engine.channels);
1806
1807 for (i = 0; i < dmac->n_channels; ++i) {
1808 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
1809 i + channels_offset);
1810 if (ret < 0)
1811 goto error;
1812 }
1813
1814 /* Register the DMAC as a DMA provider for DT. */
1815 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
1816 NULL);
1817 if (ret < 0)
1818 goto error;
1819
1820 /*
1821 * Register the DMA engine device.
1822 *
1823 * MEMCPY transfers are hardcoded to 4-byte units and thus require 4-byte alignment.
1824 */
1825 engine = &dmac->engine;
1826 dma_cap_set(DMA_MEMCPY, engine->cap_mask);
1827 dma_cap_set(DMA_SLAVE, engine->cap_mask);
1828
1829 engine->dev = &pdev->dev;
1830 engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
1831
1832 engine->src_addr_widths = widths;
1833 engine->dst_addr_widths = widths;
1834 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1835 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1836
1837 engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
1838 engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
1839 engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
1840 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
1841 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
1842 engine->device_config = rcar_dmac_device_config;
1843 engine->device_terminate_all = rcar_dmac_chan_terminate_all;
1844 engine->device_tx_status = rcar_dmac_tx_status;
1845 engine->device_issue_pending = rcar_dmac_issue_pending;
1846
1847 ret = dma_async_device_register(engine);
1848 if (ret < 0)
1849 goto error;
1850
1851 return 0;
1852
1853error:
1854 of_dma_controller_free(pdev->dev.of_node);
1855 pm_runtime_disable(&pdev->dev);
1856 return ret;
1857}
1858
1859static int rcar_dmac_remove(struct platform_device *pdev)
1860{
1861 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1862
1863 of_dma_controller_free(pdev->dev.of_node);
1864 dma_async_device_unregister(&dmac->engine);
1865
1866 pm_runtime_disable(&pdev->dev);
1867
1868 return 0;
1869}
1870
1871static void rcar_dmac_shutdown(struct platform_device *pdev)
1872{
1873 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1874
1875 rcar_dmac_stop(dmac);
1876}
1877
1878static const struct of_device_id rcar_dmac_of_ids[] = {
1879 { .compatible = "renesas,rcar-dmac", },
1880 { /* Sentinel */ }
1881};
1882MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
1883
1884static struct platform_driver rcar_dmac_driver = {
1885 .driver = {
1886 .pm = &rcar_dmac_pm,
1887 .name = "rcar-dmac",
1888 .of_match_table = rcar_dmac_of_ids,
1889 },
1890 .probe = rcar_dmac_probe,
1891 .remove = rcar_dmac_remove,
1892 .shutdown = rcar_dmac_shutdown,
1893};
1894
1895module_platform_driver(rcar_dmac_driver);
1896
1897MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
1898MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
1899MODULE_LICENSE("GPL v2");