/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};

/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));

/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)

/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))

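/*
 * Sizing note (editorial addition, not from the original source): each
 * allocation grabs one zeroed page and carves it into as many entries as fit
 * after the page header. Assuming 4 KiB pages, 64-bit pointers and a 64-bit
 * dma_addr_t, a transfer chunk occupies 40 bytes, so roughly
 * (4096 - 16) / 40 = 102 chunks fit in a page; the descriptor count is much
 * lower since struct dma_async_tx_descriptor dominates its size.
 */
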
/*
 * struct rcar_dmac_chan_slave - Slave configuration
 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */
struct rcar_dmac_chan_slave {
	phys_addr_t slave_addr;
	unsigned int xfer_size;
};

/*
 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
 * @addr: slave dma address
 * @dir: direction of mapping
 * @slave: slave configuration that is mapped
 */
struct rcar_dmac_chan_map {
	dma_addr_t addr;
	enum dma_data_direction dir;
	struct rcar_dmac_chan_slave slave;
};

/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @map: slave DMA address map currently in use for this channel
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;
	struct rcar_dmac_chan_map map;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)

/*
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;

	DECLARE_BITMAP(modules, 256);
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)

/* -----------------------------------------------------------------------------
 * Registers
 */

#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4

/* -----------------------------------------------------------------------------
 * Device access
 */

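/*
 * Note (editorial addition): DMAOR and DMARS are 16-bit registers, while the
 * other registers accessed by this driver are 32-bit, hence the
 * readw()/writew() special cases in the accessors below.
 */
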
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}

/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}

static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	if (desc->hwdescs.use) {
		struct rcar_dmac_xfer_chunk *chunk =
			list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at the beginning of the transfer by the
		 * DMAC as it should be. Initialize it manually with the
		 * destination address of the first chunk.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the end
		 * of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
}

static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptor lists. The descriptor's chunks list will be reinitialized to
 * an empty list as a result.
 *
 * The descriptor must have been removed from the channel's lists before calling
 * this function.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add(&desc->node, &chan->desc.free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	/*
	 * We have to temporarily move all descriptors from the wait list to a
	 * local list as iterating over the wait list, even with
	 * list_for_each_entry_safe, isn't safe if we release the channel lock
	 * around the rcar_dmac_desc_put() call.
	 */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice_init(&chan->desc.wait, &list);
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}

	if (list_empty(&list))
		return;

	/* Put the remaining descriptors back in the wait list. */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice(&list, &chan->desc.wait);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	unsigned long flags;
	int ret;

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
	list_del(&desc->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return desc;
}

/*
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

		list_add_tail(&chunk->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.chunks_free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_xfer_chunk *chunk;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.chunks_free)) {
		/*
		 * No free chunks, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated chunks. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	chunk = list_first_entry(&chan->desc.chunks_free,
				 struct rcar_dmac_xfer_chunk, node);
	list_del(&chunk->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return chunk;
}

static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
				     struct rcar_dmac_desc *desc, size_t size)
{
	/*
	 * dma_alloc_coherent() allocates memory in page size increments. To
	 * avoid reallocating the hardware descriptors when the allocated size
	 * wouldn't change, align the requested size to a multiple of the page
	 * size.
	 */
	size = PAGE_ALIGN(size);

	if (desc->hwdescs.size == size)
		return;

	if (desc->hwdescs.mem) {
		dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
				  desc->hwdescs.mem, desc->hwdescs.dma);
		desc->hwdescs.mem = NULL;
		desc->hwdescs.size = 0;
	}

	if (!size)
		return;

	desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
					       &desc->hwdescs.dma, GFP_NOWAIT);
	if (!desc->hwdescs.mem)
		return;

	desc->hwdescs.size = size;
}

static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
				 struct rcar_dmac_desc *desc)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_hw_desc *hwdesc;

	rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));

	hwdesc = desc->hwdescs.mem;
	if (!hwdesc)
		return -ENOMEM;

	list_for_each_entry(chunk, &desc->chunks, node) {
		hwdesc->sar = chunk->src_addr;
		hwdesc->dar = chunk->dst_addr;
		hwdesc->tcr = chunk->size >> desc->xfer_shift;
		hwdesc++;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Stop and reset
 */
static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
{
	u32 chcr;
	unsigned int i;

	/*
	 * Ensure that the DE bit reads back as 0 after we cleared it; the
	 * hardware may need some time to actually stop the channel.
	 */
	for (i = 0; i < 1024; i++) {
		chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
		if (!(chcr & RCAR_DMACHCR_DE))
			return;
		udelay(1);
	}

	dev_err(chan->chan.device->dev, "CHCR DE check error\n");
}

static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	if (!(chcr & RCAR_DMACHCR_DE))
		return;

	/* set DE=0 and flush remaining data */
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));

	/* make sure all remaining data was flushed */
	rcar_dmac_chcr_de_barrier(chan);

	/* restore DE */
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
}

static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
		  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
	rcar_dmac_chcr_de_barrier(chan);
}

static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&chan->lock, flags);

	/* Move all non-free descriptors to the local lists. */
	list_splice_init(&chan->desc.pending, &descs);
	list_splice_init(&chan->desc.active, &descs);
	list_splice_init(&chan->desc.done, &descs);
	list_splice_init(&chan->desc.wait, &descs);

	chan->desc.running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &descs, node) {
		list_del(&desc->node);
		rcar_dmac_desc_put(chan, desc);
	}
}

static void rcar_dmac_stop(struct rcar_dmac *dmac)
{
	rcar_dmac_write(dmac, RCAR_DMAOR, 0);
}

static void rcar_dmac_abort(struct rcar_dmac *dmac)
{
	unsigned int i;

	/* Stop all channels. */
	for (i = 0; i < dmac->n_channels; ++i) {
		struct rcar_dmac_chan *chan = &dmac->channels[i];

		/* Stop and reinitialize the channel. */
		spin_lock(&chan->lock);
		rcar_dmac_chan_halt(chan);
		spin_unlock(&chan->lock);

		rcar_dmac_chan_reinit(chan);
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
					  struct rcar_dmac_desc *desc)
{
	static const u32 chcr_ts[] = {
		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
		RCAR_DMACHCR_TS_64B,
	};

	unsigned int xfer_size;
	u32 chcr;

	switch (desc->direction) {
	case DMA_DEV_TO_MEM:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->src.xfer_size;
		break;

	case DMA_MEM_TO_DEV:
		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->dst.xfer_size;
		break;

	case DMA_MEM_TO_MEM:
	default:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_AUTO;
		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
		break;
	}

	desc->xfer_shift = ilog2(xfer_size);
	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
}

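/*
 * Worked example (editorial note): a slave configured for 4-byte accesses
 * gives xfer_shift = ilog2(4) = 2 and chcr_ts[2] == RCAR_DMACHCR_TS_4B, so
 * the CHCR TS field matches the per-beat width. Transfer counts are then
 * expressed in units of (1 << xfer_shift) bytes, which is why chunk sizes
 * are shifted right by xfer_shift before being written to TCR.
 */
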
/*
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning: the
 * SG list describes RAM and the addr variable contains the slave address,
 * e.g. the FIFO I/O register. For MEMCPY, direction equals DMA_MEM_TO_MEM
 * and the SG list contains a single element pointing at the source buffer.
 */
static struct dma_async_tx_descriptor *
rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, dma_addr_t dev_addr,
		       enum dma_transfer_direction dir, unsigned long dma_flags,
		       bool cyclic)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int nchunks = 0;
	unsigned int max_chunk_size;
	unsigned int full_size = 0;
	bool cross_boundary = false;
	unsigned int i;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	u32 high_dev_addr;
	u32 high_mem_addr;
#endif

	desc = rcar_dmac_desc_get(chan);
	if (!desc)
		return NULL;

	desc->async_tx.flags = dma_flags;
	desc->async_tx.cookie = -EBUSY;

	desc->cyclic = cyclic;
	desc->direction = dir;

	rcar_dmac_chan_configure_desc(chan, desc);

	max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;

	/*
	 * Allocate and fill the transfer chunk descriptors. We own the only
	 * reference to the DMA descriptor, there's no need for locking.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t mem_addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		full_size += len;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (i == 0) {
			high_dev_addr = dev_addr >> 32;
			high_mem_addr = mem_addr >> 32;
		}

		if ((dev_addr >> 32 != high_dev_addr) ||
		    (mem_addr >> 32 != high_mem_addr))
			cross_boundary = true;
#endif
		while (len) {
			unsigned int size = min(len, max_chunk_size);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			/*
			 * Prevent individual transfers from crossing 4GB
			 * boundaries.
			 */
			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
				cross_boundary = true;
			}
			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
				cross_boundary = true;
			}
#endif

			chunk = rcar_dmac_xfer_chunk_get(chan);
			if (!chunk) {
				rcar_dmac_desc_put(chan, desc);
				return NULL;
			}

			if (dir == DMA_DEV_TO_MEM) {
				chunk->src_addr = dev_addr;
				chunk->dst_addr = mem_addr;
			} else {
				chunk->src_addr = mem_addr;
				chunk->dst_addr = dev_addr;
			}

			chunk->size = size;

			dev_dbg(chan->chan.device->dev,
				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
				chan->index, chunk, desc, i, sg, size, len,
				&chunk->src_addr, &chunk->dst_addr);

			mem_addr += size;
			if (dir == DMA_MEM_TO_MEM)
				dev_addr += size;

			len -= size;

			list_add_tail(&chunk->node, &desc->chunks);
			nchunks++;
		}
	}

	desc->nchunks = nchunks;
	desc->size = full_size;

	/*
	 * Use hardware descriptor lists if possible when more than one chunk
	 * needs to be transferred (otherwise they don't make much sense).
	 *
	 * When hardware descriptors are used, the source and destination
	 * addresses must lie in the same 4 GiB region of the 40-bit address
	 * space; cross_boundary records whether that constraint is violated.
	 */
	desc->hwdescs.use = !cross_boundary && nchunks > 1;
	if (desc->hwdescs.use) {
		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
			desc->hwdescs.use = false;
	}

	return &desc->async_tx;
}

/* -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	int ret;

	INIT_LIST_HEAD(&rchan->desc.chunks_free);
	INIT_LIST_HEAD(&rchan->desc.pages);

	/* Preallocate descriptors. */
	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	return pm_runtime_get_sync(chan->device->dev);
}

static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct rcar_dmac_chan_map *map = &rchan->map;
	struct rcar_dmac_desc_page *page, *_page;
	struct rcar_dmac_desc *desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&rchan->lock);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irq(&rchan->lock);

	/*
	 * Now no new interrupts will occur, but one might already be
	 * running. Wait for it to finish before freeing resources.
	 */
	synchronize_irq(rchan->irq);

	if (rchan->mid_rid >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(rchan->mid_rid, dmac->modules);
		rchan->mid_rid = -EINVAL;
	}

	list_splice_init(&rchan->desc.free, &list);
	list_splice_init(&rchan->desc.pending, &list);
	list_splice_init(&rchan->desc.active, &list);
	list_splice_init(&rchan->desc.done, &list);
	list_splice_init(&rchan->desc.wait, &list);

	rchan->desc.running = NULL;

	list_for_each_entry(desc, &list, node)
		rcar_dmac_realloc_hwdesc(rchan, desc, 0);

	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
		list_del(&page->node);
		free_page((unsigned long)page);
	}

	/* Remove slave mapping if present. */
	if (map->slave.xfer_size) {
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
		map->slave.xfer_size = 0;
	}

	pm_runtime_put(chan->device->dev);
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct scatterlist sgl;

	if (!len)
		return NULL;

	sg_init_table(&sgl, 1);
	sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sgl) = dma_src;
	sg_dma_len(&sgl) = len;

	return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
				      DMA_MEM_TO_MEM, flags, false);
}

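/*
 * Client-side sketch (hypothetical, for illustration only): a memcpy through
 * this driver is driven by the generic dmaengine API, e.g.:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len, 0);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * where dst_dma and src_dma are assumed to be DMA addresses already mapped
 * by the caller.
 */
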
static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
				    enum dma_transfer_direction dir)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac_chan_map *map = &rchan->map;
	phys_addr_t dev_addr;
	size_t dev_size;
	enum dma_data_direction dev_dir;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = rchan->src.slave_addr;
		dev_size = rchan->src.xfer_size;
		dev_dir = DMA_TO_DEVICE;
	} else {
		dev_addr = rchan->dst.slave_addr;
		dev_size = rchan->dst.xfer_size;
		dev_dir = DMA_FROM_DEVICE;
	}

	/* Reuse current map if possible. */
	if (dev_addr == map->slave.slave_addr &&
	    dev_size == map->slave.xfer_size &&
	    dev_dir == map->dir)
		return 0;

	/* Remove old mapping if present. */
	if (map->slave.xfer_size)
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
	map->slave.xfer_size = 0;

	/* Create new slave address map. */
	map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
				     dev_dir, 0);

	if (dma_mapping_error(chan->device->dev, map->addr)) {
		dev_err(chan->device->dev,
			"chan%u: failed to map %zx@%pap\n", rchan->index,
			dev_size, &dev_addr);
		return -EIO;
	}

	dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
		rchan->index, dev_size, &dev_addr, &map->addr,
		dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");

	map->slave.slave_addr = dev_addr;
	map->slave.xfer_size = dev_size;
	map->dir = dev_dir;

	return 0;
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || !sg_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, rchan->mid_rid);
		return NULL;
	}

	if (rcar_dmac_map_slave_addr(chan, dir))
		return NULL;

	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
				      dir, flags, false);
}

#define RCAR_DMAC_MAX_SG_LEN	32

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			  size_t buf_len, size_t period_len,
			  enum dma_transfer_direction dir, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int i;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || buf_len < period_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, rchan->mid_rid);
		return NULL;
	}

	if (rcar_dmac_map_slave_addr(chan, dir))
		return NULL;

	sg_len = buf_len / period_len;
	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
		dev_err(chan->device->dev,
			"chan%u: sg length %d exceeds limit %d\n",
			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
		return NULL;
	}

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; ++i) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
				      dir, flags, true);

	kfree(sgl);
	return desc;
}

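/*
 * Client-side sketch (hypothetical): cyclic transfers are typically set up
 * by audio or serial drivers through the generic helper, e.g.:
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *
 * which lands in rcar_dmac_prep_dma_cyclic() above with one SG element per
 * period.
 */
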
static int rcar_dmac_device_config(struct dma_chan *chan,
				   struct dma_slave_config *cfg)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel while using it...
	 */
	rchan->src.slave_addr = cfg->src_addr;
	rchan->dst.slave_addr = cfg->dst_addr;
	rchan->src.xfer_size = cfg->src_addr_width;
	rchan->dst.xfer_size = cfg->dst_addr_width;

	return 0;
}

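/*
 * Client-side sketch (hypothetical values): before preparing slave
 * transfers, a peripheral driver fills in a struct dma_slave_config and
 * calls the generic wrapper, which ends up in rcar_dmac_device_config()
 * above:
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys,	(assumed device FIFO address)
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */
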
static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/*
	 * FIXME: No new interrupt can occur now, but the IRQ thread might still
	 * be running.
	 */

	rcar_dmac_chan_reinit(rchan);

	return 0;
}

static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
					       dma_cookie_t cookie)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *running = NULL;
	struct rcar_dmac_xfer_chunk *chunk;
	enum dma_status status;
	unsigned int residue = 0;
	unsigned int dptr = 0;

	if (!desc)
		return 0;

	/*
	 * If the cookie corresponds to a descriptor that has been completed
	 * there is no residue. The same check has already been performed by the
	 * caller but without holding the channel lock, so the descriptor could
	 * now be complete.
	 */
	status = dma_cookie_status(&chan->chan, cookie, NULL);
	if (status == DMA_COMPLETE)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently running transfer
	 * then the descriptor hasn't been processed yet, and the residue is
	 * equal to the full descriptor size.
	 */
	if (cookie != desc->async_tx.cookie) {
		list_for_each_entry(desc, &chan->desc.pending, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}
		list_for_each_entry(desc, &chan->desc.active, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}

		/*
		 * No descriptor found for the cookie, there's thus no residue.
		 * This shouldn't happen if the calling driver passes a correct
		 * cookie value.
		 */
		WARN(1, "No descriptor for cookie!");
		return 0;
	}

	/*
	 * In descriptor mode the descriptor running pointer is not maintained
	 * by the interrupt handler, find the running descriptor from the
	 * descriptor pointer field in the CHCRB register. In non-descriptor
	 * mode just use the running descriptor pointer.
	 */
	if (desc->hwdescs.use) {
		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
		if (dptr == 0)
			dptr = desc->nchunks;
		dptr--;
		WARN_ON(dptr >= desc->nchunks);
	} else {
		running = desc->running;
	}

	/* Compute the size of all chunks still to be transferred. */
	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
		if (chunk == running || ++dptr == desc->nchunks)
			break;

		residue += chunk->size;
	}

	if (desc->direction == DMA_DEV_TO_MEM)
		rcar_dmac_sync_tcr(chan);

	/* Add the residue for the current chunk. */
	residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;

	return residue;
}

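/*
 * Worked example (editorial note): for a non-cyclic descriptor split into
 * three 64 KiB chunks with the second chunk currently running, the reverse
 * walk above accumulates the 64 KiB of the third chunk and stops at the
 * running one; DMATCRB << xfer_shift then adds whatever is left of the
 * second chunk, so the reported residue decreases monotonically as the
 * transfer proceeds.
 */
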
static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	enum dma_status status;
	unsigned long flags;
	unsigned int residue;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&rchan->lock, flags);
	residue = rcar_dmac_chan_get_residue(rchan, cookie);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/* If there's no residue, the cookie is complete. */
	if (!residue)
		return DMA_COMPLETE;

	dma_set_residue(txstate, residue);

	return status;
}

static void rcar_dmac_issue_pending(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);

	if (list_empty(&rchan->desc.pending))
		goto done;

	/* Append the pending list to the active list. */
	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);

	/*
	 * If no transfer is running pick the first descriptor from the active
	 * list and start the transfer.
	 */
	if (!rchan->desc.running) {
		struct rcar_dmac_desc *desc;

		desc = list_first_entry(&rchan->desc.active,
					struct rcar_dmac_desc, node);
		rchan->desc.running = desc;

		rcar_dmac_chan_start_xfer(rchan);
	}

done:
	spin_unlock_irqrestore(&rchan->lock, flags);
}

static void rcar_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	synchronize_irq(rchan->irq);
}

/* -----------------------------------------------------------------------------
 * IRQ handling
 */

static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	unsigned int stage;

	if (WARN_ON(!desc || !desc->cyclic)) {
		/*
		 * This should never happen, there should always be a running
		 * cyclic descriptor when a descriptor stage end interrupt is
		 * triggered. Warn and return.
		 */
		return IRQ_NONE;
	}

	/* Program the interrupt pointer to the next stage. */
	stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
		 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
	rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (WARN_ON_ONCE(!desc)) {
		/*
		 * This should never happen, there should always be a running
		 * descriptor when a transfer end interrupt is triggered. Warn
		 * and return.
		 */
		return IRQ_NONE;
	}

	/*
	 * The transfer end interrupt isn't generated for each chunk when using
	 * descriptor mode. Only update the running chunk pointer in
	 * non-descriptor mode.
	 */
	if (!desc->hwdescs.use) {
		/*
		 * If we haven't completed the last transfer chunk simply move
		 * to the next one. Only wake the IRQ thread if the transfer is
		 * cyclic.
		 */
		if (!list_is_last(&desc->running->node, &desc->chunks)) {
			desc->running = list_next_entry(desc->running, node);
			if (!desc->cyclic)
				ret = IRQ_HANDLED;
			goto done;
		}

		/*
		 * We've completed the last transfer chunk. If the transfer is
		 * cyclic, move back to the first one.
		 */
		if (desc->cyclic) {
			desc->running =
				list_first_entry(&desc->chunks,
						 struct rcar_dmac_xfer_chunk,
						 node);
			goto done;
		}
	}

	/* The descriptor is complete, move it to the done list. */
	list_move_tail(&desc->node, &chan->desc.done);

	/* Queue the next descriptor, if any. */
	if (!list_empty(&chan->desc.active))
		chan->desc.running = list_first_entry(&chan->desc.active,
						      struct rcar_dmac_desc,
						      node);
	else
		chan->desc.running = NULL;

done:
	if (chan->desc.running)
		rcar_dmac_chan_start_xfer(chan);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
{
	u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
	struct rcar_dmac_chan *chan = dev;
	irqreturn_t ret = IRQ_NONE;
	u32 chcr;

	spin_lock(&chan->lock);

	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
	if (chcr & RCAR_DMACHCR_TE)
		mask |= RCAR_DMACHCR_DE;
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
	if (mask & RCAR_DMACHCR_DE)
		rcar_dmac_chcr_de_barrier(chan);

	if (chcr & RCAR_DMACHCR_DSE)
		ret |= rcar_dmac_isr_desc_stage_end(chan);

	if (chcr & RCAR_DMACHCR_TE)
		ret |= rcar_dmac_isr_transfer_end(chan);

	spin_unlock(&chan->lock);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
	struct rcar_dmac_chan *chan = dev;
	struct rcar_dmac_desc *desc;
	struct dmaengine_desc_callback cb;

	spin_lock_irq(&chan->lock);

	/* For cyclic transfers notify the user after every chunk. */
	if (chan->desc.running && chan->desc.running->cyclic) {
		desc = chan->desc.running;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}
	}

	/*
	 * Call the callback function for all descriptors on the done list and
	 * move them to the ack wait list.
	 */
	while (!list_empty(&chan->desc.done)) {
		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
					node);
		dma_cookie_complete(&desc->async_tx);
		list_del(&desc->node);

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			/*
			 * We own the only reference to this descriptor, we can
			 * safely dereference it without holding the channel
			 * lock.
			 */
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}

		list_add_tail(&desc->node, &chan->desc.wait);
	}

	spin_unlock_irq(&chan->lock);

	/* Recycle all acked descriptors. */
	rcar_dmac_desc_recycle_acked(chan);

	return IRQ_HANDLED;
}

static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
{
	struct rcar_dmac *dmac = data;

	if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
		return IRQ_NONE;

	/*
	 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
	 * abort transfers on all channels, and reinitialize the DMAC.
	 */
	rcar_dmac_stop(dmac);
	rcar_dmac_abort(dmac);
	rcar_dmac_init(dmac);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;

	/*
	 * FIXME: Using a filter on OF platforms is nonsense. The OF xlate
	 * function knows from which device it wants to allocate a channel,
	 * and would be perfectly capable of selecting the channel it wants.
	 * Forcing it to call dma_request_channel() and iterate through all
	 * channels from all controllers is just pointless.
	 */
	if (chan->device->device_config != rcar_dmac_device_config ||
	    dma_spec->np != chan->device->dev->of_node)
		return false;

	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}

static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct rcar_dmac_chan *rchan;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
	if (!chan)
		return NULL;

	rchan = to_rcar_dmac_chan(chan);
	rchan->mid_rid = dma_spec->args[0];

	return chan;
}

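/*
 * DT usage sketch (hypothetical values; see the renesas,rcar-dmac binding
 * for the authoritative description): the controller uses #dma-cells = <1>,
 * the single cell carrying the MID/RID of the client module, which is what
 * args[0] above is used for:
 *
 *	&scif0 {
 *		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
 *		dma-names = "tx", "rx";
 *	};
 */
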
/* -----------------------------------------------------------------------------
 * Power management
 */

#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
	return 0;
}

static int rcar_dmac_runtime_resume(struct device *dev)
{
	struct rcar_dmac *dmac = dev_get_drvdata(dev);

	return rcar_dmac_init(dmac);
}
#endif

static const struct dev_pm_ops rcar_dmac_pm = {
	/*
	 * TODO for system sleep/resume:
	 *   - Wait for the current transfer to complete and stop the device,
	 *   - Resume transfers, if any.
	 */
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
			   NULL)
};

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan,
				unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int ret;

	rchan->index = index;
	rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
	rchan->mid_rid = -EINVAL;

	spin_lock_init(&rchan->lock);

	INIT_LIST_HEAD(&rchan->desc.free);
	INIT_LIST_HEAD(&rchan->desc.pending);
	INIT_LIST_HEAD(&rchan->desc.active);
	INIT_LIST_HEAD(&rchan->desc.done);
	INIT_LIST_HEAD(&rchan->desc.wait);

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (rchan->irq < 0) {
		dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	/*
	 * Initialize the DMA engine channel and add it to the DMA engine
	 * channels list.
	 */
	chan->device = &dmac->engine;
	dma_cookie_init(chan);

	list_add_tail(&chan->device_node, &dmac->engine.channels);

	ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
					rcar_dmac_isr_channel,
					rcar_dmac_isr_channel_thread, 0,
					irqname, rchan);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			rchan->irq, ret);
		return ret;
	}

	return 0;
}

static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
		dev_err(dev, "invalid number of channels %u\n",
			dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}

static int rcar_dmac_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
	unsigned int channels_offset = 0;
	struct dma_device *engine;
	struct rcar_dmac *dmac;
	struct resource *mem;
	unsigned int i;
	char *irqname;
	int irq;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);
	dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));

	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	/*
	 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from
	 * being flushed correctly, resulting in memory corruption. DMAC 0
	 * channel 0 is connected to microTLB 0 on currently supported
	 * platforms, so we can't use it with the IPMMU. As the IOMMU API
	 * operates at the device level we can't disable it selectively, so
	 * ignore channel 0 for now if the device is part of an IOMMU group.
	 */
	if (pdev->dev.iommu_group) {
		dmac->n_channels--;
		channels_offset = 1;
	}

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources. */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dmac->iomem))
		return PTR_ERR(dmac->iomem);

	irq = platform_get_irq_byname(pdev, "error");
	if (irq < 0) {
		dev_err(&pdev->dev, "no error IRQ specified\n");
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
				 dev_name(dmac->dev));
	if (!irqname)
		return -ENOMEM;

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
		return ret;
	}

	ret = rcar_dmac_init(dmac);
	pm_runtime_put(&pdev->dev);

	if (ret) {
		dev_err(&pdev->dev, "failed to reset device\n");
		goto error;
	}

	/* Initialize the DMA engine. */
	engine = &dmac->engine;

	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	dma_cap_set(DMA_SLAVE, engine->cap_mask);

	engine->dev = &pdev->dev;
	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);

	engine->src_addr_widths = widths;
	engine->dst_addr_widths = widths;
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
	engine->device_config = rcar_dmac_device_config;
	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
	engine->device_tx_status = rcar_dmac_tx_status;
	engine->device_issue_pending = rcar_dmac_issue_pending;
	engine->device_synchronize = rcar_dmac_device_synchronize;

	INIT_LIST_HEAD(&engine->channels);

	for (i = 0; i < dmac->n_channels; ++i) {
		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
					   i + channels_offset);
		if (ret < 0)
			goto error;
	}

	ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
			       irqname, dmac);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto error;

	/*
	 * Register the DMA engine device.
	 *
	 * Default transfer size of 32 bytes requires 32-byte alignment.
	 */
	ret = dma_async_device_register(engine);
	if (ret < 0)
		goto error;

	return 0;

error:
	of_dma_controller_free(pdev->dev.of_node);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int rcar_dmac_remove(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

static void rcar_dmac_shutdown(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	rcar_dmac_stop(dmac);
}

static const struct of_device_id rcar_dmac_of_ids[] = {
	{ .compatible = "renesas,rcar-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);

static struct platform_driver rcar_dmac_driver = {
	.driver		= {
		.pm	= &rcar_dmac_pm,
		.name	= "rcar-dmac",
		.of_match_table = rcar_dmac_of_ids,
	},
	.probe		= rcar_dmac_probe,
	.remove		= rcar_dmac_remove,
	.shutdown	= rcar_dmac_shutdown,
};

module_platform_driver(rcar_dmac_driver);

MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");