// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};

/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));

/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)

/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))
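
/*
 * Each allocated page is dedicated to a single entry type: the union above
 * lets the same rcar_dmac_desc_page header describe either an array of DMA
 * descriptors or an array of transfer chunks, and rcar_dmac_desc_alloc() and
 * rcar_dmac_xfer_chunk_alloc() below each carve a full page into entries of
 * one type only, using the two PER_PAGE macros for the respective counts.
 */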

/*
 * struct rcar_dmac_chan_slave - Slave configuration
 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */
struct rcar_dmac_chan_slave {
	phys_addr_t slave_addr;
	unsigned int xfer_size;
};

/*
 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
 * @addr: slave dma address
 * @dir: direction of mapping
 * @slave: slave configuration that is mapped
 */
struct rcar_dmac_chan_map {
	dma_addr_t addr;
	enum dma_data_direction dir;
	struct rcar_dmac_chan_slave slave;
};

/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @map: map of the currently mapped slave address
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;
	struct rcar_dmac_chan_map map;

	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)

/*
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @parms: DMA parameters for the device (maximum segment size)
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;
	struct device_dma_parameters parms;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;

	DECLARE_BITMAP(modules, 256);
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)

/* -----------------------------------------------------------------------------
 * Registers
 */

#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060
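
/*
 * Note that the CHCR transfer size (TS) field is split: bits 4:3 hold the low
 * part of the encoding and bits 21:20 the high part, which is why each
 * RCAR_DMACHCR_TS_* value above combines a (x << 20) and a (y << 3) component.
 */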

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4

/* -----------------------------------------------------------------------------
 * Device access
 */

static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}
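
/*
 * As the helpers above show, DMAOR (controller-wide) and DMARS (per-channel)
 * are 16-bit registers accessed with readw()/writew(); all other registers
 * are 32 bits wide.
 */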

/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}

static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	if (desc->hwdescs.use) {
		struct rcar_dmac_xfer_chunk *chunk =
			list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at beginning of transfer by the DMAC like it
		 * should. Initialize it manually with the destination address
		 * of the first chunk.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the end
		 * of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR,
			     chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
}

static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
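
/*
 * For illustration only (a sketch, not part of this driver): a dmaengine
 * client typically reaches the submit path above through the generic API,
 * roughly as follows ("chan", "sgl" and "sg_len" are placeholders):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -EIO;
 *	cookie = dmaengine_submit(tx);     [ends up in rcar_dmac_tx_submit()]
 *	dma_async_issue_pending(chan);     [moves pending -> active, starts HW]
 */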

/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptors lists. The descriptor's chunks list will be reinitialized to
 * an empty list as a result.
 *
 * The descriptor must have been removed from the channel's lists before calling
 * this function.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add(&desc->node, &chan->desc.free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	/*
	 * We have to temporarily move all descriptors from the wait list to a
	 * local list as iterating over the wait list, even with
	 * list_for_each_entry_safe, isn't safe if we release the channel lock
	 * around the rcar_dmac_desc_put() call.
	 */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice_init(&chan->desc.wait, &list);
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}

	if (list_empty(&list))
		return;

	/* Put the remaining descriptors back in the wait list. */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice(&list, &chan->desc.wait);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	unsigned long flags;
	int ret;

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
	list_del(&desc->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return desc;
}

/*
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

		list_add_tail(&chunk->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.chunks_free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_xfer_chunk *chunk;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.chunks_free)) {
		/*
		 * No free chunks, allocate a page worth of them and try again,
		 * as someone else could race us to get the newly allocated
		 * chunks. If the allocation fails return an error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	chunk = list_first_entry(&chan->desc.chunks_free,
				 struct rcar_dmac_xfer_chunk, node);
	list_del(&chunk->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return chunk;
}

static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
				     struct rcar_dmac_desc *desc, size_t size)
{
	/*
	 * dma_alloc_coherent() allocates memory in page size increments. To
	 * avoid reallocating the hardware descriptors when the allocated size
	 * wouldn't change align the requested size to a multiple of the page
	 * size.
	 */
	size = PAGE_ALIGN(size);

	if (desc->hwdescs.size == size)
		return;

	if (desc->hwdescs.mem) {
		dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
				  desc->hwdescs.mem, desc->hwdescs.dma);
		desc->hwdescs.mem = NULL;
		desc->hwdescs.size = 0;
	}

	if (!size)
		return;

	desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
					       &desc->hwdescs.dma, GFP_NOWAIT);
	if (!desc->hwdescs.mem)
		return;

	desc->hwdescs.size = size;
}

static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
				 struct rcar_dmac_desc *desc)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_hw_desc *hwdesc;

	rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));

	hwdesc = desc->hwdescs.mem;
	if (!hwdesc)
		return -ENOMEM;

	list_for_each_entry(chunk, &desc->chunks, node) {
		hwdesc->sar = chunk->src_addr;
		hwdesc->dar = chunk->dst_addr;
		hwdesc->tcr = chunk->size >> desc->xfer_shift;
		hwdesc++;
	}

	return 0;
}
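
/*
 * The hardware descriptor list built above is a flat array of 16-byte
 * rcar_dmac_hw_desc entries (SAR, DAR, TCR plus a reserved word), one per
 * transfer chunk. Its bus address is later programmed into DMADPBASE by
 * rcar_dmac_chan_start_xfer(), which is also why the allocation must be
 * 16-byte aligned: the low four bits of DMADPBASE carry flags such as
 * RCAR_DMADPBASE_SEL rather than address bits.
 */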

/* -----------------------------------------------------------------------------
 * Stop and reset
 */
static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
{
	u32 chcr;
	unsigned int i;

	/*
	 * Ensure that the DE bit actually reads back as 0 after being
	 * cleared.
	 */
	for (i = 0; i < 1024; i++) {
		chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
		if (!(chcr & RCAR_DMACHCR_DE))
			return;
		udelay(1);
	}

	dev_err(chan->chan.device->dev, "CHCR DE check error\n");
}

static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	/* set DE=0 and flush remaining data */
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));

	/* make sure all remaining data was flushed */
	rcar_dmac_chcr_de_barrier(chan);
}

static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
		  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
		  RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
	rcar_dmac_chcr_de_barrier(chan);
}

static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&chan->lock, flags);

	/* Move all non-free descriptors to the local list. */
	list_splice_init(&chan->desc.pending, &descs);
	list_splice_init(&chan->desc.active, &descs);
	list_splice_init(&chan->desc.done, &descs);
	list_splice_init(&chan->desc.wait, &descs);

	chan->desc.running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &descs, node) {
		list_del(&desc->node);
		rcar_dmac_desc_put(chan, desc);
	}
}

static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
{
	unsigned int i;

	/* Stop all channels. */
	for (i = 0; i < dmac->n_channels; ++i) {
		struct rcar_dmac_chan *chan = &dmac->channels[i];

		/* Stop the channel. */
		spin_lock_irq(&chan->lock);
		rcar_dmac_chan_halt(chan);
		spin_unlock_irq(&chan->lock);
	}
}

static int rcar_dmac_chan_pause(struct dma_chan *chan)
{
	unsigned long flags;
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_clear_chcr_de(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
					  struct rcar_dmac_desc *desc)
{
	static const u32 chcr_ts[] = {
		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
		RCAR_DMACHCR_TS_64B,
	};

	unsigned int xfer_size;
	u32 chcr;

	switch (desc->direction) {
	case DMA_DEV_TO_MEM:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->src.xfer_size;
		break;

	case DMA_MEM_TO_DEV:
		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->dst.xfer_size;
		break;

	case DMA_MEM_TO_MEM:
	default:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_AUTO;
		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
		break;
	}

	desc->xfer_shift = ilog2(xfer_size);
	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
}
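
/*
 * Worked example: a slave configured with a 4-byte transfer size yields
 * xfer_shift = ilog2(4) = 2, so chcr_ts[2] = RCAR_DMACHCR_TS_4B is ORed into
 * CHCR, and transfer counts are then programmed into TCR in 4-byte units
 * (chunk->size >> 2).
 */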

/*
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning:
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains a single element pointing at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *
rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, dma_addr_t dev_addr,
		       enum dma_transfer_direction dir, unsigned long dma_flags,
		       bool cyclic)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int nchunks = 0;
	unsigned int max_chunk_size;
	unsigned int full_size = 0;
	bool cross_boundary = false;
	unsigned int i;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	u32 high_dev_addr;
	u32 high_mem_addr;
#endif

	desc = rcar_dmac_desc_get(chan);
	if (!desc)
		return NULL;

	desc->async_tx.flags = dma_flags;
	desc->async_tx.cookie = -EBUSY;

	desc->cyclic = cyclic;
	desc->direction = dir;

	rcar_dmac_chan_configure_desc(chan, desc);

	max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;

	/*
	 * Allocate and fill the transfer chunk descriptors. We own the only
	 * reference to the DMA descriptor, there's no need for locking.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t mem_addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		full_size += len;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (i == 0) {
			high_dev_addr = dev_addr >> 32;
			high_mem_addr = mem_addr >> 32;
		}

		if ((dev_addr >> 32 != high_dev_addr) ||
		    (mem_addr >> 32 != high_mem_addr))
			cross_boundary = true;
#endif
		while (len) {
			unsigned int size = min(len, max_chunk_size);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			/*
			 * Prevent individual transfers from crossing 4GB
			 * boundaries.
			 */
			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
				cross_boundary = true;
			}
			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
				cross_boundary = true;
			}
#endif

			chunk = rcar_dmac_xfer_chunk_get(chan);
			if (!chunk) {
				rcar_dmac_desc_put(chan, desc);
				return NULL;
			}

			if (dir == DMA_DEV_TO_MEM) {
				chunk->src_addr = dev_addr;
				chunk->dst_addr = mem_addr;
			} else {
				chunk->src_addr = mem_addr;
				chunk->dst_addr = dev_addr;
			}

			chunk->size = size;

			dev_dbg(chan->chan.device->dev,
				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
				chan->index, chunk, desc, i, sg, size, len,
				&chunk->src_addr, &chunk->dst_addr);

			mem_addr += size;
			if (dir == DMA_MEM_TO_MEM)
				dev_addr += size;

			len -= size;

			list_add_tail(&chunk->node, &desc->chunks);
			nchunks++;
		}
	}

	desc->nchunks = nchunks;
	desc->size = full_size;

	/*
	 * Use hardware descriptor lists if possible when more than one chunk
	 * needs to be transferred (otherwise they don't make much sense).
	 *
	 * The source and destination addresses must be located in the same
	 * 4GiB region of the 40-bit address space to use hardware
	 * descriptors; cross_boundary tracks whether that holds.
	 */
	desc->hwdescs.use = !cross_boundary && nchunks > 1;
	if (desc->hwdescs.use) {
		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
			desc->hwdescs.use = false;
	}

	return &desc->async_tx;
}

/* -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	int ret;

	INIT_LIST_HEAD(&rchan->desc.chunks_free);
	INIT_LIST_HEAD(&rchan->desc.pages);

	/* Preallocate descriptors. */
	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	return pm_runtime_get_sync(chan->device->dev);
}

static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct rcar_dmac_chan_map *map = &rchan->map;
	struct rcar_dmac_desc_page *page, *_page;
	struct rcar_dmac_desc *desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&rchan->lock);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irq(&rchan->lock);

	/*
	 * Now no new interrupts will occur, but one might already be
	 * running. Wait for it to finish before freeing resources.
	 */
	synchronize_irq(rchan->irq);

	if (rchan->mid_rid >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(rchan->mid_rid, dmac->modules);
		rchan->mid_rid = -EINVAL;
	}

	list_splice_init(&rchan->desc.free, &list);
	list_splice_init(&rchan->desc.pending, &list);
	list_splice_init(&rchan->desc.active, &list);
	list_splice_init(&rchan->desc.done, &list);
	list_splice_init(&rchan->desc.wait, &list);

	rchan->desc.running = NULL;

	list_for_each_entry(desc, &list, node)
		rcar_dmac_realloc_hwdesc(rchan, desc, 0);

	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
		list_del(&page->node);
		free_page((unsigned long)page);
	}

	/* Remove slave mapping if present. */
	if (map->slave.xfer_size) {
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
		map->slave.xfer_size = 0;
	}

	pm_runtime_put(chan->device->dev);
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct scatterlist sgl;

	if (!len)
		return NULL;

	sg_init_table(&sgl, 1);
	sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sgl) = dma_src;
	sg_dma_len(&sgl) = len;

	return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
				      DMA_MEM_TO_MEM, flags, false);
}

static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
				    enum dma_transfer_direction dir)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac_chan_map *map = &rchan->map;
	phys_addr_t dev_addr;
	size_t dev_size;
	enum dma_data_direction dev_dir;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = rchan->src.slave_addr;
		dev_size = rchan->src.xfer_size;
		dev_dir = DMA_TO_DEVICE;
	} else {
		dev_addr = rchan->dst.slave_addr;
		dev_size = rchan->dst.xfer_size;
		dev_dir = DMA_FROM_DEVICE;
	}

	/* Reuse current map if possible. */
	if (dev_addr == map->slave.slave_addr &&
	    dev_size == map->slave.xfer_size &&
	    dev_dir == map->dir)
		return 0;

	/* Remove old mapping if present. */
	if (map->slave.xfer_size)
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
	map->slave.xfer_size = 0;

	/* Create new slave address map. */
	map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
				     dev_dir, 0);

	if (dma_mapping_error(chan->device->dev, map->addr)) {
		dev_err(chan->device->dev,
			"chan%u: failed to map %zx@%pap", rchan->index,
			dev_size, &dev_addr);
		return -EIO;
	}

	dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
		rchan->index, dev_size, &dev_addr, &map->addr,
		dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");

	map->slave.slave_addr = dev_addr;
	map->slave.xfer_size = dev_size;
	map->dir = dev_dir;

	return 0;
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || !sg_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, rchan->mid_rid);
		return NULL;
	}

	if (rcar_dmac_map_slave_addr(chan, dir))
		return NULL;

	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
				      dir, flags, false);
}

#define RCAR_DMAC_MAX_SG_LEN	32

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			  size_t buf_len, size_t period_len,
			  enum dma_transfer_direction dir, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int i;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || buf_len < period_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, rchan->mid_rid);
		return NULL;
	}

	if (rcar_dmac_map_slave_addr(chan, dir))
		return NULL;

	sg_len = buf_len / period_len;
	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
		dev_err(chan->device->dev,
			"chan%u: sg length %d exceeds limit %d",
			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
		return NULL;
	}

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; ++i) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
				      dir, flags, true);

	kfree(sgl);
	return desc;
}

static int rcar_dmac_device_config(struct dma_chan *chan,
				   struct dma_slave_config *cfg)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel while using it...
	 */
	rchan->src.slave_addr = cfg->src_addr;
	rchan->dst.slave_addr = cfg->dst_addr;
	rchan->src.xfer_size = cfg->src_addr_width;
	rchan->dst.xfer_size = cfg->dst_addr_width;

	return 0;
}
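
/*
 * Illustrative client-side configuration (a sketch, not part of this driver;
 * "fifo_phys_addr" is a placeholder):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * dmaengine_slave_config() reaches rcar_dmac_device_config() through the
 * device_config hook; the 4-byte address width then selects TS_4B in
 * rcar_dmac_chan_configure_desc().
 */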

static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/*
	 * FIXME: No new interrupt can occur now, but the IRQ thread might still
	 * be running.
	 */

	rcar_dmac_chan_reinit(rchan);

	return 0;
}

static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
					       dma_cookie_t cookie)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *running = NULL;
	struct rcar_dmac_xfer_chunk *chunk;
	enum dma_status status;
	unsigned int residue = 0;
	unsigned int dptr = 0;

	if (!desc)
		return 0;

	/*
	 * If the cookie corresponds to a descriptor that has been completed
	 * there is no residue. The same check has already been performed by the
	 * caller but without holding the channel lock, so the descriptor could
	 * now be complete.
	 */
	status = dma_cookie_status(&chan->chan, cookie, NULL);
	if (status == DMA_COMPLETE)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently running transfer
	 * then the descriptor hasn't been processed yet, and the residue is
	 * equal to the full descriptor size.
	 * A client driver may also call this function before
	 * rcar_dmac_isr_channel_thread() runs. In that case, "desc.running"
	 * already points to the next descriptor while the done list is not
	 * empty yet. So, if the argument cookie matches a descriptor on the
	 * done list, we can assume the residue is zero.
	 */
	if (cookie != desc->async_tx.cookie) {
		list_for_each_entry(desc, &chan->desc.done, node) {
			if (cookie == desc->async_tx.cookie)
				return 0;
		}
		list_for_each_entry(desc, &chan->desc.pending, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}
		list_for_each_entry(desc, &chan->desc.active, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}

		/*
		 * No descriptor found for the cookie, there's thus no residue.
		 * This shouldn't happen if the calling driver passes a correct
		 * cookie value.
		 */
		WARN(1, "No descriptor for cookie!");
		return 0;
	}

	/*
	 * In descriptor mode the descriptor running pointer is not maintained
	 * by the interrupt handler, find the running descriptor from the
	 * descriptor pointer field in the CHCRB register. In non-descriptor
	 * mode just use the running descriptor pointer.
	 */
	if (desc->hwdescs.use) {
		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
		if (dptr == 0)
			dptr = desc->nchunks;
		dptr--;
		WARN_ON(dptr >= desc->nchunks);
	} else {
		running = desc->running;
	}

	/* Compute the size of all chunks still to be transferred. */
	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
		if (chunk == running || ++dptr == desc->nchunks)
			break;

		residue += chunk->size;
	}

	/* Add the residue for the current chunk. */
	residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;

	return residue;
}
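
/*
 * In short: residue = (sum of the sizes of the chunks that have not started
 * yet) + (remaining transfer count of the current chunk, i.e. TCRB scaled
 * back to bytes by xfer_shift). For a descriptor that has not started at all
 * this degenerates to the full descriptor size, and for a completed one to
 * zero.
 */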

static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	enum dma_status status;
	unsigned long flags;
	unsigned int residue;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&rchan->lock, flags);
	residue = rcar_dmac_chan_get_residue(rchan, cookie);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/* If there's no residue, the cookie is complete. */
	if (!residue)
		return DMA_COMPLETE;

	dma_set_residue(txstate, residue);

	return status;
}

static void rcar_dmac_issue_pending(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);

	if (list_empty(&rchan->desc.pending))
		goto done;

	/* Append the pending list to the active list. */
	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);

	/*
	 * If no transfer is running pick the first descriptor from the active
	 * list and start the transfer.
	 */
	if (!rchan->desc.running) {
		struct rcar_dmac_desc *desc;

		desc = list_first_entry(&rchan->desc.active,
					struct rcar_dmac_desc, node);
		rchan->desc.running = desc;

		rcar_dmac_chan_start_xfer(rchan);
	}

done:
	spin_unlock_irqrestore(&rchan->lock, flags);
}

static void rcar_dmac_device_synchronize(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	synchronize_irq(rchan->irq);
}

/* -----------------------------------------------------------------------------
 * IRQ handling
 */

static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	unsigned int stage;

	if (WARN_ON(!desc || !desc->cyclic)) {
		/*
		 * This should never happen, there should always be a running
		 * cyclic descriptor when a descriptor stage end interrupt is
		 * triggered. Warn and return.
		 */
		return IRQ_NONE;
	}

	/* Program the interrupt pointer to the next stage. */
	stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
		 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
	rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (WARN_ON_ONCE(!desc)) {
		/*
		 * This should never happen, there should always be a running
		 * descriptor when a transfer end interrupt is triggered. Warn
		 * and return.
		 */
		return IRQ_NONE;
	}

	/*
	 * The transfer end interrupt isn't generated for each chunk when using
	 * descriptor mode. Only update the running chunk pointer in
	 * non-descriptor mode.
	 */
	if (!desc->hwdescs.use) {
		/*
		 * If we haven't completed the last transfer chunk simply move
		 * to the next one. Only wake the IRQ thread if the transfer is
		 * cyclic.
		 */
		if (!list_is_last(&desc->running->node, &desc->chunks)) {
			desc->running = list_next_entry(desc->running, node);
			if (!desc->cyclic)
				ret = IRQ_HANDLED;
			goto done;
		}

		/*
		 * We've completed the last transfer chunk. If the transfer is
		 * cyclic, move back to the first one.
		 */
		if (desc->cyclic) {
			desc->running =
				list_first_entry(&desc->chunks,
						 struct rcar_dmac_xfer_chunk,
						 node);
			goto done;
		}
	}

	/* The descriptor is complete, move it to the done list. */
	list_move_tail(&desc->node, &chan->desc.done);

	/* Queue the next descriptor, if any. */
	if (!list_empty(&chan->desc.active))
		chan->desc.running = list_first_entry(&chan->desc.active,
						      struct rcar_dmac_desc,
						      node);
	else
		chan->desc.running = NULL;

done:
	if (chan->desc.running)
		rcar_dmac_chan_start_xfer(chan);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
{
	u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
	struct rcar_dmac_chan *chan = dev;
	irqreturn_t ret = IRQ_NONE;
	bool reinit = false;
	u32 chcr;

	spin_lock(&chan->lock);

	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
	if (chcr & RCAR_DMACHCR_CAE) {
		struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);

		/*
		 * We don't need to call rcar_dmac_chan_halt() because the
		 * channel is already stopped in the error case. We need to
		 * clear the register and check the DE bit as recovery.
		 */
		rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
		rcar_dmac_chcr_de_barrier(chan);
		reinit = true;
		goto spin_lock_end;
	}

	if (chcr & RCAR_DMACHCR_TE)
		mask |= RCAR_DMACHCR_DE;
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
	if (mask & RCAR_DMACHCR_DE)
		rcar_dmac_chcr_de_barrier(chan);

	if (chcr & RCAR_DMACHCR_DSE)
		ret |= rcar_dmac_isr_desc_stage_end(chan);

	if (chcr & RCAR_DMACHCR_TE)
		ret |= rcar_dmac_isr_transfer_end(chan);

spin_lock_end:
	spin_unlock(&chan->lock);

	if (reinit) {
		dev_err(chan->chan.device->dev, "Channel Address Error\n");

		rcar_dmac_chan_reinit(chan);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
	struct rcar_dmac_chan *chan = dev;
	struct rcar_dmac_desc *desc;
	struct dmaengine_desc_callback cb;

	spin_lock_irq(&chan->lock);

	/* For cyclic transfers notify the user after every chunk. */
	if (chan->desc.running && chan->desc.running->cyclic) {
		desc = chan->desc.running;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);

		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}
	}

	/*
	 * Call the callback function for all descriptors on the done list and
	 * move them to the ack wait list.
	 */
	while (!list_empty(&chan->desc.done)) {
		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
					node);
		dma_cookie_complete(&desc->async_tx);
		list_del(&desc->node);

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irq(&chan->lock);
			/*
			 * We own the only reference to this descriptor, we can
			 * safely dereference it without holding the channel
			 * lock.
			 */
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irq(&chan->lock);
		}

		list_add_tail(&desc->node, &chan->desc.wait);
	}

	spin_unlock_irq(&chan->lock);

	/* Recycle all acked descriptors. */
	rcar_dmac_desc_recycle_acked(chan);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;

	/*
	 * FIXME: Using a filter on OF platforms makes no sense. The OF xlate
	 * function knows from which device it wants to allocate a channel,
	 * and would be perfectly capable of selecting the channel it wants.
	 * Forcing it to call dma_request_channel() and iterate through all
	 * channels from all controllers is just pointless.
	 */
	if (chan->device->device_config != rcar_dmac_device_config ||
	    dma_spec->np != chan->device->dev->of_node)
		return false;

	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}

static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct rcar_dmac_chan *rchan;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
	if (!chan)
		return NULL;

	rchan = to_rcar_dmac_chan(chan);
	rchan->mid_rid = dma_spec->args[0];

	return chan;
}
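
/*
 * The single xlate argument is the MID/RID of the requesting module. An
 * illustrative (hypothetical) consumer node would reference the controller
 * as:
 *
 *	dmas = <&dmac0 0x13>, <&dmac0 0x14>;
 *	dma-names = "tx", "rx";
 *
 * where 0x13/0x14 stand in for the module's TX/RX MID/RID values as
 * documented in the SoC datasheet.
 */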

/* -----------------------------------------------------------------------------
 * Power management
 */

#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
	return 0;
}

static int rcar_dmac_runtime_resume(struct device *dev)
{
	struct rcar_dmac *dmac = dev_get_drvdata(dev);

	return rcar_dmac_init(dmac);
}
#endif

static const struct dev_pm_ops rcar_dmac_pm = {
	/*
	 * TODO for system sleep/resume:
	 *   - Wait for the current transfer to complete and stop the device,
	 *   - Resume transfers, if any.
	 */
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
			   NULL)
};

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan,
				unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int ret;

	rchan->index = index;
	rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
	rchan->mid_rid = -EINVAL;

	spin_lock_init(&rchan->lock);

	INIT_LIST_HEAD(&rchan->desc.free);
	INIT_LIST_HEAD(&rchan->desc.pending);
	INIT_LIST_HEAD(&rchan->desc.active);
	INIT_LIST_HEAD(&rchan->desc.done);
	INIT_LIST_HEAD(&rchan->desc.wait);

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (rchan->irq < 0) {
		dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	/*
	 * Initialize the DMA engine channel and add it to the DMA engine
	 * channels list.
	 */
	chan->device = &dmac->engine;
	dma_cookie_init(chan);

	list_add_tail(&chan->device_node, &dmac->engine.channels);

	ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
					rcar_dmac_isr_channel,
					rcar_dmac_isr_channel_thread, 0,
					irqname, rchan);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			rchan->irq, ret);
		return ret;
	}

	return 0;
}

static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
		dev_err(dev, "invalid number of channels %u\n",
			dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}
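
/*
 * A minimal (hypothetical) controller node providing the property parsed
 * above; the per-channel interrupts are named "ch%u" as looked up by
 * platform_get_irq_byname() in rcar_dmac_chan_probe(). All values here are
 * illustrative; see the DT binding documentation for real, SoC-specific
 * examples:
 *
 *	dmac0: dma-controller@e6700000 {
 *		compatible = "renesas,rcar-dmac";
 *		reg = <0 0xe6700000 0 0x20000>;
 *		interrupt-names = "ch0", "ch1";
 *		dma-channels = <2>;
 *		#dma-cells = <1>;
 *	};
 */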

static int rcar_dmac_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
	unsigned int channels_offset = 0;
	struct dma_device *engine;
	struct rcar_dmac *dmac;
	struct resource *mem;
	unsigned int i;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);
	dmac->dev->dma_parms = &dmac->parms;
	dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
	dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));

	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	/*
	 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from
	 * being flushed correctly, resulting in memory corruption. DMAC 0
	 * channel 0 is connected to microTLB 0 on currently supported
	 * platforms, so we can't use it with the IPMMU. As the IOMMU API
	 * operates at the device level we can't disable it selectively, so
	 * ignore channel 0 for now if the device is part of an IOMMU group.
	 */
	if (pdev->dev.iommu_group) {
		dmac->n_channels--;
		channels_offset = 1;
	}

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources. */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dmac->iomem))
		return PTR_ERR(dmac->iomem);

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
		return ret;
	}

	ret = rcar_dmac_init(dmac);
	pm_runtime_put(&pdev->dev);

	if (ret) {
		dev_err(&pdev->dev, "failed to reset device\n");
		goto error;
	}

	/* Initialize the DMA engine. */
	engine = &dmac->engine;

	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	dma_cap_set(DMA_SLAVE, engine->cap_mask);

	engine->dev = &pdev->dev;
	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);

	engine->src_addr_widths = widths;
	engine->dst_addr_widths = widths;
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
	engine->device_config = rcar_dmac_device_config;
	engine->device_pause = rcar_dmac_chan_pause;
	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
	engine->device_tx_status = rcar_dmac_tx_status;
	engine->device_issue_pending = rcar_dmac_issue_pending;
	engine->device_synchronize = rcar_dmac_device_synchronize;

	INIT_LIST_HEAD(&engine->channels);

	for (i = 0; i < dmac->n_channels; ++i) {
		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
					   i + channels_offset);
		if (ret < 0)
			goto error;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto error;

	/*
	 * Register the DMA engine device.
	 *
	 * Default transfer size of 32 bytes requires 32-byte alignment.
	 */
	ret = dma_async_device_register(engine);
	if (ret < 0)
		goto error;

	return 0;

error:
	of_dma_controller_free(pdev->dev.of_node);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int rcar_dmac_remove(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

static void rcar_dmac_shutdown(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	rcar_dmac_stop_all_chan(dmac);
}

static const struct of_device_id rcar_dmac_of_ids[] = {
	{ .compatible = "renesas,rcar-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);

static struct platform_driver rcar_dmac_driver = {
	.driver		= {
		.pm	= &rcar_dmac_pm,
		.name	= "rcar-dmac",
		.of_match_table = rcar_dmac_of_ids,
	},
	.probe		= rcar_dmac_probe,
	.remove		= rcar_dmac_remove,
	.shutdown	= rcar_dmac_shutdown,
};

module_platform_driver(rcar_dmac_driver);

MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");