Commit | Line | Data |
---|---|---|
b9b0a74a | 1 | // SPDX-License-Identifier: GPL-2.0 |
87244fe5 | 2 | /* |
8a6061c3 | 3 | * Renesas R-Car Gen2/Gen3 DMA Controller Driver |
87244fe5 | 4 | * |
8a6061c3 | 5 | * Copyright (C) 2014-2019 Renesas Electronics Inc. |
87244fe5 LP |
6 | * |
7 | * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> | |
87244fe5 LP |
8 | */ |
9 | ||
a8d46a7f | 10 | #include <linux/delay.h> |
ccadee9b | 11 | #include <linux/dma-mapping.h> |
87244fe5 LP |
12 | #include <linux/dmaengine.h> |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/list.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/mutex.h> | |
17 | #include <linux/of.h> | |
18 | #include <linux/of_dma.h> | |
19 | #include <linux/of_platform.h> | |
20 | #include <linux/platform_device.h> | |
21 | #include <linux/pm_runtime.h> | |
22 | #include <linux/slab.h> | |
23 | #include <linux/spinlock.h> | |
24 | ||
25 | #include "../dmaengine.h" | |
26 | ||
27 | /* | |
28 | * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer | |
29 | * @node: entry in the parent's chunks list | |
30 | * @src_addr: device source address | |
31 | * @dst_addr: device destination address | |
32 | * @size: transfer size in bytes | |
33 | */ | |
34 | struct rcar_dmac_xfer_chunk { | |
35 | struct list_head node; | |
36 | ||
37 | dma_addr_t src_addr; | |
38 | dma_addr_t dst_addr; | |
39 | u32 size; | |
40 | }; | |
41 | ||
ccadee9b LP |
42 | /* |
43 | * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk | |
44 | * @sar: value of the SAR register (source address) | |
45 | * @dar: value of the DAR register (destination address) | |
46 | * @tcr: value of the TCR register (transfer count) | |
47 | */ | |
48 | struct rcar_dmac_hw_desc { | |
49 | u32 sar; | |
50 | u32 dar; | |
51 | u32 tcr; | |
52 | u32 reserved; | |
53 | } __attribute__((__packed__)); | |
54 | ||
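The packed rcar_dmac_hw_desc above is the in-memory descriptor format consumed by the DMAC in descriptor mode: three register images plus a reserved word, 16 bytes per stage. A minimal stand-alone sketch (plain user-space C, not kernel code) that checks the layout arithmetic the driver relies on:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative copy of the layout only; field meanings follow the
 * comment block above (SAR, DAR, TCR plus a reserved word). */
struct hw_desc {
	uint32_t sar;		/* source address */
	uint32_t dar;		/* destination address */
	uint32_t tcr;		/* transfer count */
	uint32_t reserved;
} __attribute__((__packed__));

int main(void)
{
	/* Each hardware descriptor occupies exactly 16 bytes, so an array
	 * of them forms the contiguous list handed to the controller. */
	assert(sizeof(struct hw_desc) == 16);
	assert(offsetof(struct hw_desc, tcr) == 8);
	return 0;
}
```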
87244fe5 LP |
55 | /* |
56 | * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor | |
57 | * @async_tx: base DMA asynchronous transaction descriptor | |
58 | * @direction: direction of the DMA transfer | |
59 | * @xfer_shift: log2 of the transfer size | |
60 | * @chcr: value of the channel configuration register for this transfer | |
61 | * @node: entry in the channel's descriptors lists | |
62 | * @chunks: list of transfer chunks for this transfer | |
63 | * @running: the transfer chunk being currently processed | |
ccadee9b | 64 | * @nchunks: number of transfer chunks for this transfer |
1ed1315f | 65 | * @hwdescs.use: whether the transfer descriptor uses hardware descriptors |
ccadee9b LP |
66 | * @hwdescs.mem: hardware descriptors memory for the transfer |
67 | * @hwdescs.dma: device address of the hardware descriptors memory | |
68 | * @hwdescs.size: size of the hardware descriptors in bytes | |
87244fe5 LP |
69 | * @size: transfer size in bytes |
70 | * @cyclic: when set indicates that the DMA transfer is cyclic | |
71 | */ | |
72 | struct rcar_dmac_desc { | |
73 | struct dma_async_tx_descriptor async_tx; | |
74 | enum dma_transfer_direction direction; | |
75 | unsigned int xfer_shift; | |
76 | u32 chcr; | |
77 | ||
78 | struct list_head node; | |
79 | struct list_head chunks; | |
80 | struct rcar_dmac_xfer_chunk *running; | |
ccadee9b LP |
81 | unsigned int nchunks; |
82 | ||
83 | struct { | |
1ed1315f | 84 | bool use; |
ccadee9b LP |
85 | struct rcar_dmac_hw_desc *mem; |
86 | dma_addr_t dma; | |
87 | size_t size; | |
88 | } hwdescs; | |
87244fe5 LP |
89 | |
90 | unsigned int size; | |
91 | bool cyclic; | |
92 | }; | |
93 | ||
94 | #define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx) | |
95 | ||
96 | /* | |
97 | * struct rcar_dmac_desc_page - One page worth of descriptors | |
98 | * @node: entry in the channel's pages list | |
99 | * @descs: array of DMA descriptors | |
100 | * @chunks: array of transfer chunk descriptors | |
101 | */ | |
102 | struct rcar_dmac_desc_page { | |
103 | struct list_head node; | |
104 | ||
105 | union { | |
45ecf27f GS |
106 | DECLARE_FLEX_ARRAY(struct rcar_dmac_desc, descs); |
107 | DECLARE_FLEX_ARRAY(struct rcar_dmac_xfer_chunk, chunks); | |
87244fe5 LP |
108 | }; |
109 | }; | |
110 | ||
111 | #define RCAR_DMAC_DESCS_PER_PAGE \ | |
112 | ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \ | |
113 | sizeof(struct rcar_dmac_desc)) | |
114 | #define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \ | |
115 | ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \ | |
116 | sizeof(struct rcar_dmac_xfer_chunk)) | |
117 | ||
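The two macros above size the per-page arrays by dividing whatever space remains in a page after the node list head by the element size. A stand-alone sketch of the same arithmetic (assuming 64-bit pointers and a 4 KiB page; the structure and element sizes here are made-up stand-ins, not the driver's real ones):

```c
#include <stddef.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL	/* assumed page size */

struct list_head { struct list_head *next, *prev; };

struct desc { char payload[96]; };	/* stand-in descriptor size */

struct desc_page {
	struct list_head node;		/* page header */
	struct desc descs[];		/* flexible array, as in the driver */
};

#define DESCS_PER_PAGE \
	((EXAMPLE_PAGE_SIZE - offsetof(struct desc_page, descs)) / sizeof(struct desc))

int main(void)
{
	/* (4096 - 16) / 96 = 42 descriptors fit in one zeroed page. */
	printf("%zu descriptors per page\n", (size_t)DESCS_PER_PAGE);
	return 0;
}
```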
c5ed08e9 NS |
118 | /* |
119 | * struct rcar_dmac_chan_slave - Slave configuration | |
120 | * @slave_addr: slave memory address | |
121 | * @xfer_size: size (in bytes) of hardware transfers | |
122 | */ | |
123 | struct rcar_dmac_chan_slave { | |
124 | phys_addr_t slave_addr; | |
125 | unsigned int xfer_size; | |
126 | }; | |
127 | ||
9f878603 NS |
128 | /* |
129 | * struct rcar_dmac_chan_map - Map of slave device phys to dma address | |
130 | * @addr: slave dma address | |
131 | * @dir: direction of mapping | |
132 | * @slave: slave configuration that is mapped | |
133 | */ | |
134 | struct rcar_dmac_chan_map { | |
135 | dma_addr_t addr; | |
136 | enum dma_data_direction dir; | |
137 | struct rcar_dmac_chan_slave slave; | |
138 | }; | |
139 | ||
87244fe5 LP |
140 | /* |
141 | * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel | |
142 | * @chan: base DMA channel object | |
143 | * @iomem: channel I/O memory base | |
144 | * @index: index of this channel in the controller | |
427d5ecd | 145 | * @irq: channel IRQ |
c5ed08e9 NS |
146 | * @src: slave memory address and size on the source side |
147 | * @dst: slave memory address and size on the destination side | |
87244fe5 LP |
148 | * @mid_rid: hardware MID/RID for the DMA client using this channel |
149 | * @lock: protects the channel CHCR register and the desc members | |
150 | * @desc.free: list of free descriptors | |
151 | * @desc.pending: list of pending descriptors (submitted with tx_submit) | |
152 | * @desc.active: list of active descriptors (activated with issue_pending) | |
153 | * @desc.done: list of completed descriptors | |
154 | * @desc.wait: list of descriptors waiting for an ack | |
155 | * @desc.running: the descriptor being processed (a member of the active list) | |
156 | * @desc.chunks_free: list of free transfer chunk descriptors | |
157 | * @desc.pages: list of pages used by allocated descriptors | |
158 | */ | |
159 | struct rcar_dmac_chan { | |
160 | struct dma_chan chan; | |
161 | void __iomem *iomem; | |
162 | unsigned int index; | |
427d5ecd | 163 | int irq; |
87244fe5 | 164 | |
c5ed08e9 NS |
165 | struct rcar_dmac_chan_slave src; |
166 | struct rcar_dmac_chan_slave dst; | |
9f878603 | 167 | struct rcar_dmac_chan_map map; |
87244fe5 LP |
168 | int mid_rid; |
169 | ||
170 | spinlock_t lock; | |
171 | ||
172 | struct { | |
173 | struct list_head free; | |
174 | struct list_head pending; | |
175 | struct list_head active; | |
176 | struct list_head done; | |
177 | struct list_head wait; | |
178 | struct rcar_dmac_desc *running; | |
179 | ||
180 | struct list_head chunks_free; | |
181 | ||
182 | struct list_head pages; | |
183 | } desc; | |
184 | }; | |
185 | ||
186 | #define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan) | |
187 | ||
188 | /* | |
189 | * struct rcar_dmac - R-Car Gen2 DMA Controller | |
190 | * @engine: base DMA engine object | |
191 | * @dev: the hardware device | |
e5bfbbb9 GU |
192 | * @dmac_base: remapped base register block |
193 | * @chan_base: remapped channel register block (optional) | |
87244fe5 LP |
194 | * @n_channels: number of available channels |
195 | * @channels: array of DMAC channels | |
cf24aac3 | 196 | * @channels_mask: bitfield of which DMA channels are managed by this driver |
87244fe5 LP |
197 | * @modules: bitmask of client modules in use |
198 | */ | |
199 | struct rcar_dmac { | |
200 | struct dma_device engine; | |
201 | struct device *dev; | |
e5bfbbb9 GU |
202 | void __iomem *dmac_base; |
203 | void __iomem *chan_base; | |
87244fe5 LP |
204 | |
205 | unsigned int n_channels; | |
206 | struct rcar_dmac_chan *channels; | |
fcf8adb7 | 207 | u32 channels_mask; |
87244fe5 | 208 | |
08acf38e | 209 | DECLARE_BITMAP(modules, 256); |
87244fe5 LP |
210 | }; |
211 | ||
212 | #define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine) | |
213 | ||
d249b5fb GU |
214 | #define for_each_rcar_dmac_chan(i, dmac, chan) \ |
215 | for (i = 0, chan = &(dmac)->channels[0]; i < (dmac)->n_channels; i++, chan++) \ | |
216 | if (!((dmac)->channels_mask & BIT(i))) continue; else | |
217 | ||
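The trailing `if (...) continue; else` in for_each_rcar_dmac_chan() is what lets a single-statement loop body bind to the macro while channels absent from channels_mask are skipped: a masked-out index hits the continue, anything else falls through to the caller's statement hanging off the else. A stand-alone sketch of the same pattern with made-up types:

```c
#include <stdio.h>

#define BIT(n) (1U << (n))

struct chan { unsigned int index; };

struct ctrl {
	unsigned int n_channels;
	unsigned int channels_mask;	/* which channels this driver owns */
	struct chan *channels;
};

/* Same shape as the driver's macro: the if/else keeps the statement
 * that follows the macro attached to the else branch. */
#define for_each_chan(i, c, ch) \
	for (i = 0, ch = &(c)->channels[0]; i < (c)->n_channels; i++, ch++) \
		if (!((c)->channels_mask & BIT(i))) continue; else

int main(void)
{
	struct chan chans[4] = { { 0 }, { 1 }, { 2 }, { 3 } };
	struct ctrl c = {
		.n_channels = 4,
		.channels_mask = BIT(0) | BIT(2),
		.channels = chans,
	};
	struct chan *ch;
	unsigned int i;

	for_each_chan(i, &c, ch)
		printf("channel %u is managed\n", ch->index);	/* 0 and 2 */
	return 0;
}
```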
2df4a02a YS |
218 | /* |
219 | * struct rcar_dmac_of_data - This driver's OF data | |
220 | * @chan_offset_base: DMAC channels base offset | |
221 | * @chan_offset_stride: DMAC channels offset stride | |
222 | */ | |
223 | struct rcar_dmac_of_data { | |
224 | u32 chan_offset_base; | |
225 | u32 chan_offset_stride; | |
226 | }; | |
227 | ||
87244fe5 LP |
228 | /* ----------------------------------------------------------------------------- |
229 | * Registers | |
230 | */ | |
231 | ||
87244fe5 LP |
232 | #define RCAR_DMAISTA 0x0020 |
233 | #define RCAR_DMASEC 0x0030 | |
234 | #define RCAR_DMAOR 0x0060 | |
235 | #define RCAR_DMAOR_PRI_FIXED (0 << 8) | |
236 | #define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8) | |
237 | #define RCAR_DMAOR_AE (1 << 2) | |
238 | #define RCAR_DMAOR_DME (1 << 0) | |
2fe6777b | 239 | #define RCAR_DMACHCLR 0x0080 /* Not on R-Car Gen4 */ |
87244fe5 LP |
240 | #define RCAR_DMADPSEC 0x00a0 |
241 | ||
242 | #define RCAR_DMASAR 0x0000 | |
243 | #define RCAR_DMADAR 0x0004 | |
244 | #define RCAR_DMATCR 0x0008 | |
245 | #define RCAR_DMATCR_MASK 0x00ffffff | |
246 | #define RCAR_DMATSR 0x0028 | |
247 | #define RCAR_DMACHCR 0x000c | |
248 | #define RCAR_DMACHCR_CAE (1 << 31) | |
249 | #define RCAR_DMACHCR_CAIE (1 << 30) | |
250 | #define RCAR_DMACHCR_DPM_DISABLED (0 << 28) | |
251 | #define RCAR_DMACHCR_DPM_ENABLED (1 << 28) | |
252 | #define RCAR_DMACHCR_DPM_REPEAT (2 << 28) | |
253 | #define RCAR_DMACHCR_DPM_INFINITE (3 << 28) | |
254 | #define RCAR_DMACHCR_RPT_SAR (1 << 27) | |
255 | #define RCAR_DMACHCR_RPT_DAR (1 << 26) | |
256 | #define RCAR_DMACHCR_RPT_TCR (1 << 25) | |
257 | #define RCAR_DMACHCR_DPB (1 << 22) | |
258 | #define RCAR_DMACHCR_DSE (1 << 19) | |
259 | #define RCAR_DMACHCR_DSIE (1 << 18) | |
260 | #define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3)) | |
261 | #define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3)) | |
262 | #define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3)) | |
263 | #define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3)) | |
264 | #define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3)) | |
265 | #define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3)) | |
266 | #define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3)) | |
267 | #define RCAR_DMACHCR_DM_FIXED (0 << 14) | |
268 | #define RCAR_DMACHCR_DM_INC (1 << 14) | |
269 | #define RCAR_DMACHCR_DM_DEC (2 << 14) | |
270 | #define RCAR_DMACHCR_SM_FIXED (0 << 12) | |
271 | #define RCAR_DMACHCR_SM_INC (1 << 12) | |
272 | #define RCAR_DMACHCR_SM_DEC (2 << 12) | |
273 | #define RCAR_DMACHCR_RS_AUTO (4 << 8) | |
274 | #define RCAR_DMACHCR_RS_DMARS (8 << 8) | |
275 | #define RCAR_DMACHCR_IE (1 << 2) | |
276 | #define RCAR_DMACHCR_TE (1 << 1) | |
277 | #define RCAR_DMACHCR_DE (1 << 0) | |
278 | #define RCAR_DMATCRB 0x0018 | |
279 | #define RCAR_DMATSRB 0x0038 | |
280 | #define RCAR_DMACHCRB 0x001c | |
281 | #define RCAR_DMACHCRB_DCNT(n) ((n) << 24) | |
ccadee9b LP |
282 | #define RCAR_DMACHCRB_DPTR_MASK (0xff << 16) |
283 | #define RCAR_DMACHCRB_DPTR_SHIFT 16 | |
87244fe5 LP |
284 | #define RCAR_DMACHCRB_DRST (1 << 15) |
285 | #define RCAR_DMACHCRB_DTS (1 << 8) | |
286 | #define RCAR_DMACHCRB_SLM_NORMAL (0 << 4) | |
287 | #define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4) | |
288 | #define RCAR_DMACHCRB_PRI(n) ((n) << 0) | |
289 | #define RCAR_DMARS 0x0040 | |
290 | #define RCAR_DMABUFCR 0x0048 | |
291 | #define RCAR_DMABUFCR_MBU(n) ((n) << 16) | |
292 | #define RCAR_DMABUFCR_ULB(n) ((n) << 0) | |
293 | #define RCAR_DMADPBASE 0x0050 | |
294 | #define RCAR_DMADPBASE_MASK 0xfffffff0 | |
295 | #define RCAR_DMADPBASE_SEL (1 << 0) | |
296 | #define RCAR_DMADPCR 0x0054 | |
297 | #define RCAR_DMADPCR_DIPT(n) ((n) << 24) | |
298 | #define RCAR_DMAFIXSAR 0x0010 | |
299 | #define RCAR_DMAFIXDAR 0x0014 | |
300 | #define RCAR_DMAFIXDPBASE 0x0060 | |
301 | ||
2fe6777b YS |
302 | /* For R-Car Gen4 */ |
303 | #define RCAR_GEN4_DMACHCLR 0x0100 | |
e5bfbbb9 | 304 | |
87244fe5 LP |
305 | /* Hardcode the MEMCPY transfer size to 4 bytes. */ |
306 | #define RCAR_DMAC_MEMCPY_XFER_SIZE 4 | |
307 | ||
308 | /* ----------------------------------------------------------------------------- | |
309 | * Device access | |
310 | */ | |
311 | ||
312 | static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data) | |
313 | { | |
314 | if (reg == RCAR_DMAOR) | |
e5bfbbb9 | 315 | writew(data, dmac->dmac_base + reg); |
87244fe5 | 316 | else |
e5bfbbb9 | 317 | writel(data, dmac->dmac_base + reg); |
87244fe5 LP |
318 | } |
319 | ||
320 | static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg) | |
321 | { | |
322 | if (reg == RCAR_DMAOR) | |
e5bfbbb9 | 323 | return readw(dmac->dmac_base + reg); |
87244fe5 | 324 | else |
e5bfbbb9 | 325 | return readl(dmac->dmac_base + reg); |
87244fe5 LP |
326 | } |
327 | ||
328 | static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg) | |
329 | { | |
330 | if (reg == RCAR_DMARS) | |
331 | return readw(chan->iomem + reg); | |
332 | else | |
333 | return readl(chan->iomem + reg); | |
334 | } | |
335 | ||
336 | static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data) | |
337 | { | |
338 | if (reg == RCAR_DMARS) | |
339 | writew(data, chan->iomem + reg); | |
340 | else | |
341 | writel(data, chan->iomem + reg); | |
342 | } | |
343 | ||
245bbd16 GU |
344 | static void rcar_dmac_chan_clear(struct rcar_dmac *dmac, |
345 | struct rcar_dmac_chan *chan) | |
346 | { | |
e5bfbbb9 | 347 | if (dmac->chan_base) |
2fe6777b | 348 | rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1); |
e5bfbbb9 GU |
349 | else |
350 | rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index)); | |
245bbd16 GU |
351 | } |
352 | ||
353 | static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac) | |
354 | { | |
e5bfbbb9 GU |
355 | struct rcar_dmac_chan *chan; |
356 | unsigned int i; | |
357 | ||
358 | if (dmac->chan_base) { | |
359 | for_each_rcar_dmac_chan(i, dmac, chan) | |
2fe6777b | 360 | rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1); |
e5bfbbb9 GU |
361 | } else { |
362 | rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask); | |
363 | } | |
245bbd16 GU |
364 | } |
365 | ||
87244fe5 LP |
366 | /* ----------------------------------------------------------------------------- |
367 | * Initialization and configuration | |
368 | */ | |
369 | ||
370 | static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan) | |
371 | { | |
372 | u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); | |
373 | ||
0f78e3b5 | 374 | return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)); |
87244fe5 LP |
375 | } |
376 | ||
377 | static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) | |
378 | { | |
379 | struct rcar_dmac_desc *desc = chan->desc.running; | |
ccadee9b | 380 | u32 chcr = desc->chcr; |
87244fe5 LP |
381 | |
382 | WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan)); | |
383 | ||
ccadee9b LP |
384 | if (chan->mid_rid >= 0) |
385 | rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); | |
386 | ||
1ed1315f | 387 | if (desc->hwdescs.use) { |
1175f83c KM |
388 | struct rcar_dmac_xfer_chunk *chunk = |
389 | list_first_entry(&desc->chunks, | |
390 | struct rcar_dmac_xfer_chunk, node); | |
3f463061 | 391 | |
ccadee9b LP |
392 | dev_dbg(chan->chan.device->dev, |
393 | "chan%u: queue desc %p: %u@%pad\n", | |
394 | chan->index, desc, desc->nchunks, &desc->hwdescs.dma); | |
395 | ||
87244fe5 | 396 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
1175f83c KM |
397 | rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, |
398 | chunk->src_addr >> 32); | |
399 | rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, | |
400 | chunk->dst_addr >> 32); | |
ccadee9b LP |
401 | rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE, |
402 | desc->hwdescs.dma >> 32); | |
87244fe5 | 403 | #endif |
ccadee9b LP |
404 | rcar_dmac_chan_write(chan, RCAR_DMADPBASE, |
405 | (desc->hwdescs.dma & 0xfffffff0) | | |
406 | RCAR_DMADPBASE_SEL); | |
407 | rcar_dmac_chan_write(chan, RCAR_DMACHCRB, | |
408 | RCAR_DMACHCRB_DCNT(desc->nchunks - 1) | | |
409 | RCAR_DMACHCRB_DRST); | |
87244fe5 | 410 | |
3f463061 LP |
411 | /* |
412 | * Errata: When descriptor memory is accessed through an IOMMU | |
413 | * the DMADAR register isn't initialized automatically from the | |
414 | * first descriptor at beginning of transfer by the DMAC like it | |
415 | * should. Initialize it manually with the destination address | |
416 | * of the first chunk. | |
417 | */ | |
3f463061 LP |
418 | rcar_dmac_chan_write(chan, RCAR_DMADAR, |
419 | chunk->dst_addr & 0xffffffff); | |
420 | ||
ccadee9b LP |
421 | /* |
422 | * Program the descriptor stage interrupt to occur after the end | |
423 | * of the first stage. | |
424 | */ | |
425 | rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1)); | |
426 | ||
427 | chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR | |
428 | | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB; | |
429 | ||
430 | /* | |
431 | * If the descriptor isn't cyclic enable normal descriptor mode | |
432 | * and the transfer completion interrupt. | |
433 | */ | |
434 | if (!desc->cyclic) | |
435 | chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE; | |
436 | /* | |
437 | * If the descriptor is cyclic and has a callback enable the | |
438 | * descriptor stage interrupt in infinite repeat mode. | |
439 | */ | |
440 | else if (desc->async_tx.callback) | |
441 | chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE; | |
442 | /* | |
443 | * Otherwise just select infinite repeat mode without any | |
444 | * interrupt. | |
445 | */ | |
446 | else | |
447 | chcr |= RCAR_DMACHCR_DPM_INFINITE; | |
448 | } else { | |
449 | struct rcar_dmac_xfer_chunk *chunk = desc->running; | |
87244fe5 | 450 | |
ccadee9b LP |
451 | dev_dbg(chan->chan.device->dev, |
452 | "chan%u: queue chunk %p: %u@%pad -> %pad\n", | |
453 | chan->index, chunk, chunk->size, &chunk->src_addr, | |
454 | &chunk->dst_addr); | |
87244fe5 | 455 | |
ccadee9b LP |
456 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
457 | rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, | |
458 | chunk->src_addr >> 32); | |
459 | rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, | |
460 | chunk->dst_addr >> 32); | |
461 | #endif | |
462 | rcar_dmac_chan_write(chan, RCAR_DMASAR, | |
463 | chunk->src_addr & 0xffffffff); | |
464 | rcar_dmac_chan_write(chan, RCAR_DMADAR, | |
465 | chunk->dst_addr & 0xffffffff); | |
466 | rcar_dmac_chan_write(chan, RCAR_DMATCR, | |
467 | chunk->size >> desc->xfer_shift); | |
468 | ||
469 | chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE; | |
470 | } | |
471 | ||
9203dbec KM |
472 | rcar_dmac_chan_write(chan, RCAR_DMACHCR, |
473 | chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE); | |
87244fe5 LP |
474 | } |
475 | ||
476 | static int rcar_dmac_init(struct rcar_dmac *dmac) | |
477 | { | |
478 | u16 dmaor; | |
479 | ||
480 | /* Clear all channels and enable the DMAC globally. */ | |
245bbd16 | 481 | rcar_dmac_chan_clear_all(dmac); |
87244fe5 LP |
482 | rcar_dmac_write(dmac, RCAR_DMAOR, |
483 | RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); | |
484 | ||
485 | dmaor = rcar_dmac_read(dmac, RCAR_DMAOR); | |
486 | if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) { | |
487 | dev_warn(dmac->dev, "DMAOR initialization failed.\n"); | |
488 | return -EIO; | |
489 | } | |
490 | ||
491 | return 0; | |
492 | } | |
493 | ||
494 | /* ----------------------------------------------------------------------------- | |
495 | * Descriptors submission | |
496 | */ | |
497 | ||
498 | static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx) | |
499 | { | |
500 | struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan); | |
501 | struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx); | |
502 | unsigned long flags; | |
503 | dma_cookie_t cookie; | |
504 | ||
505 | spin_lock_irqsave(&chan->lock, flags); | |
506 | ||
507 | cookie = dma_cookie_assign(tx); | |
508 | ||
509 | dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n", | |
510 | chan->index, tx->cookie, desc); | |
511 | ||
512 | list_add_tail(&desc->node, &chan->desc.pending); | |
513 | desc->running = list_first_entry(&desc->chunks, | |
514 | struct rcar_dmac_xfer_chunk, node); | |
515 | ||
516 | spin_unlock_irqrestore(&chan->lock, flags); | |
517 | ||
518 | return cookie; | |
519 | } | |
520 | ||
521 | /* ----------------------------------------------------------------------------- | |
522 | * Descriptors allocation and free | |
523 | */ | |
524 | ||
525 | /* | |
526 | * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors | |
527 | * @chan: the DMA channel | |
528 | * @gfp: allocation flags | |
529 | */ | |
530 | static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) | |
531 | { | |
532 | struct rcar_dmac_desc_page *page; | |
d23c9a0a | 533 | unsigned long flags; |
87244fe5 LP |
534 | LIST_HEAD(list); |
535 | unsigned int i; | |
536 | ||
537 | page = (void *)get_zeroed_page(gfp); | |
538 | if (!page) | |
539 | return -ENOMEM; | |
540 | ||
541 | for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) { | |
542 | struct rcar_dmac_desc *desc = &page->descs[i]; | |
543 | ||
544 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); | |
545 | desc->async_tx.tx_submit = rcar_dmac_tx_submit; | |
546 | INIT_LIST_HEAD(&desc->chunks); | |
547 | ||
548 | list_add_tail(&desc->node, &list); | |
549 | } | |
550 | ||
d23c9a0a | 551 | spin_lock_irqsave(&chan->lock, flags); |
87244fe5 LP |
552 | list_splice_tail(&list, &chan->desc.free); |
553 | list_add_tail(&page->node, &chan->desc.pages); | |
d23c9a0a | 554 | spin_unlock_irqrestore(&chan->lock, flags); |
87244fe5 LP |
555 | |
556 | return 0; | |
557 | } | |
558 | ||
559 | /* | |
560 | * rcar_dmac_desc_put - Release a DMA transfer descriptor | |
561 | * @chan: the DMA channel | |
562 | * @desc: the descriptor | |
563 | * | |
564 | * Put the descriptor and its transfer chunk descriptors back in the channel's | |
1ed1315f LP |
565 | * free descriptor lists. The descriptor's chunks list will be reinitialized to |
566 | * an empty list as a result. | |
87244fe5 | 567 | * |
ccadee9b LP |
568 | * The descriptor must have been removed from the channel's lists before calling |
569 | * this function. | |
87244fe5 LP |
570 | */ |
571 | static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan, | |
572 | struct rcar_dmac_desc *desc) | |
573 | { | |
f3915072 LP |
574 | unsigned long flags; |
575 | ||
576 | spin_lock_irqsave(&chan->lock, flags); | |
87244fe5 | 577 | list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); |
3565fe53 | 578 | list_add(&desc->node, &chan->desc.free); |
f3915072 | 579 | spin_unlock_irqrestore(&chan->lock, flags); |
87244fe5 LP |
580 | } |
581 | ||
582 | static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan) | |
583 | { | |
584 | struct rcar_dmac_desc *desc, *_desc; | |
d23c9a0a | 585 | unsigned long flags; |
ccadee9b | 586 | LIST_HEAD(list); |
87244fe5 | 587 | |
ccadee9b LP |
588 | /* |
589 | * We have to temporarily move all descriptors from the wait list to a | |
590 | * local list as iterating over the wait list, even with | |
591 | * list_for_each_entry_safe, isn't safe if we release the channel lock | |
592 | * around the rcar_dmac_desc_put() call. | |
593 | */ | |
d23c9a0a | 594 | spin_lock_irqsave(&chan->lock, flags); |
ccadee9b | 595 | list_splice_init(&chan->desc.wait, &list); |
d23c9a0a | 596 | spin_unlock_irqrestore(&chan->lock, flags); |
ccadee9b LP |
597 | |
598 | list_for_each_entry_safe(desc, _desc, &list, node) { | |
87244fe5 LP |
599 | if (async_tx_test_ack(&desc->async_tx)) { |
600 | list_del(&desc->node); | |
601 | rcar_dmac_desc_put(chan, desc); | |
602 | } | |
603 | } | |
ccadee9b LP |
604 | |
605 | if (list_empty(&list)) | |
606 | return; | |
607 | ||
608 | /* Put the remaining descriptors back in the wait list. */ | |
d23c9a0a | 609 | spin_lock_irqsave(&chan->lock, flags); |
ccadee9b | 610 | list_splice(&list, &chan->desc.wait); |
d23c9a0a | 611 | spin_unlock_irqrestore(&chan->lock, flags); |
87244fe5 LP |
612 | } |
613 | ||
614 | /* | |
615 | * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer | |
616 | * @chan: the DMA channel | |
617 | * | |
618 | * Locking: This function must be called in a non-atomic context. | |
619 | * | |
620 | * Return: A pointer to the allocated descriptor or NULL if no descriptor can | |
621 | * be allocated. | |
622 | */ | |
623 | static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan) | |
624 | { | |
625 | struct rcar_dmac_desc *desc; | |
d23c9a0a | 626 | unsigned long flags; |
87244fe5 LP |
627 | int ret; |
628 | ||
87244fe5 LP |
629 | /* Recycle acked descriptors before attempting allocation. */ |
630 | rcar_dmac_desc_recycle_acked(chan); | |
631 | ||
d23c9a0a | 632 | spin_lock_irqsave(&chan->lock, flags); |
ccadee9b | 633 | |
a55e07c8 LP |
634 | while (list_empty(&chan->desc.free)) { |
635 | /* | |
636 | * No free descriptors, allocate a page worth of them and try | |
637 | * again, as someone else could race us to get the newly | |
638 | * allocated descriptors. If the allocation fails return an | |
639 | * error. | |
640 | */ | |
d23c9a0a | 641 | spin_unlock_irqrestore(&chan->lock, flags); |
a55e07c8 LP |
642 | ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT); |
643 | if (ret < 0) | |
644 | return NULL; | |
d23c9a0a | 645 | spin_lock_irqsave(&chan->lock, flags); |
a55e07c8 | 646 | } |
87244fe5 | 647 | |
a55e07c8 LP |
648 | desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node); |
649 | list_del(&desc->node); | |
87244fe5 | 650 | |
d23c9a0a | 651 | spin_unlock_irqrestore(&chan->lock, flags); |
87244fe5 LP |
652 | |
653 | return desc; | |
654 | } | |
655 | ||
656 | /* | |
657 | * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks | |
658 | * @chan: the DMA channel | |
659 | * @gfp: allocation flags | |
660 | */ | |
661 | static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) | |
662 | { | |
663 | struct rcar_dmac_desc_page *page; | |
d23c9a0a | 664 | unsigned long flags; |
87244fe5 LP |
665 | LIST_HEAD(list); |
666 | unsigned int i; | |
667 | ||
668 | page = (void *)get_zeroed_page(gfp); | |
669 | if (!page) | |
670 | return -ENOMEM; | |
671 | ||
672 | for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) { | |
673 | struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; | |
674 | ||
675 | list_add_tail(&chunk->node, &list); | |
676 | } | |
677 | ||
d23c9a0a | 678 | spin_lock_irqsave(&chan->lock, flags); |
87244fe5 LP |
679 | list_splice_tail(&list, &chan->desc.chunks_free); |
680 | list_add_tail(&page->node, &chan->desc.pages); | |
d23c9a0a | 681 | spin_unlock_irqrestore(&chan->lock, flags); |
87244fe5 LP |
682 | |
683 | return 0; | |
684 | } | |
685 | ||
686 | /* | |
687 | * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer | |
688 | * @chan: the DMA channel | |
689 | * | |
690 | * Locking: This function must be called in a non-atomic context. | |
691 | * | |
692 | * Return: A pointer to the allocated transfer chunk descriptor or NULL if no | |
693 | * descriptor can be allocated. | |
694 | */ | |
695 | static struct rcar_dmac_xfer_chunk * | |
696 | rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan) | |
697 | { | |
698 | struct rcar_dmac_xfer_chunk *chunk; | |
d23c9a0a | 699 | unsigned long flags; |
87244fe5 LP |
700 | int ret; |
701 | ||
d23c9a0a | 702 | spin_lock_irqsave(&chan->lock, flags); |
87244fe5 | 703 | |
a55e07c8 LP |
704 | while (list_empty(&chan->desc.chunks_free)) { |
705 | /* | |
706 | * No free descriptors, allocate a page worth of them and try | |
707 | * again, as someone else could race us to get the newly | |
708 | * allocated descriptors. If the allocation fails return an | |
709 | * error. | |
710 | */ | |
d23c9a0a | 711 | spin_unlock_irqrestore(&chan->lock, flags); |
a55e07c8 LP |
712 | ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT); |
713 | if (ret < 0) | |
714 | return NULL; | |
d23c9a0a | 715 | spin_lock_irqsave(&chan->lock, flags); |
a55e07c8 | 716 | } |
87244fe5 | 717 | |
a55e07c8 LP |
718 | chunk = list_first_entry(&chan->desc.chunks_free, |
719 | struct rcar_dmac_xfer_chunk, node); | |
720 | list_del(&chunk->node); | |
87244fe5 | 721 | |
d23c9a0a | 722 | spin_unlock_irqrestore(&chan->lock, flags); |
87244fe5 LP |
723 | |
724 | return chunk; | |
725 | } | |
726 | ||
1ed1315f LP |
727 | static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan, |
728 | struct rcar_dmac_desc *desc, size_t size) | |
729 | { | |
730 | /* | |
731 | * dma_alloc_coherent() allocates memory in page size increments. To | |
732 | * avoid reallocating the hardware descriptors when the allocated size | |
733 | * wouldn't change, align the requested size to a multiple of the page |
734 | * size. | |
735 | */ | |
736 | size = PAGE_ALIGN(size); | |
737 | ||
738 | if (desc->hwdescs.size == size) | |
739 | return; | |
740 | ||
741 | if (desc->hwdescs.mem) { | |
6a634808 LP |
742 | dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size, |
743 | desc->hwdescs.mem, desc->hwdescs.dma); | |
1ed1315f LP |
744 | desc->hwdescs.mem = NULL; |
745 | desc->hwdescs.size = 0; | |
746 | } | |
747 | ||
748 | if (!size) | |
749 | return; | |
750 | ||
6a634808 LP |
751 | desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size, |
752 | &desc->hwdescs.dma, GFP_NOWAIT); | |
1ed1315f LP |
753 | if (!desc->hwdescs.mem) |
754 | return; | |
755 | ||
756 | desc->hwdescs.size = size; | |
757 | } | |
758 | ||
ee4b876b JB |
759 | static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, |
760 | struct rcar_dmac_desc *desc) | |
ccadee9b LP |
761 | { |
762 | struct rcar_dmac_xfer_chunk *chunk; | |
763 | struct rcar_dmac_hw_desc *hwdesc; | |
ccadee9b | 764 | |
1ed1315f LP |
765 | rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc)); |
766 | ||
767 | hwdesc = desc->hwdescs.mem; | |
ccadee9b | 768 | if (!hwdesc) |
ee4b876b | 769 | return -ENOMEM; |
ccadee9b | 770 | |
ccadee9b LP |
771 | list_for_each_entry(chunk, &desc->chunks, node) { |
772 | hwdesc->sar = chunk->src_addr; | |
773 | hwdesc->dar = chunk->dst_addr; | |
774 | hwdesc->tcr = chunk->size >> desc->xfer_shift; | |
775 | hwdesc++; | |
776 | } | |
ee4b876b JB |
777 | |
778 | return 0; | |
ccadee9b LP |
779 | } |
780 | ||
87244fe5 LP |
781 | /* ----------------------------------------------------------------------------- |
782 | * Stop and reset | |
783 | */ | |
a8d46a7f KM |
784 | static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan) |
785 | { | |
786 | u32 chcr; | |
787 | unsigned int i; | |
788 | ||
789 | /* | |
790 | * After clearing the DE bit, poll until the hardware actually |
791 | * reports it as 0. |
792 | */ | |
793 | for (i = 0; i < 1024; i++) { | |
794 | chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); | |
795 | if (!(chcr & RCAR_DMACHCR_DE)) | |
796 | return; | |
797 | udelay(1); | |
798 | } | |
799 | ||
800 | dev_err(chan->chan.device->dev, "CHCR DE check error\n"); | |
801 | } | |
87244fe5 | 802 | |
4de1247a | 803 | static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan) |
73a47bd0 KM |
804 | { |
805 | u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); | |
806 | ||
73a47bd0 KM |
807 | /* set DE=0 and flush remaining data */ |
808 | rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE)); | |
809 | ||
810 | /* make sure all remaining data was flushed */ | |
811 | rcar_dmac_chcr_de_barrier(chan); | |
4de1247a YS |
812 | } |
813 | ||
87244fe5 LP |
814 | static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) |
815 | { | |
816 | u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); | |
817 | ||
ccadee9b | 818 | chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE | |
9203dbec KM |
819 | RCAR_DMACHCR_TE | RCAR_DMACHCR_DE | |
820 | RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE); | |
87244fe5 | 821 | rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); |
a8d46a7f | 822 | rcar_dmac_chcr_de_barrier(chan); |
87244fe5 LP |
823 | } |
824 | ||
825 | static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan) | |
826 | { | |
827 | struct rcar_dmac_desc *desc, *_desc; | |
828 | unsigned long flags; | |
829 | LIST_HEAD(descs); | |
830 | ||
831 | spin_lock_irqsave(&chan->lock, flags); | |
832 | ||
833 | /* Move all non-free descriptors to the local lists. */ | |
834 | list_splice_init(&chan->desc.pending, &descs); | |
835 | list_splice_init(&chan->desc.active, &descs); | |
836 | list_splice_init(&chan->desc.done, &descs); | |
837 | list_splice_init(&chan->desc.wait, &descs); | |
838 | ||
839 | chan->desc.running = NULL; | |
840 | ||
841 | spin_unlock_irqrestore(&chan->lock, flags); | |
842 | ||
843 | list_for_each_entry_safe(desc, _desc, &descs, node) { | |
844 | list_del(&desc->node); | |
845 | rcar_dmac_desc_put(chan, desc); | |
846 | } | |
847 | } | |
848 | ||
9203dbec | 849 | static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac) |
87244fe5 | 850 | { |
d249b5fb | 851 | struct rcar_dmac_chan *chan; |
87244fe5 LP |
852 | unsigned int i; |
853 | ||
854 | /* Stop all channels. */ | |
d249b5fb | 855 | for_each_rcar_dmac_chan(i, dmac, chan) { |
87244fe5 | 856 | /* Stop and reinitialize the channel. */ |
45c9a603 | 857 | spin_lock_irq(&chan->lock); |
87244fe5 | 858 | rcar_dmac_chan_halt(chan); |
45c9a603 | 859 | spin_unlock_irq(&chan->lock); |
87244fe5 LP |
860 | } |
861 | } | |
862 | ||
8115ce74 YS |
863 | static int rcar_dmac_chan_pause(struct dma_chan *chan) |
864 | { | |
865 | unsigned long flags; | |
866 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
867 | ||
868 | spin_lock_irqsave(&rchan->lock, flags); | |
869 | rcar_dmac_clear_chcr_de(rchan); | |
870 | spin_unlock_irqrestore(&rchan->lock, flags); | |
871 | ||
872 | return 0; | |
873 | } | |
9203dbec | 874 | |
87244fe5 LP |
875 | /* ----------------------------------------------------------------------------- |
876 | * Descriptors preparation | |
877 | */ | |
878 | ||
879 | static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan, | |
880 | struct rcar_dmac_desc *desc) | |
881 | { | |
882 | static const u32 chcr_ts[] = { | |
883 | RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B, | |
884 | RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B, | |
885 | RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B, | |
886 | RCAR_DMACHCR_TS_64B, | |
887 | }; | |
888 | ||
889 | unsigned int xfer_size; | |
890 | u32 chcr; | |
891 | ||
892 | switch (desc->direction) { | |
893 | case DMA_DEV_TO_MEM: | |
894 | chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED | |
895 | | RCAR_DMACHCR_RS_DMARS; | |
c5ed08e9 | 896 | xfer_size = chan->src.xfer_size; |
87244fe5 LP |
897 | break; |
898 | ||
899 | case DMA_MEM_TO_DEV: | |
900 | chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC | |
901 | | RCAR_DMACHCR_RS_DMARS; | |
c5ed08e9 | 902 | xfer_size = chan->dst.xfer_size; |
87244fe5 LP |
903 | break; |
904 | ||
905 | case DMA_MEM_TO_MEM: | |
906 | default: | |
907 | chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC | |
908 | | RCAR_DMACHCR_RS_AUTO; | |
909 | xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE; | |
910 | break; | |
911 | } | |
912 | ||
913 | desc->xfer_shift = ilog2(xfer_size); | |
914 | desc->chcr = chcr | chcr_ts[desc->xfer_shift]; | |
915 | } | |
916 | ||
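rcar_dmac_chan_configure_desc() above turns the per-direction transfer width into xfer_shift = ilog2(xfer_size) and uses that shift both to index the chcr_ts[] table and, later, to convert byte counts into TCR units. A stand-alone sketch of that mapping, with the TS encodings copied from the RCAR_DMACHCR_TS_* definitions earlier in the file (user-space C, not kernel code):

```c
#include <stdio.h>
#include <stdint.h>

/* TS field encodings in chcr_ts[] order: indexed by log2(xfer_size). */
static const uint32_t chcr_ts[] = {
	(0u << 20) | (0u << 3),		/* 1 byte   */
	(0u << 20) | (1u << 3),		/* 2 bytes  */
	(0u << 20) | (2u << 3),		/* 4 bytes  */
	(1u << 20) | (3u << 3),		/* 8 bytes  */
	(0u << 20) | (3u << 3),		/* 16 bytes */
	(1u << 20) | (0u << 3),		/* 32 bytes */
	(1u << 20) | (1u << 3),		/* 64 bytes */
};

/* Integer log2 for power-of-two sizes, standing in for ilog2(). */
static unsigned int xfer_shift(unsigned int xfer_size)
{
	unsigned int shift = 0;

	while (xfer_size > 1) {
		xfer_size >>= 1;
		shift++;
	}
	return shift;
}

int main(void)
{
	static const unsigned int sizes[] = { 1, 2, 4, 8, 16, 32, 64 };

	for (unsigned int i = 0; i < 7; i++) {
		unsigned int shift = xfer_shift(sizes[i]);

		printf("xfer_size %2u -> shift %u, TS bits 0x%06x\n",
		       sizes[i], shift, (unsigned int)chcr_ts[shift]);
	}
	return 0;
}
```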
917 | /* | |
918 | * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list | |
919 | * | |
920 | * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also | |
921 | * converted to scatter-gather to guarantee consistent locking and correct |
922 | * list manipulation. For slave DMA, direction carries the usual meaning: |
923 | * the SG list describes RAM and the addr variable holds the slave address, |
924 | * e.g. the FIFO I/O register. For MEMCPY, direction equals DMA_MEM_TO_MEM |
925 | * and the SG list contains a single element pointing at the source buffer. |
926 | */ | |
927 | static struct dma_async_tx_descriptor * | |
928 | rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, | |
929 | unsigned int sg_len, dma_addr_t dev_addr, | |
930 | enum dma_transfer_direction dir, unsigned long dma_flags, | |
931 | bool cyclic) | |
932 | { | |
933 | struct rcar_dmac_xfer_chunk *chunk; | |
934 | struct rcar_dmac_desc *desc; | |
935 | struct scatterlist *sg; | |
ccadee9b | 936 | unsigned int nchunks = 0; |
87244fe5 LP |
937 | unsigned int max_chunk_size; |
938 | unsigned int full_size = 0; | |
1175f83c | 939 | bool cross_boundary = false; |
87244fe5 | 940 | unsigned int i; |
1175f83c KM |
941 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
942 | u32 high_dev_addr; | |
943 | u32 high_mem_addr; | |
944 | #endif | |
87244fe5 LP |
945 | |
946 | desc = rcar_dmac_desc_get(chan); | |
947 | if (!desc) | |
948 | return NULL; | |
949 | ||
950 | desc->async_tx.flags = dma_flags; | |
951 | desc->async_tx.cookie = -EBUSY; | |
952 | ||
953 | desc->cyclic = cyclic; | |
954 | desc->direction = dir; | |
955 | ||
956 | rcar_dmac_chan_configure_desc(chan, desc); | |
957 | ||
d716d9b7 | 958 | max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift; |
87244fe5 LP |
959 | |
960 | /* | |
961 | * Allocate and fill the transfer chunk descriptors. We own the only | |
962 | * reference to the DMA descriptor, there's no need for locking. | |
963 | */ | |
964 | for_each_sg(sgl, sg, sg_len, i) { | |
965 | dma_addr_t mem_addr = sg_dma_address(sg); | |
966 | unsigned int len = sg_dma_len(sg); | |
967 | ||
968 | full_size += len; | |
969 | ||
1175f83c KM |
970 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
971 | if (i == 0) { | |
972 | high_dev_addr = dev_addr >> 32; | |
973 | high_mem_addr = mem_addr >> 32; | |
974 | } | |
975 | ||
976 | if ((dev_addr >> 32 != high_dev_addr) || | |
977 | (mem_addr >> 32 != high_mem_addr)) | |
978 | cross_boundary = true; | |
979 | #endif | |
87244fe5 LP |
980 | while (len) { |
981 | unsigned int size = min(len, max_chunk_size); | |
982 | ||
983 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | |
984 | /* | |
985 | * Prevent individual transfers from crossing 4GB | |
986 | * boundaries. | |
987 | */ | |
1175f83c | 988 | if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) { |
87244fe5 | 989 | size = ALIGN(dev_addr, 1ULL << 32) - dev_addr; |
1175f83c KM |
990 | cross_boundary = true; |
991 | } | |
992 | if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) { | |
87244fe5 | 993 | size = ALIGN(mem_addr, 1ULL << 32) - mem_addr; |
1175f83c KM |
994 | cross_boundary = true; |
995 | } | |
87244fe5 LP |
996 | #endif |
997 | ||
998 | chunk = rcar_dmac_xfer_chunk_get(chan); | |
999 | if (!chunk) { | |
1000 | rcar_dmac_desc_put(chan, desc); | |
1001 | return NULL; | |
1002 | } | |
1003 | ||
1004 | if (dir == DMA_DEV_TO_MEM) { | |
1005 | chunk->src_addr = dev_addr; | |
1006 | chunk->dst_addr = mem_addr; | |
1007 | } else { | |
1008 | chunk->src_addr = mem_addr; | |
1009 | chunk->dst_addr = dev_addr; | |
1010 | } | |
1011 | ||
1012 | chunk->size = size; | |
1013 | ||
1014 | dev_dbg(chan->chan.device->dev, | |
1015 | "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n", | |
1016 | chan->index, chunk, desc, i, sg, size, len, | |
1017 | &chunk->src_addr, &chunk->dst_addr); | |
1018 | ||
1019 | mem_addr += size; | |
1020 | if (dir == DMA_MEM_TO_MEM) | |
1021 | dev_addr += size; | |
1022 | ||
1023 | len -= size; | |
1024 | ||
1025 | list_add_tail(&chunk->node, &desc->chunks); | |
ccadee9b | 1026 | nchunks++; |
87244fe5 LP |
1027 | } |
1028 | } | |
1029 | ||
ccadee9b | 1030 | desc->nchunks = nchunks; |
87244fe5 LP |
1031 | desc->size = full_size; |
1032 | ||
ccadee9b LP |
1033 | /* |
1034 | * Use hardware descriptor lists if possible when more than one chunk | |
1035 | * needs to be transferred (otherwise they don't make much sense). | |
1036 | * | |
1175f83c KM |
1037 | * The source and destination addresses must be located in the same |
1038 | * 4 GiB region of the 40-bit address space when hardware descriptors |
1039 | * are used; cross_boundary tracks whether that constraint holds. |
ccadee9b | 1040 | */ |
1175f83c | 1041 | desc->hwdescs.use = !cross_boundary && nchunks > 1; |
ee4b876b JB |
1042 | if (desc->hwdescs.use) { |
1043 | if (rcar_dmac_fill_hwdesc(chan, desc) < 0) | |
1044 | desc->hwdescs.use = false; | |
1045 | } | |
ccadee9b | 1046 | |
87244fe5 LP |
1047 | return &desc->async_tx; |
1048 | } | |
1049 | ||
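Inside rcar_dmac_chan_prep_sg() every scatterlist entry is cut into chunks that respect two limits: the 24-bit TCR count (so max_chunk_size = RCAR_DMATCR_MASK << xfer_shift) and, on 64-bit DMA configurations, the rule that a single transfer may not cross a 4 GiB address boundary. A stand-alone sketch of that splitting arithmetic for one contiguous buffer (illustration only; the boundary handling in the driver is compiled in only under CONFIG_ARCH_DMA_ADDR_T_64BIT):

```c
#include <stdint.h>
#include <stdio.h>

#define TCR_MASK 0x00ffffffu		/* 24-bit transfer count */

/* Cap each chunk by the TCR limit and stop it at the next 4 GiB line,
 * mirroring how the driver splits each SG entry. */
static void split(uint64_t addr, uint64_t len, unsigned int xfer_shift)
{
	uint64_t max_chunk = (uint64_t)TCR_MASK << xfer_shift;

	while (len) {
		uint64_t size = len < max_chunk ? len : max_chunk;
		uint64_t next_4g = (addr | 0xffffffffull) + 1;

		if (addr + size > next_4g)
			size = next_4g - addr;

		printf("chunk: %#llx + %#llx bytes\n",
		       (unsigned long long)addr, (unsigned long long)size);
		addr += size;
		len -= size;
	}
}

int main(void)
{
	/* A 64 MiB buffer straddling the 4 GiB boundary, 4-byte accesses. */
	split(0xfffe0000ull, 64ull << 20, 2);
	return 0;
}
```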
1050 | /* ----------------------------------------------------------------------------- | |
1051 | * DMA engine operations | |
1052 | */ | |
1053 | ||
1054 | static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan) | |
1055 | { | |
1056 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
1057 | int ret; | |
1058 | ||
87244fe5 LP |
1059 | INIT_LIST_HEAD(&rchan->desc.chunks_free); |
1060 | INIT_LIST_HEAD(&rchan->desc.pages); | |
1061 | ||
1062 | /* Preallocate descriptors. */ | |
1063 | ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL); | |
1064 | if (ret < 0) | |
1065 | return -ENOMEM; | |
1066 | ||
1067 | ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL); | |
1068 | if (ret < 0) | |
1069 | return -ENOMEM; | |
1070 | ||
1071 | return pm_runtime_get_sync(chan->device->dev); | |
1072 | } | |
1073 | ||
1074 | static void rcar_dmac_free_chan_resources(struct dma_chan *chan) | |
1075 | { | |
1076 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
1077 | struct rcar_dmac *dmac = to_rcar_dmac(chan->device); | |
3139dc8d | 1078 | struct rcar_dmac_chan_map *map = &rchan->map; |
87244fe5 | 1079 | struct rcar_dmac_desc_page *page, *_page; |
1ed1315f LP |
1080 | struct rcar_dmac_desc *desc; |
1081 | LIST_HEAD(list); | |
87244fe5 LP |
1082 | |
1083 | /* Protect against ISR */ | |
1084 | spin_lock_irq(&rchan->lock); | |
1085 | rcar_dmac_chan_halt(rchan); | |
1086 | spin_unlock_irq(&rchan->lock); | |
1087 | ||
a1ed64ef NS |
1088 | /* |
1089 | * Now no new interrupts will occur, but one might already be | |
1090 | * running. Wait for it to finish before freeing resources. | |
1091 | */ | |
1092 | synchronize_irq(rchan->irq); | |
87244fe5 LP |
1093 | |
1094 | if (rchan->mid_rid >= 0) { | |
1095 | /* The caller is holding dma_list_mutex */ | |
1096 | clear_bit(rchan->mid_rid, dmac->modules); | |
1097 | rchan->mid_rid = -EINVAL; | |
1098 | } | |
1099 | ||
f7638c90 LP |
1100 | list_splice_init(&rchan->desc.free, &list); |
1101 | list_splice_init(&rchan->desc.pending, &list); | |
1102 | list_splice_init(&rchan->desc.active, &list); | |
1103 | list_splice_init(&rchan->desc.done, &list); | |
1104 | list_splice_init(&rchan->desc.wait, &list); | |
1ed1315f | 1105 | |
48c73659 MHF |
1106 | rchan->desc.running = NULL; |
1107 | ||
1ed1315f LP |
1108 | list_for_each_entry(desc, &list, node) |
1109 | rcar_dmac_realloc_hwdesc(rchan, desc, 0); | |
1110 | ||
87244fe5 LP |
1111 | list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { |
1112 | list_del(&page->node); | |
1113 | free_page((unsigned long)page); | |
1114 | } | |
1115 | ||
3139dc8d NS |
1116 | /* Remove slave mapping if present. */ |
1117 | if (map->slave.xfer_size) { | |
1118 | dma_unmap_resource(chan->device->dev, map->addr, | |
1119 | map->slave.xfer_size, map->dir, 0); | |
1120 | map->slave.xfer_size = 0; | |
1121 | } | |
1122 | ||
87244fe5 LP |
1123 | pm_runtime_put(chan->device->dev); |
1124 | } | |
1125 | ||
1126 | static struct dma_async_tx_descriptor * | |
1127 | rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, | |
1128 | dma_addr_t dma_src, size_t len, unsigned long flags) | |
1129 | { | |
1130 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
1131 | struct scatterlist sgl; | |
1132 | ||
1133 | if (!len) | |
1134 | return NULL; | |
1135 | ||
1136 | sg_init_table(&sgl, 1); | |
1137 | sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len, | |
1138 | offset_in_page(dma_src)); | |
1139 | sg_dma_address(&sgl) = dma_src; | |
1140 | sg_dma_len(&sgl) = len; | |
1141 | ||
1142 | return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest, | |
1143 | DMA_MEM_TO_MEM, flags, false); | |
1144 | } | |
1145 | ||
9f878603 NS |
1146 | static int rcar_dmac_map_slave_addr(struct dma_chan *chan, |
1147 | enum dma_transfer_direction dir) | |
1148 | { | |
1149 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
1150 | struct rcar_dmac_chan_map *map = &rchan->map; | |
1151 | phys_addr_t dev_addr; | |
1152 | size_t dev_size; | |
1153 | enum dma_data_direction dev_dir; | |
1154 | ||
1155 | if (dir == DMA_DEV_TO_MEM) { | |
1156 | dev_addr = rchan->src.slave_addr; | |
1157 | dev_size = rchan->src.xfer_size; | |
1158 | dev_dir = DMA_TO_DEVICE; | |
1159 | } else { | |
1160 | dev_addr = rchan->dst.slave_addr; | |
1161 | dev_size = rchan->dst.xfer_size; | |
1162 | dev_dir = DMA_FROM_DEVICE; | |
1163 | } | |
1164 | ||
1165 | /* Reuse current map if possible. */ | |
1166 | if (dev_addr == map->slave.slave_addr && | |
1167 | dev_size == map->slave.xfer_size && | |
1168 | dev_dir == map->dir) | |
1169 | return 0; | |
1170 | ||
1171 | /* Remove old mapping if present. */ | |
1172 | if (map->slave.xfer_size) | |
1173 | dma_unmap_resource(chan->device->dev, map->addr, | |
1174 | map->slave.xfer_size, map->dir, 0); | |
1175 | map->slave.xfer_size = 0; | |
1176 | ||
1177 | /* Create new slave address map. */ | |
1178 | map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size, | |
1179 | dev_dir, 0); | |
1180 | ||
1181 | if (dma_mapping_error(chan->device->dev, map->addr)) { | |
1182 | dev_err(chan->device->dev, | |
1183 | "chan%u: failed to map %zx@%pap", rchan->index, | |
1184 | dev_size, &dev_addr); | |
1185 | return -EIO; | |
1186 | } | |
1187 | ||
1188 | dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n", | |
1189 | rchan->index, dev_size, &dev_addr, &map->addr, | |
1190 | dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE"); | |
1191 | ||
1192 | map->slave.slave_addr = dev_addr; | |
1193 | map->slave.xfer_size = dev_size; | |
1194 | map->dir = dev_dir; | |
1195 | ||
1196 | return 0; | |
1197 | } | |
1198 | ||
87244fe5 LP |
1199 | static struct dma_async_tx_descriptor * |
1200 | rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |
1201 | unsigned int sg_len, enum dma_transfer_direction dir, | |
1202 | unsigned long flags, void *context) | |
1203 | { | |
1204 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
87244fe5 LP |
1205 | |
1206 | /* Someone calling slave DMA on a generic channel? */ | |
78efb76a | 1207 | if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { |
87244fe5 LP |
1208 | dev_warn(chan->device->dev, |
1209 | "%s: bad parameter: len=%d, id=%d\n", | |
1210 | __func__, sg_len, rchan->mid_rid); | |
1211 | return NULL; | |
1212 | } | |
1213 | ||
9f878603 NS |
1214 | if (rcar_dmac_map_slave_addr(chan, dir)) |
1215 | return NULL; | |
1216 | ||
1217 | return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, | |
87244fe5 LP |
1218 | dir, flags, false); |
1219 | } | |
1220 | ||
1221 | #define RCAR_DMAC_MAX_SG_LEN 32 | |
1222 | ||
1223 | static struct dma_async_tx_descriptor * | |
1224 | rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, | |
1225 | size_t buf_len, size_t period_len, | |
1226 | enum dma_transfer_direction dir, unsigned long flags) | |
1227 | { | |
1228 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
1229 | struct dma_async_tx_descriptor *desc; | |
1230 | struct scatterlist *sgl; | |
87244fe5 LP |
1231 | unsigned int sg_len; |
1232 | unsigned int i; | |
1233 | ||
1234 | /* Someone calling slave DMA on a generic channel? */ | |
1235 | if (rchan->mid_rid < 0 || buf_len < period_len) { | |
1236 | dev_warn(chan->device->dev, | |
1237 | "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n", | |
1238 | __func__, buf_len, period_len, rchan->mid_rid); | |
1239 | return NULL; | |
1240 | } | |
1241 | ||
9f878603 NS |
1242 | if (rcar_dmac_map_slave_addr(chan, dir)) |
1243 | return NULL; | |
1244 | ||
87244fe5 LP |
1245 | sg_len = buf_len / period_len; |
1246 | if (sg_len > RCAR_DMAC_MAX_SG_LEN) { | |
1247 | dev_err(chan->device->dev, | |
1986f03b | 1248 | "chan%u: sg length %d exceeds limit %d", |
87244fe5 LP |
1249 | rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN); |
1250 | return NULL; | |
1251 | } | |
1252 | ||
1253 | /* | |
1254 | * Allocate the sg list dynamically as it would consume too much stack | |
1255 | * space. | |
1256 | */ | |
7ffd5c83 | 1257 | sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT); |
87244fe5 LP |
1258 | if (!sgl) |
1259 | return NULL; | |
1260 | ||
1261 | sg_init_table(sgl, sg_len); | |
1262 | ||
1263 | for (i = 0; i < sg_len; ++i) { | |
1264 | dma_addr_t src = buf_addr + (period_len * i); | |
1265 | ||
1266 | sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len, | |
1267 | offset_in_page(src)); | |
1268 | sg_dma_address(&sgl[i]) = src; | |
1269 | sg_dma_len(&sgl[i]) = period_len; | |
1270 | } | |
1271 | ||
9f878603 | 1272 | desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, |
87244fe5 LP |
1273 | dir, flags, true); |
1274 | ||
1275 | kfree(sgl); | |
1276 | return desc; | |
1277 | } | |
1278 | ||
1279 | static int rcar_dmac_device_config(struct dma_chan *chan, | |
1280 | struct dma_slave_config *cfg) | |
1281 | { | |
1282 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
1283 | ||
1284 | /* | |
1285 | * We could lock this, but you shouldn't be configuring the | |
1286 | * channel while using it... |
1287 | */ | |
c5ed08e9 NS |
1288 | rchan->src.slave_addr = cfg->src_addr; |
1289 | rchan->dst.slave_addr = cfg->dst_addr; | |
1290 | rchan->src.xfer_size = cfg->src_addr_width; | |
1291 | rchan->dst.xfer_size = cfg->dst_addr_width; | |
87244fe5 LP |
1292 | |
1293 | return 0; | |
1294 | } | |
1295 | ||
1296 | static int rcar_dmac_chan_terminate_all(struct dma_chan *chan) | |
1297 | { | |
1298 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
1299 | unsigned long flags; | |
1300 | ||
1301 | spin_lock_irqsave(&rchan->lock, flags); | |
1302 | rcar_dmac_chan_halt(rchan); | |
1303 | spin_unlock_irqrestore(&rchan->lock, flags); | |
1304 | ||
1305 | /* | |
1306 | * FIXME: No new interrupt can occur now, but the IRQ thread might still | |
1307 | * be running. | |
1308 | */ | |
1309 | ||
1310 | rcar_dmac_chan_reinit(rchan); | |
1311 | ||
1312 | return 0; | |
1313 | } | |
1314 | ||
1315 | static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, | |
1316 | dma_cookie_t cookie) | |
1317 | { | |
1318 | struct rcar_dmac_desc *desc = chan->desc.running; | |
ccadee9b | 1319 | struct rcar_dmac_xfer_chunk *running = NULL; |
87244fe5 | 1320 | struct rcar_dmac_xfer_chunk *chunk; |
55bd582b | 1321 | enum dma_status status; |
87244fe5 | 1322 | unsigned int residue = 0; |
ccadee9b | 1323 | unsigned int dptr = 0; |
6e7da747 AD |
1324 | unsigned int chcrb; |
1325 | unsigned int tcrb; | |
1326 | unsigned int i; | |
87244fe5 LP |
1327 | |
1328 | if (!desc) | |
1329 | return 0; | |
1330 | ||
55bd582b LP |
1331 | /* |
1332 | * If the cookie corresponds to a descriptor that has been completed | |
1333 | * there is no residue. The same check has already been performed by the | |
1334 | * caller but without holding the channel lock, so the descriptor could | |
1335 | * now be complete. | |
1336 | */ | |
1337 | status = dma_cookie_status(&chan->chan, cookie, NULL); | |
1338 | if (status == DMA_COMPLETE) | |
1339 | return 0; | |
1340 | ||
87244fe5 LP |
1341 | /* |
1342 | * If the cookie doesn't correspond to the currently running transfer | |
1343 | * then the descriptor hasn't been processed yet, and the residue is | |
1344 | * equal to the full descriptor size. | |
3e081628 YS |
1345 | * Also, a client driver may call this function before |
1346 | * rcar_dmac_isr_channel_thread() runs. In that case "desc.running" will |
1347 | * already point to the next descriptor and the completed descriptor will |
1348 | * be on the done list. So, if the argument cookie matches the cookie of a |
1349 | * descriptor on the done list, we can assume the residue is zero. |
87244fe5 | 1350 | */ |
55bd582b | 1351 | if (cookie != desc->async_tx.cookie) { |
3e081628 YS |
1352 | list_for_each_entry(desc, &chan->desc.done, node) { |
1353 | if (cookie == desc->async_tx.cookie) | |
1354 | return 0; | |
1355 | } | |
55bd582b LP |
1356 | list_for_each_entry(desc, &chan->desc.pending, node) { |
1357 | if (cookie == desc->async_tx.cookie) | |
1358 | return desc->size; | |
1359 | } | |
1360 | list_for_each_entry(desc, &chan->desc.active, node) { | |
1361 | if (cookie == desc->async_tx.cookie) | |
1362 | return desc->size; | |
1363 | } | |
1364 | ||
1365 | /* | |
1366 | * No descriptor found for the cookie, there's thus no residue. | |
1367 | * This shouldn't happen if the calling driver passes a correct | |
1368 | * cookie value. | |
1369 | */ | |
1370 | WARN(1, "No descriptor for cookie!"); | |
1371 | return 0; | |
1372 | } | |
87244fe5 | 1373 | |
6e7da747 AD |
1374 | /* |
1375 | * We need to read two registers. | |
1376 | * Make sure the control register does not advance to the next chunk |
1377 | * while the counter is being read. |
1378 | * Trying it 3 times should be enough: Initial read, retry, retry | |
1379 | * for the paranoid. | |
1380 | */ | |
1381 | for (i = 0; i < 3; i++) { | |
1382 | chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & | |
1383 | RCAR_DMACHCRB_DPTR_MASK; | |
1384 | tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB); | |
1385 | /* Still the same? */ | |
1386 | if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & | |
1387 | RCAR_DMACHCRB_DPTR_MASK)) | |
1388 | break; | |
1389 | } | |
1390 | WARN_ONCE(i >= 3, "residue might not be continuous!"); |
1391 | ||
ccadee9b LP |
1392 | /* |
1393 | * In descriptor mode the descriptor running pointer is not maintained | |
1394 | * by the interrupt handler, find the running descriptor from the | |
1395 | * descriptor pointer field in the CHCRB register. In non-descriptor | |
1396 | * mode just use the running descriptor pointer. | |
1397 | */ | |
1ed1315f | 1398 | if (desc->hwdescs.use) { |
6e7da747 | 1399 | dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT; |
56b17705 KM |
1400 | if (dptr == 0) |
1401 | dptr = desc->nchunks; | |
1402 | dptr--; | |
ccadee9b LP |
1403 | WARN_ON(dptr >= desc->nchunks); |
1404 | } else { | |
1405 | running = desc->running; | |
1406 | } | |
1407 | ||
87244fe5 LP |
1408 | /* Compute the size of all chunks still to be transferred. */ |
1409 | list_for_each_entry_reverse(chunk, &desc->chunks, node) { | |
ccadee9b | 1410 | if (chunk == running || ++dptr == desc->nchunks) |
87244fe5 LP |
1411 | break; |
1412 | ||
1413 | residue += chunk->size; | |
1414 | } | |
1415 | ||
1416 | /* Add the residue for the current chunk. */ | |
6e7da747 | 1417 | residue += tcrb << desc->xfer_shift; |
87244fe5 LP |
1418 | |
1419 | return residue; | |
1420 | } | |
1421 | ||
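The retry loop near the top of rcar_dmac_chan_get_residue() reads DPTR (in CHCRB) and TCRB as a pair and accepts the sample only if DPTR is unchanged afterwards, so the remaining count is never attributed to the wrong chunk. A stand-alone sketch of that consistent-pair read, with plain variables standing in for the MMIO registers:

```c
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t reg_chcrb;	/* descriptor pointer lives in bits 23:16 */
static volatile uint32_t reg_tcrb;	/* remaining transfer count */

#define DPTR_MASK (0xffu << 16)

/* Retry up to three times if DPTR moved while TCRB was being read. */
static int read_residue_regs(uint32_t *dptr, uint32_t *tcrb)
{
	for (int i = 0; i < 3; i++) {
		uint32_t chcrb = reg_chcrb & DPTR_MASK;

		*tcrb = reg_tcrb;
		if (chcrb == (reg_chcrb & DPTR_MASK)) {
			*dptr = chcrb >> 16;
			return 0;
		}
	}
	return -1;	/* still racing after three attempts */
}

int main(void)
{
	uint32_t dptr, tcrb;

	reg_chcrb = 2u << 16;
	reg_tcrb = 0x100;
	if (!read_residue_regs(&dptr, &tcrb))
		printf("DPTR %u, TCRB %#x\n", (unsigned int)dptr, (unsigned int)tcrb);
	return 0;
}
```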
1422 | static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, | |
1423 | dma_cookie_t cookie, | |
1424 | struct dma_tx_state *txstate) | |
1425 | { | |
1426 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
1427 | enum dma_status status; | |
1428 | unsigned long flags; | |
1429 | unsigned int residue; | |
907bd68a | 1430 | bool cyclic; |
87244fe5 LP |
1431 | |
1432 | status = dma_cookie_status(chan, cookie, txstate); | |
1433 | if (status == DMA_COMPLETE || !txstate) | |
1434 | return status; | |
1435 | ||
1436 | spin_lock_irqsave(&rchan->lock, flags); | |
1437 | residue = rcar_dmac_chan_get_residue(rchan, cookie); | |
907bd68a | 1438 | cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false; |
87244fe5 LP |
1439 | spin_unlock_irqrestore(&rchan->lock, flags); |
1440 | ||
3544d287 | 1441 | /* if there's no residue, the cookie is complete */ |
907bd68a | 1442 | if (!residue && !cyclic) |
3544d287 MHF |
1443 | return DMA_COMPLETE; |
1444 | ||
87244fe5 LP |
1445 | dma_set_residue(txstate, residue); |
1446 | ||
1447 | return status; | |
1448 | } | |
1449 | ||
1450 | static void rcar_dmac_issue_pending(struct dma_chan *chan) | |
1451 | { | |
1452 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
1453 | unsigned long flags; | |
1454 | ||
1455 | spin_lock_irqsave(&rchan->lock, flags); | |
1456 | ||
1457 | if (list_empty(&rchan->desc.pending)) | |
1458 | goto done; | |
1459 | ||
1460 | /* Append the pending list to the active list. */ | |
1461 | list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active); | |
1462 | ||
1463 | /* | |
1464 | * If no transfer is running pick the first descriptor from the active | |
1465 | * list and start the transfer. | |
1466 | */ | |
1467 | if (!rchan->desc.running) { | |
1468 | struct rcar_dmac_desc *desc; | |
1469 | ||
1470 | desc = list_first_entry(&rchan->desc.active, | |
1471 | struct rcar_dmac_desc, node); | |
1472 | rchan->desc.running = desc; | |
1473 | ||
1474 | rcar_dmac_chan_start_xfer(rchan); | |
1475 | } | |
1476 | ||
1477 | done: | |
1478 | spin_unlock_irqrestore(&rchan->lock, flags); | |
1479 | } | |
1480 | ||
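/*
 * Illustrative sketch, not part of the driver: the usual client-side
 * submission flow that ends in rcar_dmac_issue_pending(). The buffer, length
 * and direction are hypothetical; error handling is kept minimal.
 */
static int example_submit_rx(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;

	/* Queues the descriptor on the channel's pending list. */
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Splices pending onto active and starts the hardware if idle. */
	dma_async_issue_pending(chan);

	return 0;
}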
30c45005 NS |
1481 | static void rcar_dmac_device_synchronize(struct dma_chan *chan) |
1482 | { | |
1483 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | |
1484 | ||
1485 | synchronize_irq(rchan->irq); | |
1486 | } | |
1487 | ||
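/*
 * Illustrative sketch, not part of the driver: the teardown pattern that
 * relies on device_synchronize. dmaengine_synchronize() ends up in
 * rcar_dmac_device_synchronize(), which waits for the channel IRQ (including
 * its threaded half) to finish before the client frees its resources.
 */
static void example_stop_channel(struct dma_chan *chan)
{
	/* Request termination without sleeping (safe in atomic context). */
	dmaengine_terminate_async(chan);

	/* Wait until no completion callback can still be running (may sleep). */
	dmaengine_synchronize(chan);
}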
87244fe5 LP |
1488 | /* ----------------------------------------------------------------------------- |
1489 | * IRQ handling | |
1490 | */ | |
1491 | ||
ccadee9b LP |
1492 | static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan) |
1493 | { | |
1494 | struct rcar_dmac_desc *desc = chan->desc.running; | |
1495 | unsigned int stage; | |
1496 | ||
1497 | if (WARN_ON(!desc || !desc->cyclic)) { | |
1498 | /* | |
1499 | * This should never happen; there should always be a running | |
1500 | * cyclic descriptor when a descriptor stage end interrupt is | |
1501 | * triggered. Warn and return. | |
1502 | */ | |
1503 | return IRQ_NONE; | |
1504 | } | |
1505 | ||
1506 | /* Program the interrupt pointer to the next stage. */ | |
1507 | stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & | |
1508 | RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; | |
1509 | rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage)); | |
1510 | ||
1511 | return IRQ_WAKE_THREAD; | |
1512 | } | |
1513 | ||
87244fe5 LP |
1514 | static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan) |
1515 | { | |
1516 | struct rcar_dmac_desc *desc = chan->desc.running; | |
87244fe5 LP |
1517 | irqreturn_t ret = IRQ_WAKE_THREAD; |
1518 | ||
1519 | if (WARN_ON_ONCE(!desc)) { | |
1520 | /* | |
ccadee9b LP |
1521 | * This should never happen; there should always be a running | |
1522 | * descriptor when a transfer end interrupt is triggered. Warn | |
1523 | * and return. | |
87244fe5 LP |
1524 | */ |
1525 | return IRQ_NONE; | |
1526 | } | |
1527 | ||
1528 | /* | |
ccadee9b LP |
1529 | * The transfer end interrupt isn't generated for each chunk when using |
1530 | * descriptor mode. Only update the running chunk pointer in | |
1531 | * non-descriptor mode. | |
87244fe5 | 1532 | */ |
1ed1315f | 1533 | if (!desc->hwdescs.use) { |
ccadee9b LP |
1534 | /* |
1535 | * If we haven't completed the last transfer chunk simply move | |
1536 | * to the next one. Only wake the IRQ thread if the transfer is | |
1537 | * cyclic. | |
1538 | */ | |
1539 | if (!list_is_last(&desc->running->node, &desc->chunks)) { | |
1540 | desc->running = list_next_entry(desc->running, node); | |
1541 | if (!desc->cyclic) | |
1542 | ret = IRQ_HANDLED; | |
1543 | goto done; | |
1544 | } | |
87244fe5 | 1545 | |
ccadee9b LP |
1546 | /* |
1547 | * We've completed the last transfer chunk. If the transfer is | |
1548 | * cyclic, move back to the first one. | |
1549 | */ | |
1550 | if (desc->cyclic) { | |
1551 | desc->running = | |
1552 | list_first_entry(&desc->chunks, | |
87244fe5 LP |
1553 | struct rcar_dmac_xfer_chunk, |
1554 | node); | |
ccadee9b LP |
1555 | goto done; |
1556 | } | |
87244fe5 LP |
1557 | } |
1558 | ||
1559 | /* The descriptor is complete, move it to the done list. */ | |
1560 | list_move_tail(&desc->node, &chan->desc.done); | |
1561 | ||
1562 | /* Queue the next descriptor, if any. */ | |
1563 | if (!list_empty(&chan->desc.active)) | |
1564 | chan->desc.running = list_first_entry(&chan->desc.active, | |
1565 | struct rcar_dmac_desc, | |
1566 | node); | |
1567 | else | |
1568 | chan->desc.running = NULL; | |
1569 | ||
1570 | done: | |
1571 | if (chan->desc.running) | |
1572 | rcar_dmac_chan_start_xfer(chan); | |
1573 | ||
1574 | return ret; | |
1575 | } | |
1576 | ||
1577 | static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev) | |
1578 | { | |
ccadee9b | 1579 | u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE; |
87244fe5 LP |
1580 | struct rcar_dmac_chan *chan = dev; |
1581 | irqreturn_t ret = IRQ_NONE; | |
9203dbec | 1582 | bool reinit = false; |
87244fe5 LP |
1583 | u32 chcr; |
1584 | ||
1585 | spin_lock(&chan->lock); | |
1586 | ||
1587 | chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); | |
9203dbec | 1588 | if (chcr & RCAR_DMACHCR_CAE) { |
e919417b KM |
1589 | struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device); |
1590 | ||
1591 | /* | |
1592 | * We don't need to call rcar_dmac_chan_halt() | |
1593 | * because the channel is already stopped in the error case. | |
1594 | * We need to clear the register and check the DE bit to recover. | |
1595 | */ | |
245bbd16 | 1596 | rcar_dmac_chan_clear(dmac, chan); |
e919417b | 1597 | rcar_dmac_chcr_de_barrier(chan); |
9203dbec KM |
1598 | reinit = true; |
1599 | goto spin_lock_end; | |
1600 | } | |
1601 | ||
ccadee9b LP |
1602 | if (chcr & RCAR_DMACHCR_TE) |
1603 | mask |= RCAR_DMACHCR_DE; | |
1604 | rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask); | |
a8d46a7f KM |
1605 | if (mask & RCAR_DMACHCR_DE) |
1606 | rcar_dmac_chcr_de_barrier(chan); | |
ccadee9b LP |
1607 | |
1608 | if (chcr & RCAR_DMACHCR_DSE) | |
1609 | ret |= rcar_dmac_isr_desc_stage_end(chan); | |
87244fe5 LP |
1610 | |
1611 | if (chcr & RCAR_DMACHCR_TE) | |
1612 | ret |= rcar_dmac_isr_transfer_end(chan); | |
1613 | ||
9203dbec | 1614 | spin_lock_end: |
87244fe5 LP |
1615 | spin_unlock(&chan->lock); |
1616 | ||
9203dbec KM |
1617 | if (reinit) { |
1618 | dev_err(chan->chan.device->dev, "Channel Address Error\n"); | |
1619 | ||
1620 | rcar_dmac_chan_reinit(chan); | |
1621 | ret = IRQ_HANDLED; | |
1622 | } | |
1623 | ||
87244fe5 LP |
1624 | return ret; |
1625 | } | |
1626 | ||
1627 | static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev) | |
1628 | { | |
1629 | struct rcar_dmac_chan *chan = dev; | |
1630 | struct rcar_dmac_desc *desc; | |
964b2fd8 | 1631 | struct dmaengine_desc_callback cb; |
87244fe5 LP |
1632 | |
1633 | spin_lock_irq(&chan->lock); | |
1634 | ||
1635 | /* For cyclic transfers notify the user after every chunk. */ | |
1636 | if (chan->desc.running && chan->desc.running->cyclic) { | |
87244fe5 | 1637 | desc = chan->desc.running; |
964b2fd8 | 1638 | dmaengine_desc_get_callback(&desc->async_tx, &cb); |
87244fe5 | 1639 | |
964b2fd8 | 1640 | if (dmaengine_desc_callback_valid(&cb)) { |
87244fe5 | 1641 | spin_unlock_irq(&chan->lock); |
964b2fd8 | 1642 | dmaengine_desc_callback_invoke(&cb, NULL); |
87244fe5 LP |
1643 | spin_lock_irq(&chan->lock); |
1644 | } | |
1645 | } | |
1646 | ||
1647 | /* | |
1648 | * Call the callback function for all descriptors on the done list and | |
1649 | * move them to the ack wait list. | |
1650 | */ | |
1651 | while (!list_empty(&chan->desc.done)) { | |
1652 | desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, | |
1653 | node); | |
1654 | dma_cookie_complete(&desc->async_tx); | |
1655 | list_del(&desc->node); | |
1656 | ||
964b2fd8 DJ |
1657 | dmaengine_desc_get_callback(&desc->async_tx, &cb); |
1658 | if (dmaengine_desc_callback_valid(&cb)) { | |
87244fe5 LP |
1659 | spin_unlock_irq(&chan->lock); |
1660 | /* | |
1661 | * We own the only reference to this descriptor, we can | |
1662 | * safely dereference it without holding the channel | |
1663 | * lock. | |
1664 | */ | |
964b2fd8 | 1665 | dmaengine_desc_callback_invoke(&cb, NULL); |
87244fe5 LP |
1666 | spin_lock_irq(&chan->lock); |
1667 | } | |
1668 | ||
1669 | list_add_tail(&desc->node, &chan->desc.wait); | |
1670 | } | |
1671 | ||
ccadee9b LP |
1672 | spin_unlock_irq(&chan->lock); |
1673 | ||
87244fe5 LP |
1674 | /* Recycle all acked descriptors. */ |
1675 | rcar_dmac_desc_recycle_acked(chan); | |
1676 | ||
87244fe5 LP |
1677 | return IRQ_HANDLED; |
1678 | } | |
1679 | ||
87244fe5 LP |
1680 | /* ----------------------------------------------------------------------------- |
1681 | * OF xlate and channel filter | |
1682 | */ | |
1683 | ||
1684 | static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg) | |
1685 | { | |
1686 | struct rcar_dmac *dmac = to_rcar_dmac(chan->device); | |
1687 | struct of_phandle_args *dma_spec = arg; | |
1688 | ||
1689 | /* | |
1690 | * FIXME: Using a filter on OF platforms is nonsense. The OF xlate | |
1691 | * function knows which device it wants to allocate a channel from, | |
1692 | * and would be perfectly capable of selecting the channel it wants. | |
1693 | * Forcing it to call dma_request_channel() and iterate through all | |
1694 | * channels from all controllers is just pointless. | |
1695 | */ | |
1dc1b29a | 1696 | if (chan->device->device_config != rcar_dmac_device_config) |
87244fe5 LP |
1697 | return false; |
1698 | ||
1699 | return !test_and_set_bit(dma_spec->args[0], dmac->modules); | |
1700 | } | |
1701 | ||
1702 | static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec, | |
1703 | struct of_dma *ofdma) | |
1704 | { | |
1705 | struct rcar_dmac_chan *rchan; | |
1706 | struct dma_chan *chan; | |
1707 | dma_cap_mask_t mask; | |
1708 | ||
1709 | if (dma_spec->args_count != 1) | |
1710 | return NULL; | |
1711 | ||
1712 | /* Only slave DMA channels can be allocated via DT */ | |
1713 | dma_cap_zero(mask); | |
1714 | dma_cap_set(DMA_SLAVE, mask); | |
1715 | ||
1dc1b29a BW |
1716 | chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec, |
1717 | ofdma->of_node); | |
87244fe5 LP |
1718 | if (!chan) |
1719 | return NULL; | |
1720 | ||
1721 | rchan = to_rcar_dmac_chan(chan); | |
1722 | rchan->mid_rid = dma_spec->args[0]; | |
1723 | ||
1724 | return chan; | |
1725 | } | |
1726 | ||
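/*
 * Illustrative sketch, not part of the driver: a DT-based client obtains a
 * channel through the xlate above. The single specifier cell carries the
 * MID/RID value that is stored in rchan->mid_rid. The "tx" name is a
 * hypothetical dma-names entry of the client node.
 */
static struct dma_chan *example_request_tx_channel(struct device *dev)
{
	/* Resolves the client's "dmas"/"dma-names" properties via OF. */
	return dma_request_chan(dev, "tx");
}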
1727 | /* ----------------------------------------------------------------------------- | |
1728 | * Power management | |
1729 | */ | |
1730 | ||
87244fe5 LP |
1731 | #ifdef CONFIG_PM |
1732 | static int rcar_dmac_runtime_suspend(struct device *dev) | |
1733 | { | |
1734 | return 0; | |
1735 | } | |
1736 | ||
1737 | static int rcar_dmac_runtime_resume(struct device *dev) | |
1738 | { | |
1739 | struct rcar_dmac *dmac = dev_get_drvdata(dev); | |
1740 | ||
1741 | return rcar_dmac_init(dmac); | |
1742 | } | |
1743 | #endif | |
1744 | ||
1745 | static const struct dev_pm_ops rcar_dmac_pm = { | |
1131b0a4 GU |
1746 | /* |
1747 | * TODO for system sleep/resume: | |
1748 | * - Wait for the current transfer to complete and stop the device, | |
1749 | * - Resume transfers, if any. | |
1750 | */ | |
73dcc666 GU |
1751 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
1752 | pm_runtime_force_resume) | |
87244fe5 LP |
1753 | SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume, |
1754 | NULL) | |
1755 | }; | |
1756 | ||
1757 | /* ----------------------------------------------------------------------------- | |
1758 | * Probe and remove | |
1759 | */ | |
1760 | ||
1761 | static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, | |
e5bfbbb9 | 1762 | struct rcar_dmac_chan *rchan) |
87244fe5 LP |
1763 | { |
1764 | struct platform_device *pdev = to_platform_device(dmac->dev); | |
1765 | struct dma_chan *chan = &rchan->chan; | |
1766 | char pdev_irqname[5]; | |
1767 | char *irqname; | |
87244fe5 LP |
1768 | int ret; |
1769 | ||
87244fe5 LP |
1770 | rchan->mid_rid = -EINVAL; |
1771 | ||
1772 | spin_lock_init(&rchan->lock); | |
1773 | ||
f7638c90 LP |
1774 | INIT_LIST_HEAD(&rchan->desc.free); |
1775 | INIT_LIST_HEAD(&rchan->desc.pending); | |
1776 | INIT_LIST_HEAD(&rchan->desc.active); | |
1777 | INIT_LIST_HEAD(&rchan->desc.done); | |
1778 | INIT_LIST_HEAD(&rchan->desc.wait); | |
1779 | ||
87244fe5 | 1780 | /* Request the channel interrupt. */ |
e5bfbbb9 | 1781 | sprintf(pdev_irqname, "ch%u", rchan->index); |
427d5ecd | 1782 | rchan->irq = platform_get_irq_byname(pdev, pdev_irqname); |
e17be6e1 | 1783 | if (rchan->irq < 0) |
87244fe5 | 1784 | return -ENODEV; |
87244fe5 LP |
1785 | |
1786 | irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", | |
e5bfbbb9 | 1787 | dev_name(dmac->dev), rchan->index); |
87244fe5 LP |
1788 | if (!irqname) |
1789 | return -ENOMEM; | |
1790 | ||
5e857047 KM |
1791 | /* |
1792 | * Initialize the DMA engine channel and add it to the DMA engine | |
1793 | * channels list. | |
1794 | */ | |
1795 | chan->device = &dmac->engine; | |
1796 | dma_cookie_init(chan); | |
1797 | ||
1798 | list_add_tail(&chan->device_node, &dmac->engine.channels); | |
1799 | ||
427d5ecd NS |
1800 | ret = devm_request_threaded_irq(dmac->dev, rchan->irq, |
1801 | rcar_dmac_isr_channel, | |
87244fe5 LP |
1802 | rcar_dmac_isr_channel_thread, 0, |
1803 | irqname, rchan); | |
1804 | if (ret) { | |
427d5ecd NS |
1805 | dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", |
1806 | rchan->irq, ret); | |
87244fe5 LP |
1807 | return ret; |
1808 | } | |
1809 | ||
87244fe5 LP |
1810 | return 0; |
1811 | } | |
1812 | ||
cf24aac3 YS |
1813 | #define RCAR_DMAC_MAX_CHANNELS 32 |
1814 | ||
87244fe5 LP |
1815 | static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) |
1816 | { | |
1817 | struct device_node *np = dev->of_node; | |
1818 | int ret; | |
1819 | ||
1820 | ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); | |
1821 | if (ret < 0) { | |
1822 | dev_err(dev, "unable to read dma-channels property\n"); | |
1823 | return ret; | |
1824 | } | |
1825 | ||
cf24aac3 YS |
1826 | /* The hardware and driver don't support more than 32 bits in CHCLR */ |
1827 | if (dmac->n_channels <= 0 || | |
1828 | dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) { | |
87244fe5 LP |
1829 | dev_err(dev, "invalid number of channels %u\n", |
1830 | dmac->n_channels); | |
1831 | return -EINVAL; | |
1832 | } | |
1833 | ||
fcf8adb7 YS |
1834 | /* |
1835 | * If the driver is unable to read the dma-channel-mask property, | |
1836 | * it assumes that it can use all channels. | |
1837 | */ | |
cf24aac3 | 1838 | dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0); |
fcf8adb7 YS |
1839 | of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask); |
1840 | ||
1841 | /* Clear any mask bits for channels beyond the supported range. */ | |
1842 | dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0); | |
cf24aac3 | 1843 | |
87244fe5 LP |
1844 | return 0; |
1845 | } | |
1846 | ||
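/*
 * Illustrative only, made-up numbers: with dma-channels = 16 and no
 * dma-channel-mask property, channels_mask is GENMASK(15, 0) = 0xffff. A
 * property value of 0x1fffe would be clamped to 0xfffe, dropping the
 * out-of-range bit 16 while keeping channel 0 masked off.
 */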
1847 | static int rcar_dmac_probe(struct platform_device *pdev) | |
1848 | { | |
1849 | const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | | |
1850 | DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | | |
1851 | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | | |
1852 | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; | |
d249b5fb GU |
1853 | const struct rcar_dmac_of_data *data; |
1854 | struct rcar_dmac_chan *chan; | |
87244fe5 | 1855 | struct dma_device *engine; |
e5bfbbb9 | 1856 | void __iomem *chan_base; |
87244fe5 | 1857 | struct rcar_dmac *dmac; |
87244fe5 | 1858 | unsigned int i; |
87244fe5 LP |
1859 | int ret; |
1860 | ||
2df4a02a YS |
1861 | data = of_device_get_match_data(&pdev->dev); |
1862 | if (!data) | |
1863 | return -EINVAL; | |
1864 | ||
87244fe5 LP |
1865 | dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); |
1866 | if (!dmac) | |
1867 | return -ENOMEM; | |
1868 | ||
1869 | dmac->dev = &pdev->dev; | |
1870 | platform_set_drvdata(pdev, dmac); | |
334304ac | 1871 | dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); |
da2ad87f | 1872 | |
2d21543e JJ |
1873 | ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); |
1874 | if (ret) | |
1875 | return ret; | |
87244fe5 LP |
1876 | |
1877 | ret = rcar_dmac_parse_of(&pdev->dev, dmac); | |
1878 | if (ret < 0) | |
1879 | return ret; | |
1880 | ||
be6893e1 LP |
1881 | /* |
1882 | * A still unconfirmed hardware bug prevents IPMMU microTLB 0 from being | |
1883 | * flushed correctly, resulting in memory corruption. DMAC 0 channel 0 | |
1884 | * is connected to microTLB 0 on currently supported platforms, so we | |
1885 | * can't use it with the IPMMU. As the IOMMU API operates at the device | |
1886 | * level we can't disable it selectively, so ignore channel 0 for now if | |
1887 | * the device is part of an IOMMU group. | |
1888 | */ | |
cf24aac3 YS |
1889 | if (device_iommu_mapped(&pdev->dev)) |
1890 | dmac->channels_mask &= ~BIT(0); | |
be6893e1 | 1891 | |
87244fe5 LP |
1892 | dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, |
1893 | sizeof(*dmac->channels), GFP_KERNEL); | |
1894 | if (!dmac->channels) | |
1895 | return -ENOMEM; | |
1896 | ||
1897 | /* Request resources. */ | |
e5bfbbb9 GU |
1898 | dmac->dmac_base = devm_platform_ioremap_resource(pdev, 0); |
1899 | if (IS_ERR(dmac->dmac_base)) | |
1900 | return PTR_ERR(dmac->dmac_base); | |
1901 | ||
1902 | if (!data->chan_offset_base) { | |
1903 | dmac->chan_base = devm_platform_ioremap_resource(pdev, 1); | |
1904 | if (IS_ERR(dmac->chan_base)) | |
1905 | return PTR_ERR(dmac->chan_base); | |
1906 | ||
1907 | chan_base = dmac->chan_base; | |
1908 | } else { | |
1909 | chan_base = dmac->dmac_base + data->chan_offset_base; | |
1910 | } | |
1911 | ||
1912 | for_each_rcar_dmac_chan(i, dmac, chan) { | |
1913 | chan->index = i; | |
1914 | chan->iomem = chan_base + i * data->chan_offset_stride; | |
1915 | } | |
87244fe5 | 1916 | |
87244fe5 LP |
1917 | /* Enable runtime PM and initialize the device. */ |
1918 | pm_runtime_enable(&pdev->dev); | |
dea8464d | 1919 | ret = pm_runtime_resume_and_get(&pdev->dev); |
87244fe5 LP |
1920 | if (ret < 0) { |
1921 | dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); | |
05f4fae9 | 1922 | goto err_pm_disable; |
87244fe5 LP |
1923 | } |
1924 | ||
1925 | ret = rcar_dmac_init(dmac); | |
1926 | pm_runtime_put(&pdev->dev); | |
1927 | ||
1928 | if (ret) { | |
1929 | dev_err(&pdev->dev, "failed to reset device\n"); | |
05f4fae9 | 1930 | goto err_pm_disable; |
87244fe5 LP |
1931 | } |
1932 | ||
5e857047 KM |
1933 | /* Initialize engine */ |
1934 | engine = &dmac->engine; | |
1935 | ||
1936 | dma_cap_set(DMA_MEMCPY, engine->cap_mask); | |
1937 | dma_cap_set(DMA_SLAVE, engine->cap_mask); | |
1938 | ||
1939 | engine->dev = &pdev->dev; | |
1940 | engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); | |
1941 | ||
1942 | engine->src_addr_widths = widths; | |
1943 | engine->dst_addr_widths = widths; | |
1944 | engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); | |
1945 | engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | |
1946 | ||
1947 | engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; | |
1948 | engine->device_free_chan_resources = rcar_dmac_free_chan_resources; | |
1949 | engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; | |
1950 | engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; | |
1951 | engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; | |
1952 | engine->device_config = rcar_dmac_device_config; | |
8115ce74 | 1953 | engine->device_pause = rcar_dmac_chan_pause; |
5e857047 KM |
1954 | engine->device_terminate_all = rcar_dmac_chan_terminate_all; |
1955 | engine->device_tx_status = rcar_dmac_tx_status; | |
1956 | engine->device_issue_pending = rcar_dmac_issue_pending; | |
1957 | engine->device_synchronize = rcar_dmac_device_synchronize; | |
1958 | ||
1959 | INIT_LIST_HEAD(&engine->channels); | |
87244fe5 | 1960 | |
d249b5fb | 1961 | for_each_rcar_dmac_chan(i, dmac, chan) { |
e5bfbbb9 | 1962 | ret = rcar_dmac_chan_probe(dmac, chan); |
87244fe5 | 1963 | if (ret < 0) |
05f4fae9 | 1964 | goto err_pm_disable; |
87244fe5 LP |
1965 | } |
1966 | ||
1967 | /* Register the DMAC as a DMA provider for DT. */ | |
1968 | ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, | |
1969 | NULL); | |
1970 | if (ret < 0) | |
05f4fae9 | 1971 | goto err_pm_disable; |
87244fe5 LP |
1972 | |
1973 | /* | |
1974 | * Register the DMA engine device. | |
1975 | * | |
1976 | * Default transfer size of 32 bytes requires 32-byte alignment. | |
1977 | */ | |
87244fe5 LP |
1978 | ret = dma_async_device_register(engine); |
1979 | if (ret < 0) | |
05f4fae9 | 1980 | goto err_dma_free; |
87244fe5 LP |
1981 | |
1982 | return 0; | |
1983 | ||
05f4fae9 | 1984 | err_dma_free: |
87244fe5 | 1985 | of_dma_controller_free(pdev->dev.of_node); |
05f4fae9 | 1986 | err_pm_disable: |
87244fe5 LP |
1987 | pm_runtime_disable(&pdev->dev); |
1988 | return ret; | |
1989 | } | |
1990 | ||
8ca34299 | 1991 | static void rcar_dmac_remove(struct platform_device *pdev) |
87244fe5 LP |
1992 | { |
1993 | struct rcar_dmac *dmac = platform_get_drvdata(pdev); | |
1994 | ||
1995 | of_dma_controller_free(pdev->dev.of_node); | |
1996 | dma_async_device_unregister(&dmac->engine); | |
1997 | ||
1998 | pm_runtime_disable(&pdev->dev); | |
87244fe5 LP |
1999 | } |
2000 | ||
2001 | static void rcar_dmac_shutdown(struct platform_device *pdev) | |
2002 | { | |
2003 | struct rcar_dmac *dmac = platform_get_drvdata(pdev); | |
2004 | ||
9203dbec | 2005 | rcar_dmac_stop_all_chan(dmac); |
87244fe5 LP |
2006 | } |
2007 | ||
2df4a02a | 2008 | static const struct rcar_dmac_of_data rcar_dmac_data = { |
e5bfbbb9 GU |
2009 | .chan_offset_base = 0x8000, |
2010 | .chan_offset_stride = 0x80, | |
2011 | }; | |
2012 | ||
2fe6777b | 2013 | static const struct rcar_dmac_of_data rcar_gen4_dmac_data = { |
e5bfbbb9 GU |
2014 | .chan_offset_base = 0x0, |
2015 | .chan_offset_stride = 0x1000, | |
2df4a02a YS |
2016 | }; |
2017 | ||
87244fe5 | 2018 | static const struct of_device_id rcar_dmac_of_ids[] = { |
2df4a02a YS |
2019 | { |
2020 | .compatible = "renesas,rcar-dmac", | |
2021 | .data = &rcar_dmac_data, | |
2fe6777b YS |
2022 | }, { |
2023 | .compatible = "renesas,rcar-gen4-dmac", | |
2024 | .data = &rcar_gen4_dmac_data, | |
e5bfbbb9 | 2025 | }, { |
23417899 KM |
2026 | /* |
2027 | * Backward compatibility for kernels between v5.12 and v5.19, | |
2028 | * where this compatible wasn't combined with "renesas,rcar-gen4-dmac". | |
2029 | */ | |
e5bfbbb9 | 2030 | .compatible = "renesas,dmac-r8a779a0", |
2fe6777b | 2031 | .data = &rcar_gen4_dmac_data, |
2df4a02a | 2032 | }, |
87244fe5 LP |
2033 | { /* Sentinel */ } |
2034 | }; | |
2035 | MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids); | |
2036 | ||
2037 | static struct platform_driver rcar_dmac_driver = { | |
2038 | .driver = { | |
2039 | .pm = &rcar_dmac_pm, | |
2040 | .name = "rcar-dmac", | |
2041 | .of_match_table = rcar_dmac_of_ids, | |
2042 | }, | |
2043 | .probe = rcar_dmac_probe, | |
76355c25 | 2044 | .remove = rcar_dmac_remove, |
87244fe5 LP |
2045 | .shutdown = rcar_dmac_shutdown, |
2046 | }; | |
2047 | ||
2048 | module_platform_driver(rcar_dmac_driver); | |
2049 | ||
2050 | MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver"); | |
2051 | MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); | |
2052 | MODULE_LICENSE("GPL v2"); |