/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};

/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));

/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)

/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

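/*
 * Descriptors and transfer chunks are allocated one full page at a time; each
 * page holds either descriptors or chunks, as selected through the union in
 * struct rcar_dmac_desc_page above.
 */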
#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))

/*
 * struct rcar_dmac_chan_slave - Slave configuration
 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */
struct rcar_dmac_chan_slave {
	phys_addr_t slave_addr;
	unsigned int xfer_size;
};

/*
 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
 * @addr: slave dma address
 * @dir: direction of mapping
 * @slave: slave configuration that is mapped
 */
struct rcar_dmac_chan_map {
	dma_addr_t addr;
	enum dma_data_direction dir;
	struct rcar_dmac_chan_slave slave;
};

/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;
	struct rcar_dmac_chan_map map;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)

/*
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;

	DECLARE_BITMAP(modules, 256);
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)

/* -----------------------------------------------------------------------------
 * Registers
 */

#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
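/*
 * The transfer size (TS) encoding is split across two separate CHCR bit
 * fields, which is why each value below pairs a field shifted by 20 with one
 * shifted by 3.
 */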
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4

/* -----------------------------------------------------------------------------
 * Device access
 */

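/*
 * DMAOR (global) and DMARS (per-channel) are 16-bit registers; every other
 * register accessed by this driver is 32 bits wide. The helpers below select
 * the access width accordingly.
 */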
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}

/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}

static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

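	/*
	 * In hardware descriptor (DPB) mode the DMAC fetches SAR/DAR/TCR for
	 * each chunk from the in-memory descriptor list programmed below.
	 * Otherwise the registers for the current chunk are written directly.
	 */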
	if (desc->hwdescs.use) {
		struct rcar_dmac_xfer_chunk *chunk =
			list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at the beginning of the transfer by the
		 * DMAC as it should be. Initialize it manually with the
		 * destination address of the first chunk.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the end
		 * of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic, enable normal descriptor
		 * mode and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback, enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
}

static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free lists. The descriptor's chunks list will be reinitialized to an empty
 * list as a result.
 *
 * The descriptor must have been removed from the channel's lists before calling
 * this function.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add(&desc->node, &chan->desc.free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	/*
	 * We have to temporarily move all descriptors from the wait list to a
	 * local list as iterating over the wait list, even with
	 * list_for_each_entry_safe, isn't safe if we release the channel lock
	 * around the rcar_dmac_desc_put() call.
	 */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice_init(&chan->desc.wait, &list);
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}

	if (list_empty(&list))
		return;

	/* Put the remaining descriptors back in the wait list. */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice(&list, &chan->desc.wait);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	unsigned long flags;
	int ret;

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
	list_del(&desc->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return desc;
}

/*
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

		list_add_tail(&chunk->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.chunks_free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_xfer_chunk *chunk;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.chunks_free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	chunk = list_first_entry(&chan->desc.chunks_free,
				 struct rcar_dmac_xfer_chunk, node);
	list_del(&chunk->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return chunk;
}

static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
				     struct rcar_dmac_desc *desc, size_t size)
{
	/*
	 * dma_alloc_coherent() allocates memory in page size increments. To
	 * avoid reallocating the hardware descriptors when the allocated size
	 * wouldn't change align the requested size to a multiple of the page
	 * size.
	 */
	size = PAGE_ALIGN(size);

	if (desc->hwdescs.size == size)
		return;

	if (desc->hwdescs.mem) {
		dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
				  desc->hwdescs.mem, desc->hwdescs.dma);
		desc->hwdescs.mem = NULL;
		desc->hwdescs.size = 0;
	}

	if (!size)
		return;

	desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
					       &desc->hwdescs.dma, GFP_NOWAIT);
	if (!desc->hwdescs.mem)
		return;

	desc->hwdescs.size = size;
}

static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
				 struct rcar_dmac_desc *desc)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_hw_desc *hwdesc;

	rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));

	hwdesc = desc->hwdescs.mem;
	if (!hwdesc)
		return -ENOMEM;

	list_for_each_entry(chunk, &desc->chunks, node) {
		hwdesc->sar = chunk->src_addr;
		hwdesc->dar = chunk->dst_addr;
		hwdesc->tcr = chunk->size >> desc->xfer_shift;
		hwdesc++;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Stop and reset
 */

static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
		  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
}

static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&chan->lock, flags);

	/* Move all non-free descriptors to the local lists. */
	list_splice_init(&chan->desc.pending, &descs);
	list_splice_init(&chan->desc.active, &descs);
	list_splice_init(&chan->desc.done, &descs);
	list_splice_init(&chan->desc.wait, &descs);

	chan->desc.running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &descs, node) {
		list_del(&desc->node);
		rcar_dmac_desc_put(chan, desc);
	}
}

static void rcar_dmac_stop(struct rcar_dmac *dmac)
{
	rcar_dmac_write(dmac, RCAR_DMAOR, 0);
}

static void rcar_dmac_abort(struct rcar_dmac *dmac)
{
	unsigned int i;

	/* Stop all channels. */
	for (i = 0; i < dmac->n_channels; ++i) {
		struct rcar_dmac_chan *chan = &dmac->channels[i];

		/* Stop and reinitialize the channel. */
		spin_lock(&chan->lock);
		rcar_dmac_chan_halt(chan);
		spin_unlock(&chan->lock);

		rcar_dmac_chan_reinit(chan);
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
					  struct rcar_dmac_desc *desc)
{
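	/* Indexed by desc->xfer_shift, i.e. log2 of the transfer size. */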
	static const u32 chcr_ts[] = {
		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
		RCAR_DMACHCR_TS_64B,
	};

	unsigned int xfer_size;
	u32 chcr;

	switch (desc->direction) {
	case DMA_DEV_TO_MEM:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->src.xfer_size;
		break;

	case DMA_MEM_TO_DEV:
		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->dst.xfer_size;
		break;

	case DMA_MEM_TO_MEM:
	default:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_AUTO;
		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
		break;
	}

	desc->xfer_shift = ilog2(xfer_size);
	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
}

/*
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains a single element pointing at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *
rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, dma_addr_t dev_addr,
		       enum dma_transfer_direction dir, unsigned long dma_flags,
		       bool cyclic)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int nchunks = 0;
	unsigned int max_chunk_size;
	unsigned int full_size = 0;
	bool cross_boundary = false;
	unsigned int i;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	u32 high_dev_addr;
	u32 high_mem_addr;
#endif

	desc = rcar_dmac_desc_get(chan);
	if (!desc)
		return NULL;

	desc->async_tx.flags = dma_flags;
	desc->async_tx.cookie = -EBUSY;

	desc->cyclic = cyclic;
	desc->direction = dir;

	rcar_dmac_chan_configure_desc(chan, desc);

	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;

	/*
	 * Allocate and fill the transfer chunk descriptors. We own the only
	 * reference to the DMA descriptor, there's no need for locking.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t mem_addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		full_size += len;

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (i == 0) {
			high_dev_addr = dev_addr >> 32;
			high_mem_addr = mem_addr >> 32;
		}

		if ((dev_addr >> 32 != high_dev_addr) ||
		    (mem_addr >> 32 != high_mem_addr))
			cross_boundary = true;
#endif
		while (len) {
			unsigned int size = min(len, max_chunk_size);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			/*
			 * Prevent individual transfers from crossing 4GB
			 * boundaries.
			 */
			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
				cross_boundary = true;
			}
			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
				cross_boundary = true;
			}
#endif

			chunk = rcar_dmac_xfer_chunk_get(chan);
			if (!chunk) {
				rcar_dmac_desc_put(chan, desc);
				return NULL;
			}

			if (dir == DMA_DEV_TO_MEM) {
				chunk->src_addr = dev_addr;
				chunk->dst_addr = mem_addr;
			} else {
				chunk->src_addr = mem_addr;
				chunk->dst_addr = dev_addr;
			}

			chunk->size = size;

			dev_dbg(chan->chan.device->dev,
				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
				chan->index, chunk, desc, i, sg, size, len,
				&chunk->src_addr, &chunk->dst_addr);

			mem_addr += size;
			if (dir == DMA_MEM_TO_MEM)
				dev_addr += size;

			len -= size;

			list_add_tail(&chunk->node, &desc->chunks);
			nchunks++;
		}
	}

	desc->nchunks = nchunks;
	desc->size = full_size;

	/*
	 * Use hardware descriptor lists if possible when more than one chunk
	 * needs to be transferred (otherwise they don't make much sense).
	 *
	 * The source and destination addresses must be located in the same
	 * 4GiB region of the 40-bit address space when hardware descriptors
	 * are used; cross_boundary records whether that constraint is
	 * violated.
	 */
	desc->hwdescs.use = !cross_boundary && nchunks > 1;
	if (desc->hwdescs.use) {
		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
			desc->hwdescs.use = false;
	}

	return &desc->async_tx;
}

/* -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	int ret;

	INIT_LIST_HEAD(&rchan->desc.chunks_free);
	INIT_LIST_HEAD(&rchan->desc.pages);

	/* Preallocate descriptors. */
	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	return pm_runtime_get_sync(chan->device->dev);
}

static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct rcar_dmac_chan_map *map = &rchan->map;
	struct rcar_dmac_desc_page *page, *_page;
	struct rcar_dmac_desc *desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&rchan->lock);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irq(&rchan->lock);

	/* Now no new interrupts will occur */

	if (rchan->mid_rid >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(rchan->mid_rid, dmac->modules);
		rchan->mid_rid = -EINVAL;
	}

	list_splice_init(&rchan->desc.free, &list);
	list_splice_init(&rchan->desc.pending, &list);
	list_splice_init(&rchan->desc.active, &list);
	list_splice_init(&rchan->desc.done, &list);
	list_splice_init(&rchan->desc.wait, &list);

	rchan->desc.running = NULL;

	list_for_each_entry(desc, &list, node)
		rcar_dmac_realloc_hwdesc(rchan, desc, 0);

	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
		list_del(&page->node);
		free_page((unsigned long)page);
	}

	/* Remove slave mapping if present. */
	if (map->slave.xfer_size) {
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
		map->slave.xfer_size = 0;
	}

	pm_runtime_put(chan->device->dev);
}

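/*
 * For memcpy a single-entry scatterlist is synthesized below so that the
 * common rcar_dmac_chan_prep_sg() path handles both the slave and the MEMCPY
 * cases.
 */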
static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct scatterlist sgl;

	if (!len)
		return NULL;

	sg_init_table(&sgl, 1);
	sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sgl) = dma_src;
	sg_dma_len(&sgl) = len;

	return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
				      DMA_MEM_TO_MEM, flags, false);
}

static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
				    enum dma_transfer_direction dir)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac_chan_map *map = &rchan->map;
	phys_addr_t dev_addr;
	size_t dev_size;
	enum dma_data_direction dev_dir;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = rchan->src.slave_addr;
		dev_size = rchan->src.xfer_size;
		dev_dir = DMA_TO_DEVICE;
	} else {
		dev_addr = rchan->dst.slave_addr;
		dev_size = rchan->dst.xfer_size;
		dev_dir = DMA_FROM_DEVICE;
	}

	/* Reuse current map if possible. */
	if (dev_addr == map->slave.slave_addr &&
	    dev_size == map->slave.xfer_size &&
	    dev_dir == map->dir)
		return 0;

	/* Remove old mapping if present. */
	if (map->slave.xfer_size)
		dma_unmap_resource(chan->device->dev, map->addr,
				   map->slave.xfer_size, map->dir, 0);
	map->slave.xfer_size = 0;

	/* Create new slave address map. */
	map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
				     dev_dir, 0);

	if (dma_mapping_error(chan->device->dev, map->addr)) {
		dev_err(chan->device->dev,
			"chan%u: failed to map %zx@%pap", rchan->index,
			dev_size, &dev_addr);
		return -EIO;
	}

	dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
		rchan->index, dev_size, &dev_addr, &map->addr,
		dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");

	map->slave.slave_addr = dev_addr;
	map->slave.xfer_size = dev_size;
	map->dir = dev_dir;

	return 0;
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || !sg_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, rchan->mid_rid);
		return NULL;
	}

	if (rcar_dmac_map_slave_addr(chan, dir))
		return NULL;

	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
				      dir, flags, false);
}

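/*
 * A cyclic transfer is split below into one SG entry per period; cap the
 * number of periods so the dynamically allocated SG list stays small.
 */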
#define RCAR_DMAC_MAX_SG_LEN	32

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			  size_t buf_len, size_t period_len,
			  enum dma_transfer_direction dir, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int i;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || buf_len < period_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, rchan->mid_rid);
		return NULL;
	}

	if (rcar_dmac_map_slave_addr(chan, dir))
		return NULL;

	sg_len = buf_len / period_len;
	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
		dev_err(chan->device->dev,
			"chan%u: sg length %d exceeds limit %d",
			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
		return NULL;
	}

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; ++i) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
				      dir, flags, true);

	kfree(sgl);
	return desc;
}

static int rcar_dmac_device_config(struct dma_chan *chan,
				   struct dma_slave_config *cfg)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/*
	 * We could lock this, but you shouldn't be configuring the channel
	 * while using it...
	 */
	rchan->src.slave_addr = cfg->src_addr;
	rchan->dst.slave_addr = cfg->dst_addr;
	rchan->src.xfer_size = cfg->src_addr_width;
	rchan->dst.xfer_size = cfg->dst_addr_width;

	return 0;
}

static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/*
	 * FIXME: No new interrupt can occur now, but the IRQ thread might still
	 * be running.
	 */

	rcar_dmac_chan_reinit(rchan);

	return 0;
}

static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
					       dma_cookie_t cookie)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *running = NULL;
	struct rcar_dmac_xfer_chunk *chunk;
	enum dma_status status;
	unsigned int residue = 0;
	unsigned int dptr = 0;

	if (!desc)
		return 0;

	/*
	 * If the cookie corresponds to a descriptor that has been completed
	 * there is no residue. The same check has already been performed by the
	 * caller but without holding the channel lock, so the descriptor could
	 * now be complete.
	 */
	status = dma_cookie_status(&chan->chan, cookie, NULL);
	if (status == DMA_COMPLETE)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently running transfer
	 * then the descriptor hasn't been processed yet, and the residue is
	 * equal to the full descriptor size.
	 */
	if (cookie != desc->async_tx.cookie) {
		list_for_each_entry(desc, &chan->desc.pending, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}
		list_for_each_entry(desc, &chan->desc.active, node) {
			if (cookie == desc->async_tx.cookie)
				return desc->size;
		}

		/*
		 * No descriptor found for the cookie, there's thus no residue.
		 * This shouldn't happen if the calling driver passes a correct
		 * cookie value.
		 */
		WARN(1, "No descriptor for cookie!");
		return 0;
	}

	/*
	 * In descriptor mode the descriptor running pointer is not maintained
	 * by the interrupt handler, find the running descriptor from the
	 * descriptor pointer field in the CHCRB register. In non-descriptor
	 * mode just use the running descriptor pointer.
	 */
	if (desc->hwdescs.use) {
		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
		WARN_ON(dptr >= desc->nchunks);
	} else {
		running = desc->running;
	}

	/* Compute the size of all chunks still to be transferred. */
	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
		if (chunk == running || ++dptr == desc->nchunks)
			break;

		residue += chunk->size;
	}

	/* Add the residue for the current chunk. */
	residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;

	return residue;
}

static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	enum dma_status status;
	unsigned long flags;
	unsigned int residue;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&rchan->lock, flags);
	residue = rcar_dmac_chan_get_residue(rchan, cookie);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/* if there's no residue, the cookie is complete */
	if (!residue)
		return DMA_COMPLETE;

	dma_set_residue(txstate, residue);

	return status;
}

static void rcar_dmac_issue_pending(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);

	if (list_empty(&rchan->desc.pending))
		goto done;

	/* Append the pending list to the active list. */
	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);

	/*
	 * If no transfer is running pick the first descriptor from the active
	 * list and start the transfer.
	 */
	if (!rchan->desc.running) {
		struct rcar_dmac_desc *desc;

		desc = list_first_entry(&rchan->desc.active,
					struct rcar_dmac_desc, node);
		rchan->desc.running = desc;

		rcar_dmac_chan_start_xfer(rchan);
	}

done:
	spin_unlock_irqrestore(&rchan->lock, flags);
}

/* -----------------------------------------------------------------------------
 * IRQ handling
 */

static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	unsigned int stage;

	if (WARN_ON(!desc || !desc->cyclic)) {
		/*
		 * This should never happen, there should always be a running
		 * cyclic descriptor when a descriptor stage end interrupt is
		 * triggered. Warn and return.
		 */
		return IRQ_NONE;
	}

	/* Program the interrupt pointer to the next stage. */
	stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
		 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
	rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (WARN_ON_ONCE(!desc)) {
		/*
		 * This should never happen, there should always be a running
		 * descriptor when a transfer end interrupt is triggered. Warn
		 * and return.
		 */
		return IRQ_NONE;
	}

	/*
	 * The transfer end interrupt isn't generated for each chunk when using
	 * descriptor mode. Only update the running chunk pointer in
	 * non-descriptor mode.
	 */
	if (!desc->hwdescs.use) {
		/*
		 * If we haven't completed the last transfer chunk simply move
		 * to the next one. Only wake the IRQ thread if the transfer is
		 * cyclic.
		 */
		if (!list_is_last(&desc->running->node, &desc->chunks)) {
			desc->running = list_next_entry(desc->running, node);
			if (!desc->cyclic)
				ret = IRQ_HANDLED;
			goto done;
		}

		/*
		 * We've completed the last transfer chunk. If the transfer is
		 * cyclic, move back to the first one.
		 */
		if (desc->cyclic) {
			desc->running =
				list_first_entry(&desc->chunks,
						 struct rcar_dmac_xfer_chunk,
						 node);
			goto done;
		}
	}

	/* The descriptor is complete, move it to the done list. */
	list_move_tail(&desc->node, &chan->desc.done);

	/* Queue the next descriptor, if any. */
	if (!list_empty(&chan->desc.active))
		chan->desc.running = list_first_entry(&chan->desc.active,
						      struct rcar_dmac_desc,
						      node);
	else
		chan->desc.running = NULL;

done:
	if (chan->desc.running)
		rcar_dmac_chan_start_xfer(chan);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
{
	u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
	struct rcar_dmac_chan *chan = dev;
	irqreturn_t ret = IRQ_NONE;
	u32 chcr;

	spin_lock(&chan->lock);

	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
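	/*
	 * On transfer end also clear DE, so that rcar_dmac_chan_is_busy()
	 * sees the channel as idle before rcar_dmac_isr_transfer_end()
	 * queues the next descriptor.
	 */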
	if (chcr & RCAR_DMACHCR_TE)
		mask |= RCAR_DMACHCR_DE;
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);

	if (chcr & RCAR_DMACHCR_DSE)
		ret |= rcar_dmac_isr_desc_stage_end(chan);

	if (chcr & RCAR_DMACHCR_TE)
		ret |= rcar_dmac_isr_transfer_end(chan);

	spin_unlock(&chan->lock);

	return ret;
}

1482 | static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev) | |
1483 | { | |
1484 | struct rcar_dmac_chan *chan = dev; | |
1485 | struct rcar_dmac_desc *desc; | |
964b2fd8 | 1486 | struct dmaengine_desc_callback cb; |
87244fe5 LP |
1487 | |
1488 | spin_lock_irq(&chan->lock); | |
1489 | ||
1490 | /* For cyclic transfers notify the user after every chunk. */ | |
1491 | if (chan->desc.running && chan->desc.running->cyclic) { | |
87244fe5 | 1492 | desc = chan->desc.running; |
964b2fd8 | 1493 | dmaengine_desc_get_callback(&desc->async_tx, &cb); |
87244fe5 | 1494 | |
964b2fd8 | 1495 | if (dmaengine_desc_callback_valid(&cb)) { |
87244fe5 | 1496 | spin_unlock_irq(&chan->lock); |
964b2fd8 | 1497 | dmaengine_desc_callback_invoke(&cb, NULL); |
87244fe5 LP |
1498 | spin_lock_irq(&chan->lock); |
1499 | } | |
1500 | } | |
1501 | ||
1502 | /* | |
1503 | * Call the callback function for all descriptors on the done list and | |
1504 | * move them to the ack wait list. | |
1505 | */ | |
1506 | while (!list_empty(&chan->desc.done)) { | |
1507 | desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, | |
1508 | node); | |
1509 | dma_cookie_complete(&desc->async_tx); | |
1510 | list_del(&desc->node); | |
1511 | ||
964b2fd8 DJ |
1512 | dmaengine_desc_get_callback(&desc->async_tx, &cb); |
1513 | if (dmaengine_desc_callback_valid(&cb)) { | |
87244fe5 LP |
1514 | spin_unlock_irq(&chan->lock); |
1515 | /* | |
1516 | * We own the only reference to this descriptor, we can | |
1517 | * safely dereference it without holding the channel | |
1518 | * lock. | |
1519 | */ | |
964b2fd8 | 1520 | dmaengine_desc_callback_invoke(&cb, NULL); |
87244fe5 LP |
1521 | spin_lock_irq(&chan->lock); |
1522 | } | |
1523 | ||
1524 | list_add_tail(&desc->node, &chan->desc.wait); | |
1525 | } | |
1526 | ||
ccadee9b LP |
1527 | spin_unlock_irq(&chan->lock); |
1528 | ||
87244fe5 LP |
1529 | /* Recycle all acked descriptors. */ |
1530 | rcar_dmac_desc_recycle_acked(chan); | |
1531 | ||
87244fe5 LP |
1532 | return IRQ_HANDLED; |
1533 | } | |
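
/*
 * For reference, the callbacks invoked above are the ones a dmaengine client
 * installs when preparing a transfer. A minimal client-side sketch for a
 * cyclic transfer, using only generic dmaengine API calls (buffer and
 * callback names are hypothetical; "chan" is a channel requested earlier):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	tx->callback = my_period_done;	// invoked after every period
 *	tx->callback_param = my_ctx;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */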

static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
{
	struct rcar_dmac *dmac = data;

	if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
		return IRQ_NONE;

	/*
	 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
	 * abort transfers on all channels, and reinitialize the DMAC.
	 */
	rcar_dmac_stop(dmac);
	rcar_dmac_abort(dmac);
	rcar_dmac_init(dmac);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;

	/*
	 * FIXME: Using a filter on OF platforms makes no sense. The OF xlate
	 * function knows which device it wants to allocate a channel from and
	 * would be perfectly capable of selecting the channel it wants.
	 * Forcing it to call dma_request_channel() and iterate through all
	 * channels from all controllers is just pointless.
	 */
	if (chan->device->device_config != rcar_dmac_device_config ||
	    dma_spec->np != chan->device->dev->of_node)
		return false;

	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}

static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct rcar_dmac_chan *rchan;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
	if (!chan)
		return NULL;

	rchan = to_rcar_dmac_chan(chan);
	rchan->mid_rid = dma_spec->args[0];

	return chan;
}
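
/*
 * For reference, a hypothetical consumer node as translated above, where the
 * single DMA specifier cell carries the MID/RID value stored in mid_rid
 * (unit address and MID/RID values are example values only):
 *
 *	uart@e6e60000 {
 *		...
 *		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
 *		dma-names = "tx", "rx";
 *	};
 */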

/* -----------------------------------------------------------------------------
 * Power management
 */

#ifdef CONFIG_PM_SLEEP
static int rcar_dmac_sleep_suspend(struct device *dev)
{
	/*
	 * TODO: Wait for the current transfer to complete and stop the device.
	 */
	return 0;
}

static int rcar_dmac_sleep_resume(struct device *dev)
{
	/* TODO: Resume transfers, if any. */
	return 0;
}
#endif

#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
	return 0;
}

static int rcar_dmac_runtime_resume(struct device *dev)
{
	struct rcar_dmac *dmac = dev_get_drvdata(dev);

	return rcar_dmac_init(dmac);
}
#endif

static const struct dev_pm_ops rcar_dmac_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
			   NULL)
};

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan,
				unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int ret;

	rchan->index = index;
	rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
	rchan->mid_rid = -EINVAL;

	spin_lock_init(&rchan->lock);

	INIT_LIST_HEAD(&rchan->desc.free);
	INIT_LIST_HEAD(&rchan->desc.pending);
	INIT_LIST_HEAD(&rchan->desc.active);
	INIT_LIST_HEAD(&rchan->desc.done);
	INIT_LIST_HEAD(&rchan->desc.wait);

	/* Request the channel interrupt. */
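	/*
	 * Note: the name buffer holds "ch" plus at most two digits ("ch99"),
	 * as rcar_dmac_parse_of() rejects channel counts of 100 or more.
	 */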
	sprintf(pdev_irqname, "ch%u", index);
	rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (rchan->irq < 0) {
		dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
					rcar_dmac_isr_channel,
					rcar_dmac_isr_channel_thread, 0,
					irqname, rchan);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			rchan->irq, ret);
		return ret;
	}

	/*
	 * Initialize the DMA engine channel and add it to the DMA engine
	 * channels list.
	 */
	chan->device = &dmac->engine;
	dma_cookie_init(chan);

	list_add_tail(&chan->device_node, &dmac->engine.channels);

	return 0;
}

static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	/* n_channels is unsigned, so only the zero case needs checking. */
	if (dmac->n_channels == 0 || dmac->n_channels >= 100) {
		dev_err(dev, "invalid number of channels %u\n",
			dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}
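
/*
 * For reference, a sketch of the controller node parsed above. The channel
 * count and unit address are example values; the compatible string and the
 * single specifier cell match what this driver registers and translates:
 *
 *	dmac0: dma-controller@e6700000 {
 *		compatible = "renesas,rcar-dmac";
 *		...
 *		dma-channels = <15>;
 *		#dma-cells = <1>;
 *	};
 */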

static int rcar_dmac_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
	unsigned int channels_offset = 0;
	struct dma_device *engine;
	struct rcar_dmac *dmac;
	struct resource *mem;
	unsigned int i;
	char *irqname;
	int irq;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);
	dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));

	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	/*
	 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from
	 * being flushed correctly, resulting in memory corruption. DMAC 0
	 * channel 0 is connected to microTLB 0 on currently supported
	 * platforms, so we can't use it with the IPMMU. As the IOMMU API
	 * operates at the device level we can't disable it selectively, so
	 * ignore channel 0 for now if the device is part of an IOMMU group.
	 */
	if (pdev->dev.iommu_group) {
		dmac->n_channels--;
		channels_offset = 1;
	}

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources. */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dmac->iomem))
		return PTR_ERR(dmac->iomem);

	irq = platform_get_irq_byname(pdev, "error");
	if (irq < 0) {
		dev_err(&pdev->dev, "no error IRQ specified\n");
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
				 dev_name(dmac->dev));
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
			       irqname, dmac);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	ret = rcar_dmac_init(dmac);
	pm_runtime_put(&pdev->dev);

	if (ret) {
		dev_err(&pdev->dev, "failed to reset device\n");
		goto error;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	for (i = 0; i < dmac->n_channels; ++i) {
		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
					   i + channels_offset);
		if (ret < 0)
			goto error;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto error;

	/*
	 * Register the DMA engine device.
	 *
	 * Default transfer size of 32 bytes requires 32-byte alignment.
	 */
	engine = &dmac->engine;
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	dma_cap_set(DMA_SLAVE, engine->cap_mask);

	engine->dev = &pdev->dev;
	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);

	engine->src_addr_widths = widths;
	engine->dst_addr_widths = widths;
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
	engine->device_config = rcar_dmac_device_config;
	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
	engine->device_tx_status = rcar_dmac_tx_status;
	engine->device_issue_pending = rcar_dmac_issue_pending;

	ret = dma_async_device_register(engine);
	if (ret < 0)
		goto error;

	return 0;

error:
	of_dma_controller_free(pdev->dev.of_node);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int rcar_dmac_remove(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

static void rcar_dmac_shutdown(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	rcar_dmac_stop(dmac);
}

static const struct of_device_id rcar_dmac_of_ids[] = {
	{ .compatible = "renesas,rcar-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);

static struct platform_driver rcar_dmac_driver = {
	.driver = {
		.pm = &rcar_dmac_pm,
		.name = "rcar-dmac",
		.of_match_table = rcar_dmac_of_ids,
	},
	.probe = rcar_dmac_probe,
	.remove = rcar_dmac_remove,
	.shutdown = rcar_dmac_shutdown,
};

module_platform_driver(rcar_dmac_driver);

MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");