// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2018 Socionext Inc.
//   Author: Masahiro Yamada <yamada.masahiro@socionext.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"
/* registers common for all channels */
#define UNIPHIER_MDMAC_CMD              0x000   /* issue DMA start/abort */
#define   UNIPHIER_MDMAC_CMD_ABORT      BIT(31) /* 1: abort, 0: start */

/* per-channel registers */
#define UNIPHIER_MDMAC_CH_OFFSET        0x100
#define UNIPHIER_MDMAC_CH_STRIDE        0x040

#define UNIPHIER_MDMAC_CH_IRQ_STAT      0x010   /* current hw status (RO) */
#define UNIPHIER_MDMAC_CH_IRQ_REQ       0x014   /* latched STAT (WOC) */
#define UNIPHIER_MDMAC_CH_IRQ_EN        0x018   /* IRQ enable mask */
#define UNIPHIER_MDMAC_CH_IRQ_DET       0x01c   /* REQ & EN (RO) */
#define   UNIPHIER_MDMAC_CH_IRQ__ABORT          BIT(13)
#define   UNIPHIER_MDMAC_CH_IRQ__DONE           BIT(1)
#define UNIPHIER_MDMAC_CH_SRC_MODE      0x020   /* mode of source */
#define UNIPHIER_MDMAC_CH_DEST_MODE     0x024   /* mode of destination */
#define   UNIPHIER_MDMAC_CH_MODE__ADDR_INC      (0 << 4)
#define   UNIPHIER_MDMAC_CH_MODE__ADDR_DEC      (1 << 4)
#define   UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED    (2 << 4)
#define UNIPHIER_MDMAC_CH_SRC_ADDR      0x028   /* source address */
#define UNIPHIER_MDMAC_CH_DEST_ADDR     0x02c   /* destination address */
#define UNIPHIER_MDMAC_CH_SIZE          0x030   /* transfer bytes */

#define UNIPHIER_MDMAC_SLAVE_BUSWIDTHS \
        (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
         BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

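/*
 * One prepared transaction: the scatterlist handed in by prep_slave_sg.
 * sg_cur indexes the entry currently programmed into the hardware.
 */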
struct uniphier_mdmac_desc {
        struct virt_dma_desc vd;
        struct scatterlist *sgl;
        unsigned int sg_len;
        unsigned int sg_cur;
        enum dma_transfer_direction dir;
};

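/*
 * Per-channel state. 'md' is the descriptor the hardware is currently
 * working on; it is NULL while the channel is idle or being aborted.
 */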
struct uniphier_mdmac_chan {
        struct virt_dma_chan vc;
        struct uniphier_mdmac_device *mdev;
        struct uniphier_mdmac_desc *md;
        void __iomem *reg_ch_base;
        unsigned int chan_id;
};

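/* Controller instance: one uniphier_mdmac_chan per hardware channel. */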
struct uniphier_mdmac_device {
        struct dma_device ddev;
        struct clk *clk;
        void __iomem *reg_base;
        struct uniphier_mdmac_chan channels[];
};

static struct uniphier_mdmac_chan *
to_uniphier_mdmac_chan(struct virt_dma_chan *vc)
{
        return container_of(vc, struct uniphier_mdmac_chan, vc);
}

static struct uniphier_mdmac_desc *
to_uniphier_mdmac_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct uniphier_mdmac_desc, vd);
}

/* mc->vc.lock must be held by caller */
static struct uniphier_mdmac_desc *
uniphier_mdmac_next_desc(struct uniphier_mdmac_chan *mc)
{
        struct virt_dma_desc *vd;

        vd = vchan_next_desc(&mc->vc);
        if (!vd) {
                mc->md = NULL;
                return NULL;
        }

        list_del(&vd->node);

        mc->md = to_uniphier_mdmac_desc(vd);

        return mc->md;
}

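/*
 * Program the current scatterlist entry of 'md' into the channel
 * registers and kick the transfer by writing the channel's bit to the
 * common CMD register. Only the DONE interrupt is enabled here.
 */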
/* mc->vc.lock must be held by caller */
static void uniphier_mdmac_handle(struct uniphier_mdmac_chan *mc,
                                  struct uniphier_mdmac_desc *md)
{
        struct uniphier_mdmac_device *mdev = mc->mdev;
        struct scatterlist *sg;
        u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__DONE;
        u32 src_mode, src_addr, dest_mode, dest_addr, chunk_size;

        sg = &md->sgl[md->sg_cur];

        if (md->dir == DMA_MEM_TO_DEV) {
                src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC;
                src_addr = sg_dma_address(sg);
                dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED;
                dest_addr = 0;
        } else {
                src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED;
                src_addr = 0;
                dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC;
                dest_addr = sg_dma_address(sg);
        }

        chunk_size = sg_dma_len(sg);

        writel(src_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_MODE);
        writel(dest_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_MODE);
        writel(src_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_ADDR);
        writel(dest_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_ADDR);
        writel(chunk_size, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SIZE);

        /* write 1 to clear */
        writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

        writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_EN);

        writel(BIT(mc->chan_id), mdev->reg_base + UNIPHIER_MDMAC_CMD);
}

/* mc->vc.lock must be held by caller */
static void uniphier_mdmac_start(struct uniphier_mdmac_chan *mc)
{
        struct uniphier_mdmac_desc *md;

        md = uniphier_mdmac_next_desc(mc);
        if (md)
                uniphier_mdmac_handle(mc, md);
}

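/*
 * Request an abort of the running transfer and poll (for up to 20 us)
 * until the ABORT flag is latched in the IRQ_REQ register.
 */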
/* mc->vc.lock must be held by caller */
static int uniphier_mdmac_abort(struct uniphier_mdmac_chan *mc)
{
        struct uniphier_mdmac_device *mdev = mc->mdev;
        u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__ABORT;
        u32 val;

        /* write 1 to clear */
        writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

        writel(UNIPHIER_MDMAC_CMD_ABORT | BIT(mc->chan_id),
               mdev->reg_base + UNIPHIER_MDMAC_CMD);

        /*
         * Abort should be accepted soon. We poll the bit here instead of
         * waiting for the interrupt.
         */
        return readl_poll_timeout(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ,
                                  val, val & irq_flag, 0, 20);
}

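/*
 * Per-channel interrupt handler: acknowledge the latched status, then
 * either program the next scatterlist entry, complete the descriptor
 * and start the next one, or bail out if the interrupt belongs to
 * another channel sharing the line or to an abort.
 */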
static irqreturn_t uniphier_mdmac_interrupt(int irq, void *dev_id)
{
        struct uniphier_mdmac_chan *mc = dev_id;
        struct uniphier_mdmac_desc *md;
        irqreturn_t ret = IRQ_HANDLED;
        u32 irq_stat;

        spin_lock(&mc->vc.lock);

        irq_stat = readl(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_DET);

        /*
         * Some channels share a single interrupt line. If the IRQ status is 0,
         * this interrupt was probably triggered by a different channel.
         */
        if (!irq_stat) {
                ret = IRQ_NONE;
                goto out;
        }

        /* write 1 to clear */
        writel(irq_stat, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

        /*
         * The UNIPHIER_MDMAC_CH_IRQ__DONE interrupt is asserted even when the
         * DMA is aborted. To distinguish normal completion from an abort,
         * check mc->md. If it is NULL, we are aborting.
         */
        md = mc->md;
        if (!md)
                goto out;

        md->sg_cur++;

        if (md->sg_cur >= md->sg_len) {
                vchan_cookie_complete(&md->vd);
                md = uniphier_mdmac_next_desc(mc);
                if (!md)
                        goto out;
        }

        uniphier_mdmac_handle(mc, md);

out:
        spin_unlock(&mc->vc.lock);

        return ret;
}

static void uniphier_mdmac_free_chan_resources(struct dma_chan *chan)
{
        vchan_free_chan_resources(to_virt_chan(chan));
}

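/*
 * Wrap the caller's scatterlist in a driver descriptor; the entries are
 * programmed into the hardware one at a time.
 */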
static struct dma_async_tx_descriptor *
uniphier_mdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                             unsigned int sg_len,
                             enum dma_transfer_direction direction,
                             unsigned long flags, void *context)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct uniphier_mdmac_desc *md;

        if (!is_slave_direction(direction))
                return NULL;

        md = kzalloc(sizeof(*md), GFP_NOWAIT);
        if (!md)
                return NULL;

        md->sgl = sgl;
        md->sg_len = sg_len;
        md->dir = direction;

        return vchan_tx_prep(vc, &md->vd, flags);
}

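/*
 * Abort the in-flight descriptor (if any) and free everything still
 * queued on the virtual channel.
 */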
static int uniphier_mdmac_terminate_all(struct dma_chan *chan)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc);
        unsigned long flags;
        int ret = 0;
        LIST_HEAD(head);

        spin_lock_irqsave(&vc->lock, flags);

        if (mc->md) {
                vchan_terminate_vdesc(&mc->md->vd);
                mc->md = NULL;
                ret = uniphier_mdmac_abort(mc);
        }
        vchan_get_all_descriptors(vc, &head);

        spin_unlock_irqrestore(&vc->lock, flags);

        vchan_dma_desc_free_list(vc, &head);

        return ret;
}

static void uniphier_mdmac_synchronize(struct dma_chan *chan)
{
        vchan_synchronize(to_virt_chan(chan));
}

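/*
 * The residue is the byte count remaining in the chunk the hardware is
 * currently transferring (read back from the SIZE register) plus the
 * full length of the entries that have not been submitted yet.
 */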
static enum dma_status uniphier_mdmac_tx_status(struct dma_chan *chan,
                                                dma_cookie_t cookie,
                                                struct dma_tx_state *txstate)
{
        struct virt_dma_chan *vc;
        struct virt_dma_desc *vd;
        struct uniphier_mdmac_chan *mc;
        struct uniphier_mdmac_desc *md = NULL;
        enum dma_status stat;
        unsigned long flags;
        int i;

        stat = dma_cookie_status(chan, cookie, txstate);
        /* Return immediately if we do not need to compute the residue. */
        if (stat == DMA_COMPLETE || !txstate)
                return stat;

        vc = to_virt_chan(chan);

        spin_lock_irqsave(&vc->lock, flags);

        mc = to_uniphier_mdmac_chan(vc);

        if (mc->md && mc->md->vd.tx.cookie == cookie) {
                /* residue from the in-flight chunk */
                txstate->residue = readl(mc->reg_ch_base +
                                         UNIPHIER_MDMAC_CH_SIZE);
                md = mc->md;
        }

        if (!md) {
                vd = vchan_find_desc(vc, cookie);
                if (vd)
                        md = to_uniphier_mdmac_desc(vd);
        }

        if (md) {
                /* residue from the queued chunks */
                for (i = md->sg_cur; i < md->sg_len; i++)
                        txstate->residue += sg_dma_len(&md->sgl[i]);
        }

        spin_unlock_irqrestore(&vc->lock, flags);

        return stat;
}

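/*
 * Move newly submitted descriptors to the issued list and start the
 * hardware if the channel is currently idle.
 */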
static void uniphier_mdmac_issue_pending(struct dma_chan *chan)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc);
        unsigned long flags;

        spin_lock_irqsave(&vc->lock, flags);

        if (vchan_issue_pending(vc) && !mc->md)
                uniphier_mdmac_start(mc);

        spin_unlock_irqrestore(&vc->lock, flags);
}

static void uniphier_mdmac_desc_free(struct virt_dma_desc *vd)
{
        kfree(to_uniphier_mdmac_desc(vd));
}

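/*
 * Set up one channel: request its (possibly shared) interrupt, derive
 * its register base from the per-channel stride, and register it with
 * the virt-dma framework.
 */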
static int uniphier_mdmac_chan_init(struct platform_device *pdev,
                                    struct uniphier_mdmac_device *mdev,
                                    int chan_id)
{
        struct device *dev = &pdev->dev;
        struct uniphier_mdmac_chan *mc = &mdev->channels[chan_id];
        char *irq_name;
        int irq, ret;

        irq = platform_get_irq(pdev, chan_id);
        if (irq < 0)
                return irq;

        irq_name = devm_kasprintf(dev, GFP_KERNEL, "uniphier-mio-dmac-ch%d",
                                  chan_id);
        if (!irq_name)
                return -ENOMEM;

        ret = devm_request_irq(dev, irq, uniphier_mdmac_interrupt,
                               IRQF_SHARED, irq_name, mc);
        if (ret)
                return ret;

        mc->mdev = mdev;
        mc->reg_ch_base = mdev->reg_base + UNIPHIER_MDMAC_CH_OFFSET +
                          UNIPHIER_MDMAC_CH_STRIDE * chan_id;
        mc->chan_id = chan_id;
        mc->vc.desc_free = uniphier_mdmac_desc_free;
        vchan_init(&mc->vc, &mdev->ddev);

        return 0;
}

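/*
 * The channel count is taken from the number of interrupts declared in
 * the device tree: one interrupt per channel.
 */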
static int uniphier_mdmac_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct uniphier_mdmac_device *mdev;
        struct dma_device *ddev;
        int nr_chans, ret, i;

        nr_chans = platform_irq_count(pdev);
        if (nr_chans < 0)
                return nr_chans;

        ret = dma_set_mask(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
                            GFP_KERNEL);
        if (!mdev)
                return -ENOMEM;

        mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(mdev->reg_base))
                return PTR_ERR(mdev->reg_base);

        mdev->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(mdev->clk)) {
                dev_err(dev, "failed to get clock\n");
                return PTR_ERR(mdev->clk);
        }

        ret = clk_prepare_enable(mdev->clk);
        if (ret)
                return ret;

        ddev = &mdev->ddev;
        ddev->dev = dev;
        dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
        ddev->src_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS;
        ddev->dst_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS;
        ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
        ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
        ddev->device_free_chan_resources = uniphier_mdmac_free_chan_resources;
        ddev->device_prep_slave_sg = uniphier_mdmac_prep_slave_sg;
        ddev->device_terminate_all = uniphier_mdmac_terminate_all;
        ddev->device_synchronize = uniphier_mdmac_synchronize;
        ddev->device_tx_status = uniphier_mdmac_tx_status;
        ddev->device_issue_pending = uniphier_mdmac_issue_pending;
        INIT_LIST_HEAD(&ddev->channels);

        for (i = 0; i < nr_chans; i++) {
                ret = uniphier_mdmac_chan_init(pdev, mdev, i);
                if (ret)
                        goto disable_clk;
        }

        ret = dma_async_device_register(ddev);
        if (ret)
                goto disable_clk;

        ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id,
                                         ddev);
        if (ret)
                goto unregister_dmac;

        platform_set_drvdata(pdev, mdev);

        return 0;

unregister_dmac:
        dma_async_device_unregister(ddev);
disable_clk:
        clk_disable_unprepare(mdev->clk);

        return ret;
}

static int uniphier_mdmac_remove(struct platform_device *pdev)
{
        struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev);
        struct dma_chan *chan;
        int ret;

        /*
         * Before reaching here, almost all descriptors have been freed by the
         * ->device_free_chan_resources() hook. However, each channel might
         * still be holding one descriptor that was in flight at that moment.
         * Terminate it to make sure the hardware is no longer running. Then,
         * free the channel resources once again to avoid a memory leak.
         */
        list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
                ret = dmaengine_terminate_sync(chan);
                if (ret)
                        return ret;
                uniphier_mdmac_free_chan_resources(chan);
        }

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&mdev->ddev);
        clk_disable_unprepare(mdev->clk);

        return 0;
}

static const struct of_device_id uniphier_mdmac_match[] = {
        { .compatible = "socionext,uniphier-mio-dmac" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);

static struct platform_driver uniphier_mdmac_driver = {
        .probe = uniphier_mdmac_probe,
        .remove = uniphier_mdmac_remove,
        .driver = {
                .name = "uniphier-mio-dmac",
                .of_match_table = uniphier_mdmac_match,
        },
};
module_platform_driver(uniphier_mdmac_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier MIO DMAC driver");
MODULE_LICENSE("GPL v2");