Commit | Line | Data |
---|---|---|
d6be34fb JL |
1 | /* |
2 | * drivers/dma/fsl-edma.c | |
3 | * | |
4 | * Copyright 2013-2014 Freescale Semiconductor, Inc. | |
5 | * | |
6 | * Driver for the Freescale eDMA engine with flexible channel multiplexing | |
7 | * capability for DMA request sources. The eDMA block can be found on some | |
8 | * Vybrid and Layerscape SoCs. | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or modify it | |
11 | * under the terms of the GNU General Public License as published by the | |
12 | * Free Software Foundation; either version 2 of the License, or (at your | |
13 | * option) any later version. | |
14 | */ | |
15 | ||
d6be34fb JL |
16 | #include <linux/module.h> |
17 | #include <linux/interrupt.h> | |
18 | #include <linux/clk.h> | |
d6be34fb JL |
19 | #include <linux/of.h> |
20 | #include <linux/of_device.h> | |
21 | #include <linux/of_address.h> | |
22 | #include <linux/of_irq.h> | |
23 | #include <linux/of_dma.h> | |
24 | ||
9d831528 | 25 | #include "fsl-edma-common.h" |
d6be34fb JL |
26 | |
/*
 * Transfer-complete interrupt handler.
 *
 * Reads the per-channel interrupt status word and, for every pending
 * channel, acknowledges the interrupt in hardware and then completes
 * (one-shot) or recycles (cyclic) the channel's current descriptor.
 */
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	struct edma_regs *regs = &fsl_edma->regs;
	struct fsl_edma_chan *fsl_chan;

	intr = edma_readl(fsl_edma, regs->intl);
	if (!intr)
		return IRQ_NONE;	/* not ours; the line may be shared */

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			/* Ack this channel's interrupt in hardware first. */
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);

			fsl_chan = &fsl_edma->chans[ch];

			spin_lock(&fsl_chan->vchan.lock);
			/*
			 * NOTE(review): edesc is dereferenced without a NULL
			 * check — a spurious interrupt on an idle channel
			 * would oops here; confirm that cannot happen.
			 */
			if (!fsl_chan->edesc->iscyclic) {
				/* One-shot: retire the descriptor and idle the channel. */
				list_del(&fsl_chan->edesc->vdesc.node);
				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
				fsl_chan->edesc = NULL;
				fsl_chan->status = DMA_COMPLETE;
				fsl_chan->idle = true;
			} else {
				/* Cyclic: the descriptor stays active; notify the client. */
				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
			}

			/* Channel went idle: start the next queued descriptor, if any. */
			if (!fsl_chan->edesc)
				fsl_edma_xfer_desc(fsl_chan);

			spin_unlock(&fsl_chan->vchan.lock);
		}
	}
	return IRQ_HANDLED;
}
63 | ||
64 | static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id) | |
65 | { | |
66 | struct fsl_edma_engine *fsl_edma = dev_id; | |
67 | unsigned int err, ch; | |
377eaf3b | 68 | struct edma_regs *regs = &fsl_edma->regs; |
d6be34fb | 69 | |
377eaf3b | 70 | err = edma_readl(fsl_edma, regs->errl); |
d6be34fb JL |
71 | if (!err) |
72 | return IRQ_NONE; | |
73 | ||
74 | for (ch = 0; ch < fsl_edma->n_chans; ch++) { | |
75 | if (err & (0x1 << ch)) { | |
76 | fsl_edma_disable_request(&fsl_edma->chans[ch]); | |
377eaf3b | 77 | edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr); |
d6be34fb | 78 | fsl_edma->chans[ch].status = DMA_ERROR; |
82d149b8 | 79 | fsl_edma->chans[ch].idle = true; |
d6be34fb JL |
80 | } |
81 | } | |
82 | return IRQ_HANDLED; | |
83 | } | |
84 | ||
85 | static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id) | |
86 | { | |
87 | if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED) | |
88 | return IRQ_HANDLED; | |
89 | ||
90 | return fsl_edma_err_handler(irq, dev_id); | |
91 | } | |
92 | ||
d6be34fb JL |
/*
 * OF translate callback (#dma-cells = 2).
 *
 * args[0] selects which DMAMUX group the channel must belong to
 * (channels are split evenly across DMAMUX_NR muxes); args[1] is the
 * DMA request source (slave id) to route through the mux.
 *
 * Returns a claimed channel with its mux programmed, or NULL if the
 * spec is malformed or no free channel exists in the requested group.
 */
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	struct fsl_edma_chan *fsl_chan;
	unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;

	if (dma_spec->args_count != 2)
		return NULL;

	/* Serialize channel allocation against concurrent xlate calls. */
	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;	/* already in use */
		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
			/* chan is reused: NULL here means claiming raced/failed. */
			chan = dma_get_slave_channel(chan);
			if (chan) {
				chan->device->privatecnt++;
				fsl_chan = to_fsl_edma_chan(chan);
				fsl_chan->slave_id = dma_spec->args[1];
				fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
						true);
				mutex_unlock(&fsl_edma->fsl_edma_mutex);
				return chan;
			}
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}
124 | ||
d6be34fb JL |
125 | static int |
126 | fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) | |
127 | { | |
128 | int ret; | |
129 | ||
130 | fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx"); | |
131 | if (fsl_edma->txirq < 0) { | |
132 | dev_err(&pdev->dev, "Can't get edma-tx irq.\n"); | |
133 | return fsl_edma->txirq; | |
134 | } | |
135 | ||
136 | fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err"); | |
137 | if (fsl_edma->errirq < 0) { | |
138 | dev_err(&pdev->dev, "Can't get edma-err irq.\n"); | |
139 | return fsl_edma->errirq; | |
140 | } | |
141 | ||
142 | if (fsl_edma->txirq == fsl_edma->errirq) { | |
143 | ret = devm_request_irq(&pdev->dev, fsl_edma->txirq, | |
144 | fsl_edma_irq_handler, 0, "eDMA", fsl_edma); | |
145 | if (ret) { | |
146 | dev_err(&pdev->dev, "Can't register eDMA IRQ.\n"); | |
147 | return ret; | |
148 | } | |
149 | } else { | |
150 | ret = devm_request_irq(&pdev->dev, fsl_edma->txirq, | |
151 | fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma); | |
152 | if (ret) { | |
153 | dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n"); | |
154 | return ret; | |
155 | } | |
156 | ||
157 | ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, | |
158 | fsl_edma_err_handler, 0, "eDMA err", fsl_edma); | |
159 | if (ret) { | |
160 | dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n"); | |
161 | return ret; | |
162 | } | |
163 | } | |
164 | ||
165 | return 0; | |
166 | } | |
167 | ||
476c7c80 VK |
168 | static void fsl_edma_irq_exit( |
169 | struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) | |
170 | { | |
171 | if (fsl_edma->txirq == fsl_edma->errirq) { | |
172 | devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); | |
173 | } else { | |
174 | devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); | |
175 | devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma); | |
176 | } | |
177 | } | |
178 | ||
2610acf4 | 179 | static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks) |
5e2fe1e7 PG |
180 | { |
181 | int i; | |
182 | ||
2610acf4 | 183 | for (i = 0; i < nr_clocks; i++) |
5e2fe1e7 PG |
184 | clk_disable_unprepare(fsl_edma->muxclk[i]); |
185 | } | |
186 | ||
d6be34fb JL |
187 | static int fsl_edma_probe(struct platform_device *pdev) |
188 | { | |
189 | struct device_node *np = pdev->dev.of_node; | |
190 | struct fsl_edma_engine *fsl_edma; | |
191 | struct fsl_edma_chan *fsl_chan; | |
377eaf3b | 192 | struct edma_regs *regs; |
d6be34fb JL |
193 | struct resource *res; |
194 | int len, chans; | |
195 | int ret, i; | |
196 | ||
197 | ret = of_property_read_u32(np, "dma-channels", &chans); | |
198 | if (ret) { | |
199 | dev_err(&pdev->dev, "Can't get dma-channels.\n"); | |
200 | return ret; | |
201 | } | |
202 | ||
203 | len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans; | |
204 | fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | |
205 | if (!fsl_edma) | |
206 | return -ENOMEM; | |
207 | ||
377eaf3b | 208 | fsl_edma->version = v1; |
d6be34fb JL |
209 | fsl_edma->n_chans = chans; |
210 | mutex_init(&fsl_edma->fsl_edma_mutex); | |
211 | ||
212 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
213 | fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res); | |
214 | if (IS_ERR(fsl_edma->membase)) | |
215 | return PTR_ERR(fsl_edma->membase); | |
216 | ||
377eaf3b AD |
217 | fsl_edma_setup_regs(fsl_edma); |
218 | regs = &fsl_edma->regs; | |
219 | ||
d6be34fb JL |
220 | for (i = 0; i < DMAMUX_NR; i++) { |
221 | char clkname[32]; | |
222 | ||
223 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); | |
224 | fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res); | |
2610acf4 AP |
225 | if (IS_ERR(fsl_edma->muxbase[i])) { |
226 | /* on error: disable all previously enabled clks */ | |
227 | fsl_disable_clocks(fsl_edma, i); | |
d6be34fb | 228 | return PTR_ERR(fsl_edma->muxbase[i]); |
2610acf4 | 229 | } |
d6be34fb JL |
230 | |
231 | sprintf(clkname, "dmamux%d", i); | |
232 | fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname); | |
233 | if (IS_ERR(fsl_edma->muxclk[i])) { | |
234 | dev_err(&pdev->dev, "Missing DMAMUX block clock.\n"); | |
2610acf4 AP |
235 | /* on error: disable all previously enabled clks */ |
236 | fsl_disable_clocks(fsl_edma, i); | |
d6be34fb JL |
237 | return PTR_ERR(fsl_edma->muxclk[i]); |
238 | } | |
239 | ||
240 | ret = clk_prepare_enable(fsl_edma->muxclk[i]); | |
2610acf4 AP |
241 | if (ret) |
242 | /* on error: disable all previously enabled clks */ | |
243 | fsl_disable_clocks(fsl_edma, i); | |
d6be34fb JL |
244 | |
245 | } | |
246 | ||
d6be34fb JL |
247 | fsl_edma->big_endian = of_property_read_bool(np, "big-endian"); |
248 | ||
249 | INIT_LIST_HEAD(&fsl_edma->dma_dev.channels); | |
250 | for (i = 0; i < fsl_edma->n_chans; i++) { | |
251 | struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i]; | |
252 | ||
253 | fsl_chan->edma = fsl_edma; | |
82d149b8 YY |
254 | fsl_chan->pm_state = RUNNING; |
255 | fsl_chan->slave_id = 0; | |
256 | fsl_chan->idle = true; | |
d6be34fb JL |
257 | fsl_chan->vchan.desc_free = fsl_edma_free_desc; |
258 | vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); | |
259 | ||
377eaf3b | 260 | edma_writew(fsl_edma, 0x0, ®s->tcd[i].csr); |
d6be34fb JL |
261 | fsl_edma_chan_mux(fsl_chan, 0, false); |
262 | } | |
263 | ||
377eaf3b | 264 | edma_writel(fsl_edma, ~0, regs->intl); |
0fe25d61 SA |
265 | ret = fsl_edma_irq_init(pdev, fsl_edma); |
266 | if (ret) | |
267 | return ret; | |
268 | ||
d6be34fb JL |
269 | dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask); |
270 | dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask); | |
271 | dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask); | |
272 | ||
273 | fsl_edma->dma_dev.dev = &pdev->dev; | |
274 | fsl_edma->dma_dev.device_alloc_chan_resources | |
275 | = fsl_edma_alloc_chan_resources; | |
276 | fsl_edma->dma_dev.device_free_chan_resources | |
277 | = fsl_edma_free_chan_resources; | |
278 | fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status; | |
279 | fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg; | |
280 | fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic; | |
d80f381f MR |
281 | fsl_edma->dma_dev.device_config = fsl_edma_slave_config; |
282 | fsl_edma->dma_dev.device_pause = fsl_edma_pause; | |
283 | fsl_edma->dma_dev.device_resume = fsl_edma_resume; | |
284 | fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all; | |
d6be34fb | 285 | fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending; |
f45c4311 MR |
286 | |
287 | fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS; | |
288 | fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS; | |
289 | fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | |
d6be34fb JL |
290 | |
291 | platform_set_drvdata(pdev, fsl_edma); | |
292 | ||
293 | ret = dma_async_device_register(&fsl_edma->dma_dev); | |
294 | if (ret) { | |
a86144da PG |
295 | dev_err(&pdev->dev, |
296 | "Can't register Freescale eDMA engine. (%d)\n", ret); | |
2610acf4 | 297 | fsl_disable_clocks(fsl_edma, DMAMUX_NR); |
d6be34fb JL |
298 | return ret; |
299 | } | |
300 | ||
301 | ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma); | |
302 | if (ret) { | |
a86144da PG |
303 | dev_err(&pdev->dev, |
304 | "Can't register Freescale eDMA of_dma. (%d)\n", ret); | |
d6be34fb | 305 | dma_async_device_unregister(&fsl_edma->dma_dev); |
2610acf4 | 306 | fsl_disable_clocks(fsl_edma, DMAMUX_NR); |
d6be34fb JL |
307 | return ret; |
308 | } | |
309 | ||
310 | /* enable round robin arbitration */ | |
377eaf3b | 311 | edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr); |
d6be34fb JL |
312 | |
313 | return 0; | |
314 | } | |
315 | ||
/*
 * Teardown in the reverse order of probe: IRQs are freed first so no
 * handler can run while channels and registrations are being removed;
 * the DMAMUX clocks are released last.
 */
static int fsl_edma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);

	fsl_edma_irq_exit(pdev, fsl_edma);
	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_edma->dma_dev);
	fsl_disable_clocks(fsl_edma, DMAMUX_NR);

	return 0;
}
329 | ||
82d149b8 YY |
/*
 * PM suspend_late callback: clients should already have stopped their
 * transfers; any channel still busy is force-disabled with a warning.
 * Each channel is marked SUSPENDED under its vchan lock so no new
 * transfer can be started on it until resume.
 */
static int fsl_edma_suspend_late(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	unsigned long flags;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		/* Make sure chan is idle or will force disable. */
		if (unlikely(!fsl_chan->idle)) {
			dev_warn(dev, "WARN: There is non-idle channel.");
			fsl_edma_disable_request(fsl_chan);
			fsl_edma_chan_mux(fsl_chan, 0, false);
		}

		fsl_chan->pm_state = SUSPENDED;
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	}

	return 0;
}
353 | ||
/*
 * PM resume_early callback: re-initialise hardware state that is
 * presumed lost across suspend — quiesce each channel's TCD, restore
 * the mux routing for channels that had a slave id assigned, and
 * re-enable round-robin arbitration. pm_state goes back to RUNNING so
 * issue_pending may start transfers again.
 */
static int fsl_edma_resume_early(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	struct edma_regs *regs = &fsl_edma->regs;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		fsl_chan->pm_state = RUNNING;
		edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
		/* slave_id == 0 means the channel was never routed. */
		if (fsl_chan->slave_id != 0)
			fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
	}

	/* Same arbitration setup as probe: round robin group/channel. */
	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}
373 | ||
/*
 * eDMA provides the service to others, so it should be suspend late
 * and resume early. When eDMA suspend, all of the clients should stop
 * the DMA data transmission and let the channel idle.
 */
static const struct dev_pm_ops fsl_edma_pm_ops = {
	.suspend_late   = fsl_edma_suspend_late,
	.resume_early   = fsl_edma_resume_early,
};
383 | ||
d6be34fb JL |
/* Devicetree compatibles handled by this driver. */
static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
389 | ||
/* Platform driver glue: OF match table plus PM ops defined above. */
static struct platform_driver fsl_edma_driver = {
	.driver		= {
		.name	= "fsl-edma",
		.of_match_table = fsl_edma_dt_ids,
		.pm     = &fsl_edma_pm_ops,
	},
	.probe          = fsl_edma_probe,
	.remove		= fsl_edma_remove,
};
399 | ||
8edc51c1 YY |
static int __init fsl_edma_init(void)
{
	return platform_driver_register(&fsl_edma_driver);
}
/* subsys_initcall: register early so client drivers can find channels. */
subsys_initcall(fsl_edma_init);

static void __exit fsl_edma_exit(void)
{
	platform_driver_unregister(&fsl_edma_driver);
}
module_exit(fsl_edma_exit);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");