Commit | Line | Data |
---|---|---|
2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
d6be34fb JL |
2 | /* |
3 | * drivers/dma/fsl-edma.c | |
4 | * | |
5 | * Copyright 2013-2014 Freescale Semiconductor, Inc. | |
6 | * | |
7 | * Driver for the Freescale eDMA engine with flexible channel multiplexing | |
8 | * capability for DMA request sources. The eDMA block can be found on some | |
9 | * Vybrid and Layerscape SoCs. | |
d6be34fb JL |
10 | */ |
11 | ||
d6be34fb JL |
12 | #include <linux/module.h> |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/clk.h> | |
d6be34fb JL |
15 | #include <linux/of.h> |
16 | #include <linux/of_device.h> | |
17 | #include <linux/of_address.h> | |
18 | #include <linux/of_irq.h> | |
19 | #include <linux/of_dma.h> | |
e0674853 | 20 | #include <linux/dma-mapping.h> |
d6be34fb | 21 | |
9d831528 | 22 | #include "fsl-edma-common.h" |
d6be34fb | 23 | |
ba1cab79 AS |
/*
 * Wait for any running descriptor callbacks/tasklets on this virtual
 * channel to finish (dmaengine ->device_synchronize hook).
 */
static void fsl_edma_synchronize(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	vchan_synchronize(&fsl_chan->vchan);
}
30 | ||
d6be34fb JL |
/*
 * Transfer-complete interrupt handler.
 *
 * Reads the pending-interrupt bitmap (one bit per channel), acknowledges
 * and completes each signalled channel, then kicks the next queued
 * descriptor if the channel became idle. Returns IRQ_NONE when no bit
 * was set so a shared line can be passed on.
 */
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	struct edma_regs *regs = &fsl_edma->regs;
	struct fsl_edma_chan *fsl_chan;

	intr = edma_readl(fsl_edma, regs->intl);
	if (!intr)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			/* Ack this channel's interrupt before handling it. */
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);

			fsl_chan = &fsl_edma->chans[ch];

			spin_lock(&fsl_chan->vchan.lock);

			if (!fsl_chan->edesc) {
				/* terminate_all called before */
				spin_unlock(&fsl_chan->vchan.lock);
				continue;
			}

			if (!fsl_chan->edesc->iscyclic) {
				/* One-shot transfer: complete the cookie. */
				list_del(&fsl_chan->edesc->vdesc.node);
				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
				fsl_chan->edesc = NULL;
				fsl_chan->status = DMA_COMPLETE;
				fsl_chan->idle = true;
			} else {
				/* Cyclic transfers keep running; just notify. */
				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
			}

			/* Channel idle: start the next pending descriptor. */
			if (!fsl_chan->edesc)
				fsl_edma_xfer_desc(fsl_chan);

			spin_unlock(&fsl_chan->vchan.lock);
		}
	}
	return IRQ_HANDLED;
}
74 | ||
/*
 * Error interrupt handler: for every channel flagged in the error
 * register, stop its hardware request, acknowledge the error and mark
 * the channel as errored/idle. Returns IRQ_NONE if no error was pending.
 */
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int err, ch;
	struct edma_regs *regs = &fsl_edma->regs;

	err = edma_readl(fsl_edma, regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (err & (0x1 << ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);
			/* Clear the per-channel error flag. */
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
			fsl_edma->chans[ch].status = DMA_ERROR;
			fsl_edma->chans[ch].idle = true;
		}
	}
	return IRQ_HANDLED;
}
95 | ||
96 | static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id) | |
97 | { | |
98 | if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED) | |
99 | return IRQ_HANDLED; | |
100 | ||
101 | return fsl_edma_err_handler(irq, dev_id); | |
102 | } | |
103 | ||
d6be34fb JL |
/*
 * OF dma-cells translation callback.
 *
 * dma_spec->args[0] selects the DMAMUX instance, args[1] the request
 * (slave) id on that mux. Picks the first unused channel that belongs to
 * the requested mux, claims it, programs the mux and returns it; NULL on
 * failure. fsl_edma_mutex serializes concurrent translations.
 */
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	struct fsl_edma_chan *fsl_chan;
	u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
	/* Channels are split evenly across the muxes. */
	unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;

	if (dma_spec->args_count != 2)
		return NULL;

	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;
		/* Only consider channels routed through the requested mux. */
		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			if (chan) {
				chan->device->privatecnt++;
				fsl_chan = to_fsl_edma_chan(chan);
				fsl_chan->slave_id = dma_spec->args[1];
				fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
						true);
				mutex_unlock(&fsl_edma->fsl_edma_mutex);
				return chan;
			}
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}
136 | ||
d6be34fb JL |
137 | static int |
138 | fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) | |
139 | { | |
140 | int ret; | |
141 | ||
142 | fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx"); | |
e17be6e1 | 143 | if (fsl_edma->txirq < 0) |
d6be34fb | 144 | return fsl_edma->txirq; |
d6be34fb JL |
145 | |
146 | fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err"); | |
e17be6e1 | 147 | if (fsl_edma->errirq < 0) |
d6be34fb | 148 | return fsl_edma->errirq; |
d6be34fb JL |
149 | |
150 | if (fsl_edma->txirq == fsl_edma->errirq) { | |
151 | ret = devm_request_irq(&pdev->dev, fsl_edma->txirq, | |
152 | fsl_edma_irq_handler, 0, "eDMA", fsl_edma); | |
153 | if (ret) { | |
154 | dev_err(&pdev->dev, "Can't register eDMA IRQ.\n"); | |
e095189a | 155 | return ret; |
d6be34fb JL |
156 | } |
157 | } else { | |
158 | ret = devm_request_irq(&pdev->dev, fsl_edma->txirq, | |
159 | fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma); | |
160 | if (ret) { | |
161 | dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n"); | |
e095189a | 162 | return ret; |
d6be34fb JL |
163 | } |
164 | ||
165 | ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, | |
166 | fsl_edma_err_handler, 0, "eDMA err", fsl_edma); | |
167 | if (ret) { | |
168 | dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n"); | |
e095189a | 169 | return ret; |
d6be34fb JL |
170 | } |
171 | } | |
172 | ||
173 | return 0; | |
174 | } | |
175 | ||
232a7f18 RG |
/*
 * IRQ setup for the v3 (i.MX7ULP-style) eDMA: one interrupt per channel
 * plus a trailing error interrupt, all taken positionally from the DT.
 * Returns 0 or a negative errno.
 */
static int
fsl_edma2_irq_init(struct platform_device *pdev,
		   struct fsl_edma_engine *fsl_edma)
{
	int i, ret, irq;
	int count;

	count = platform_irq_count(pdev);
	dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
	/* Need at least a couple of channel IRQs plus the error IRQ. */
	if (count <= 2) {
		dev_err(&pdev->dev, "Interrupts in DTS not correct.\n");
		return -EINVAL;
	}
	/*
	 * 16 channel independent interrupts + 1 error interrupt on i.mx7ulp.
	 * 2 channel share one interrupt, for example, ch0/ch16, ch1/ch17...
	 * For now, just simply request irq without IRQF_SHARED flag, since 16
	 * channels are enough on i.mx7ulp whose M4 domain own some peripherals.
	 */
	for (i = 0; i < count; i++) {
		irq = platform_get_irq(pdev, i);
		/* NOTE(review): returns -ENXIO rather than propagating irq —
		 * confirm this is intentional. */
		if (irq < 0)
			return -ENXIO;

		/* Name each channel IRQ after its channel number. */
		sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);

		/* The last IRQ is for eDMA err */
		if (i == count - 1)
			ret = devm_request_irq(&pdev->dev, irq,
					       fsl_edma_err_handler,
					       0, "eDMA2-ERR", fsl_edma);
		else
			ret = devm_request_irq(&pdev->dev, irq,
					       fsl_edma_tx_handler, 0,
					       fsl_edma->chans[i].chan_name,
					       fsl_edma);
		if (ret)
			return ret;
	}

	return 0;
}
218 | ||
476c7c80 VK |
219 | static void fsl_edma_irq_exit( |
220 | struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) | |
221 | { | |
222 | if (fsl_edma->txirq == fsl_edma->errirq) { | |
223 | devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); | |
224 | } else { | |
225 | devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma); | |
226 | devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma); | |
227 | } | |
228 | } | |
229 | ||
2610acf4 | 230 | static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks) |
5e2fe1e7 PG |
231 | { |
232 | int i; | |
233 | ||
2610acf4 | 234 | for (i = 0; i < nr_clocks; i++) |
5e2fe1e7 PG |
235 | clk_disable_unprepare(fsl_edma->muxclk[i]); |
236 | } | |
237 | ||
af802728 RG |
/* Vybrid (vf610): v1 engine with the full set of DMAMUX instances. */
static struct fsl_edma_drvdata vf610_data = {
	.version = v1,
	.dmamuxs = DMAMUX_NR,
	.setup_irq = fsl_edma_irq_init,
};

/* LS1028A: same as vf610 but with swapped mux register layout. */
static struct fsl_edma_drvdata ls1028a_data = {
	.version = v1,
	.dmamuxs = DMAMUX_NR,
	.mux_swap = true,
	.setup_irq = fsl_edma_irq_init,
};

/* i.MX7ULP: v3 engine, one mux, separate DMA block clock, per-channel IRQs. */
static struct fsl_edma_drvdata imx7ulp_data = {
	.version = v3,
	.dmamuxs = 1,
	.has_dmaclk = true,
	.setup_irq = fsl_edma2_irq_init,
};

static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
	{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
	{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
265 | ||
d6be34fb JL |
266 | static int fsl_edma_probe(struct platform_device *pdev) |
267 | { | |
af802728 RG |
268 | const struct of_device_id *of_id = |
269 | of_match_device(fsl_edma_dt_ids, &pdev->dev); | |
d6be34fb JL |
270 | struct device_node *np = pdev->dev.of_node; |
271 | struct fsl_edma_engine *fsl_edma; | |
af802728 | 272 | const struct fsl_edma_drvdata *drvdata = NULL; |
d6be34fb | 273 | struct fsl_edma_chan *fsl_chan; |
377eaf3b | 274 | struct edma_regs *regs; |
d6be34fb JL |
275 | int len, chans; |
276 | int ret, i; | |
277 | ||
af802728 RG |
278 | if (of_id) |
279 | drvdata = of_id->data; | |
280 | if (!drvdata) { | |
281 | dev_err(&pdev->dev, "unable to find driver data\n"); | |
282 | return -EINVAL; | |
283 | } | |
284 | ||
d6be34fb JL |
285 | ret = of_property_read_u32(np, "dma-channels", &chans); |
286 | if (ret) { | |
287 | dev_err(&pdev->dev, "Can't get dma-channels.\n"); | |
288 | return ret; | |
289 | } | |
290 | ||
291 | len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans; | |
292 | fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | |
293 | if (!fsl_edma) | |
294 | return -ENOMEM; | |
295 | ||
af802728 | 296 | fsl_edma->drvdata = drvdata; |
d6be34fb JL |
297 | fsl_edma->n_chans = chans; |
298 | mutex_init(&fsl_edma->fsl_edma_mutex); | |
299 | ||
4b23603a | 300 | fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0); |
d6be34fb JL |
301 | if (IS_ERR(fsl_edma->membase)) |
302 | return PTR_ERR(fsl_edma->membase); | |
303 | ||
377eaf3b AD |
304 | fsl_edma_setup_regs(fsl_edma); |
305 | regs = &fsl_edma->regs; | |
306 | ||
232a7f18 RG |
307 | if (drvdata->has_dmaclk) { |
308 | fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma"); | |
309 | if (IS_ERR(fsl_edma->dmaclk)) { | |
310 | dev_err(&pdev->dev, "Missing DMA block clock.\n"); | |
311 | return PTR_ERR(fsl_edma->dmaclk); | |
312 | } | |
313 | ||
314 | ret = clk_prepare_enable(fsl_edma->dmaclk); | |
315 | if (ret) { | |
316 | dev_err(&pdev->dev, "DMA clk block failed.\n"); | |
317 | return ret; | |
318 | } | |
319 | } | |
320 | ||
af802728 | 321 | for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) { |
d6be34fb JL |
322 | char clkname[32]; |
323 | ||
4b23603a TA |
324 | fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev, |
325 | 1 + i); | |
2610acf4 AP |
326 | if (IS_ERR(fsl_edma->muxbase[i])) { |
327 | /* on error: disable all previously enabled clks */ | |
328 | fsl_disable_clocks(fsl_edma, i); | |
d6be34fb | 329 | return PTR_ERR(fsl_edma->muxbase[i]); |
2610acf4 | 330 | } |
d6be34fb JL |
331 | |
332 | sprintf(clkname, "dmamux%d", i); | |
333 | fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname); | |
334 | if (IS_ERR(fsl_edma->muxclk[i])) { | |
335 | dev_err(&pdev->dev, "Missing DMAMUX block clock.\n"); | |
2610acf4 AP |
336 | /* on error: disable all previously enabled clks */ |
337 | fsl_disable_clocks(fsl_edma, i); | |
d6be34fb JL |
338 | return PTR_ERR(fsl_edma->muxclk[i]); |
339 | } | |
340 | ||
341 | ret = clk_prepare_enable(fsl_edma->muxclk[i]); | |
2610acf4 AP |
342 | if (ret) |
343 | /* on error: disable all previously enabled clks */ | |
344 | fsl_disable_clocks(fsl_edma, i); | |
d6be34fb JL |
345 | |
346 | } | |
347 | ||
d6be34fb JL |
348 | fsl_edma->big_endian = of_property_read_bool(np, "big-endian"); |
349 | ||
350 | INIT_LIST_HEAD(&fsl_edma->dma_dev.channels); | |
351 | for (i = 0; i < fsl_edma->n_chans; i++) { | |
352 | struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i]; | |
353 | ||
354 | fsl_chan->edma = fsl_edma; | |
82d149b8 YY |
355 | fsl_chan->pm_state = RUNNING; |
356 | fsl_chan->slave_id = 0; | |
357 | fsl_chan->idle = true; | |
0fa89f97 | 358 | fsl_chan->dma_dir = DMA_NONE; |
d6be34fb JL |
359 | fsl_chan->vchan.desc_free = fsl_edma_free_desc; |
360 | vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); | |
361 | ||
377eaf3b | 362 | edma_writew(fsl_edma, 0x0, ®s->tcd[i].csr); |
d6be34fb JL |
363 | fsl_edma_chan_mux(fsl_chan, 0, false); |
364 | } | |
365 | ||
377eaf3b | 366 | edma_writel(fsl_edma, ~0, regs->intl); |
af802728 | 367 | ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma); |
0fe25d61 SA |
368 | if (ret) |
369 | return ret; | |
370 | ||
d6be34fb JL |
371 | dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask); |
372 | dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask); | |
373 | dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask); | |
e0674853 | 374 | dma_cap_set(DMA_MEMCPY, fsl_edma->dma_dev.cap_mask); |
d6be34fb JL |
375 | |
376 | fsl_edma->dma_dev.dev = &pdev->dev; | |
377 | fsl_edma->dma_dev.device_alloc_chan_resources | |
378 | = fsl_edma_alloc_chan_resources; | |
379 | fsl_edma->dma_dev.device_free_chan_resources | |
380 | = fsl_edma_free_chan_resources; | |
381 | fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status; | |
382 | fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg; | |
383 | fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic; | |
e0674853 | 384 | fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy; |
d80f381f MR |
385 | fsl_edma->dma_dev.device_config = fsl_edma_slave_config; |
386 | fsl_edma->dma_dev.device_pause = fsl_edma_pause; | |
387 | fsl_edma->dma_dev.device_resume = fsl_edma_resume; | |
388 | fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all; | |
ba1cab79 | 389 | fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize; |
d6be34fb | 390 | fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending; |
f45c4311 MR |
391 | |
392 | fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS; | |
393 | fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS; | |
394 | fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | |
d6be34fb | 395 | |
e0674853 JZ |
396 | fsl_edma->dma_dev.copy_align = DMAENGINE_ALIGN_32_BYTES; |
397 | /* Per worst case 'nbytes = 1' take CITER as the max_seg_size */ | |
398 | dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff); | |
399 | ||
d6be34fb JL |
400 | platform_set_drvdata(pdev, fsl_edma); |
401 | ||
402 | ret = dma_async_device_register(&fsl_edma->dma_dev); | |
403 | if (ret) { | |
a86144da PG |
404 | dev_err(&pdev->dev, |
405 | "Can't register Freescale eDMA engine. (%d)\n", ret); | |
af802728 | 406 | fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); |
d6be34fb JL |
407 | return ret; |
408 | } | |
409 | ||
410 | ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma); | |
411 | if (ret) { | |
a86144da PG |
412 | dev_err(&pdev->dev, |
413 | "Can't register Freescale eDMA of_dma. (%d)\n", ret); | |
d6be34fb | 414 | dma_async_device_unregister(&fsl_edma->dma_dev); |
af802728 | 415 | fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); |
d6be34fb JL |
416 | return ret; |
417 | } | |
418 | ||
419 | /* enable round robin arbitration */ | |
377eaf3b | 420 | edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr); |
d6be34fb JL |
421 | |
422 | return 0; | |
423 | } | |
424 | ||
/*
 * Remove: free IRQs first so no handler can run, tear down the virtual
 * channels, then unregister from the OF helper and the dmaengine core
 * before disabling the DMAMUX clocks.
 */
static int fsl_edma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);

	fsl_edma_irq_exit(pdev, fsl_edma);
	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_edma->dma_dev);
	/* NOTE(review): dmaclk (has_dmaclk variants) is not disabled here —
	 * confirm whether that is intentional. */
	fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);

	return 0;
}
438 | ||
82d149b8 YY |
/*
 * Late suspend hook: every channel should already be idle; any channel
 * still running is force-disabled (with a warning) before marking all
 * channels SUSPENDED.
 */
static int fsl_edma_suspend_late(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	unsigned long flags;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		/* Make sure chan is idle or will force disable. */
		if (unlikely(!fsl_chan->idle)) {
			dev_warn(dev, "WARN: There is non-idle channel.");
			fsl_edma_disable_request(fsl_chan);
			fsl_edma_chan_mux(fsl_chan, 0, false);
		}

		fsl_chan->pm_state = SUSPENDED;
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	}

	return 0;
}
462 | ||
/*
 * Early resume hook: restore per-channel state (quiesce the TCD, rebind
 * previously configured mux slots) and re-enable round-robin arbitration,
 * since register contents may have been lost across suspend.
 */
static int fsl_edma_resume_early(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	struct edma_regs *regs = &fsl_edma->regs;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		fsl_chan->pm_state = RUNNING;
		edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
		/* Re-program the mux only for channels that had a slave. */
		if (fsl_chan->slave_id != 0)
			fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
	}

	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}
482 | ||
/*
 * eDMA provides the service to others, so it should be suspend late
 * and resume early. When eDMA suspend, all of the clients should stop
 * the DMA data transmission and let the channel idle.
 */
static const struct dev_pm_ops fsl_edma_pm_ops = {
	.suspend_late   = fsl_edma_suspend_late,
	.resume_early   = fsl_edma_resume_early,
};
492 | ||
d6be34fb JL |
/* Platform driver glue: OF matching, PM ops, probe/remove entry points. */
static struct platform_driver fsl_edma_driver = {
	.driver		= {
		.name	= "fsl-edma",
		.of_match_table = fsl_edma_dt_ids,
		.pm     = &fsl_edma_pm_ops,
	},
	.probe          = fsl_edma_probe,
	.remove		= fsl_edma_remove,
};
502 | ||
8edc51c1 YY |
static int __init fsl_edma_init(void)
{
	return platform_driver_register(&fsl_edma_driver);
}
/* Registered at subsys_initcall level (not module_init) so the DMA
 * engine is available before client drivers probe. */
subsys_initcall(fsl_edma_init);

static void __exit fsl_edma_exit(void)
{
	platform_driver_unregister(&fsl_edma_driver);
}
module_exit(fsl_edma_exit);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");