// SPDX-License-Identifier: GPL-2.0-only
/*
 * IMG Multi-threaded DMA Controller (MDC)
 *
 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MDC_MAX_DMA_CHANNELS                        32

#define MDC_GENERAL_CONFIG                          0x000
#define MDC_GENERAL_CONFIG_LIST_IEN                 BIT(31)
#define MDC_GENERAL_CONFIG_IEN                      BIT(29)
#define MDC_GENERAL_CONFIG_LEVEL_INT                BIT(28)
#define MDC_GENERAL_CONFIG_INC_W                    BIT(12)
#define MDC_GENERAL_CONFIG_INC_R                    BIT(8)
#define MDC_GENERAL_CONFIG_PHYSICAL_W               BIT(7)
#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT            4
#define MDC_GENERAL_CONFIG_WIDTH_W_MASK             0x7
#define MDC_GENERAL_CONFIG_PHYSICAL_R               BIT(3)
#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT            0
#define MDC_GENERAL_CONFIG_WIDTH_R_MASK             0x7

#define MDC_READ_PORT_CONFIG                        0x004
#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT          28
#define MDC_READ_PORT_CONFIG_STHREAD_MASK           0xf
#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT          24
#define MDC_READ_PORT_CONFIG_RTHREAD_MASK           0xf
#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT          16
#define MDC_READ_PORT_CONFIG_WTHREAD_MASK           0xf
#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT       4
#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK        0xff
#define MDC_READ_PORT_CONFIG_DREQ_ENABLE            BIT(1)

#define MDC_READ_ADDRESS                            0x008

#define MDC_WRITE_ADDRESS                           0x00c

#define MDC_TRANSFER_SIZE                           0x010
#define MDC_TRANSFER_SIZE_MASK                      0xffffff

#define MDC_LIST_NODE_ADDRESS                       0x014

#define MDC_CMDS_PROCESSED                          0x018
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT     16
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK      0x3f
#define MDC_CMDS_PROCESSED_INT_ACTIVE               BIT(8)
#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT          0
#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK           0x3f

#define MDC_CONTROL_AND_STATUS                      0x01c
#define MDC_CONTROL_AND_STATUS_CANCEL               BIT(20)
#define MDC_CONTROL_AND_STATUS_LIST_EN              BIT(4)
#define MDC_CONTROL_AND_STATUS_EN                   BIT(0)

#define MDC_ACTIVE_TRANSFER_SIZE                    0x030

#define MDC_GLOBAL_CONFIG_A                         0x900
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT   16
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK    0xff
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT      8
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK       0xff
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT     0
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK      0xff

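/*
 * The first eight words of a hardware list node mirror the per-channel
 * register layout at offsets 0x000-0x01c, presumably so the list engine can
 * load a node straight into the channel registers; next_desc is CPU-only
 * bookkeeping appended after the hardware-visible words.
 */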
struct mdc_hw_list_desc {
        u32 gen_conf;
        u32 readport_conf;
        u32 read_addr;
        u32 write_addr;
        u32 xfer_size;
        u32 node_addr;
        u32 cmds_done;
        u32 ctrl_status;
        /*
         * Not part of the list descriptor, but instead used by the CPU to
         * traverse the list.
         */
        struct mdc_hw_list_desc *next_desc;
};

struct mdc_tx_desc {
        struct mdc_chan *chan;
        struct virt_dma_desc vd;
        dma_addr_t list_phys;
        struct mdc_hw_list_desc *list;
        bool cyclic;
        bool cmd_loaded;
        unsigned int list_len;
        unsigned int list_period_len;
        size_t list_xfer_size;
        unsigned int list_cmds_done;
};

struct mdc_chan {
        struct mdc_dma *mdma;
        struct virt_dma_chan vc;
        struct dma_slave_config config;
        struct mdc_tx_desc *desc;
        int irq;
        unsigned int periph;
        unsigned int thread;
        unsigned int chan_nr;
};

struct mdc_dma_soc_data {
        void (*enable_chan)(struct mdc_chan *mchan);
        void (*disable_chan)(struct mdc_chan *mchan);
};

struct mdc_dma {
        struct dma_device dma_dev;
        void __iomem *regs;
        struct clk *clk;
        struct dma_pool *desc_pool;
        struct regmap *periph_regs;
        spinlock_t lock;
        unsigned int nr_threads;
        unsigned int nr_channels;
        unsigned int bus_width;
        unsigned int max_burst_mult;
        unsigned int max_xfer_size;
        const struct mdc_dma_soc_data *soc;
        struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
};

static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
{
        return readl(mdma->regs + reg);
}

static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
{
        writel(val, mdma->regs + reg);
}

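/*
 * Each channel's registers occupy a 0x040-byte window, so channel N's copy
 * of register "reg" lives at N * 0x040 + reg.
 */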
static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
{
        return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
}

static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
{
        mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
}

static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
{
        return container_of(to_virt_chan(c), struct mdc_chan, vc);
}

static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
{
        struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);

        return container_of(vdesc, struct mdc_tx_desc, vd);
}

static inline struct device *mdma2dev(struct mdc_dma *mdma)
{
        return mdma->dma_dev.dev;
}

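/*
 * The WIDTH_R/WIDTH_W fields hold log2 of the access width in bytes, so for
 * the power-of-two widths used here: to_mdc_width(1) == 0,
 * to_mdc_width(2) == 1, to_mdc_width(4) == 2, to_mdc_width(8) == 3.
 */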
static inline unsigned int to_mdc_width(unsigned int bytes)
{
        return ffs(bytes) - 1;
}

static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
                                      unsigned int bytes)
{
        ldesc->gen_conf |= to_mdc_width(bytes) <<
                MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
}

static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
                                       unsigned int bytes)
{
        ldesc->gen_conf |= to_mdc_width(bytes) <<
                MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
}

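/*
 * Fill in one hardware list node. Note that the hardware transfer-size field
 * is encoded as "bytes - 1", and that buffers not aligned to the bus width
 * give up one bus-width worth of maximum burst - apparently so a burst
 * cannot straddle the alignment boundary.
 */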
static void mdc_list_desc_config(struct mdc_chan *mchan,
                                 struct mdc_hw_list_desc *ldesc,
                                 enum dma_transfer_direction dir,
                                 dma_addr_t src, dma_addr_t dst, size_t len)
{
        struct mdc_dma *mdma = mchan->mdma;
        unsigned int max_burst, burst_size;

        ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
                MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
                MDC_GENERAL_CONFIG_PHYSICAL_R;
        ldesc->readport_conf =
                (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
        ldesc->read_addr = src;
        ldesc->write_addr = dst;
        ldesc->xfer_size = len - 1;
        ldesc->node_addr = 0;
        ldesc->cmds_done = 0;
        ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
                MDC_CONTROL_AND_STATUS_EN;
        ldesc->next_desc = NULL;

        if (IS_ALIGNED(dst, mdma->bus_width) &&
            IS_ALIGNED(src, mdma->bus_width))
                max_burst = mdma->bus_width * mdma->max_burst_mult;
        else
                max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);

        if (dir == DMA_MEM_TO_DEV) {
                ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
                ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
                mdc_set_read_width(ldesc, mdma->bus_width);
                mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
                burst_size = min(max_burst, mchan->config.dst_maxburst *
                                 mchan->config.dst_addr_width);
        } else if (dir == DMA_DEV_TO_MEM) {
                ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
                ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
                mdc_set_read_width(ldesc, mchan->config.src_addr_width);
                mdc_set_write_width(ldesc, mdma->bus_width);
                burst_size = min(max_burst, mchan->config.src_maxburst *
                                 mchan->config.src_addr_width);
        } else {
                ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
                        MDC_GENERAL_CONFIG_INC_W;
                mdc_set_read_width(ldesc, mdma->bus_width);
                mdc_set_write_width(ldesc, mdma->bus_width);
                burst_size = max_burst;
        }
        ldesc->readport_conf |= (burst_size - 1) <<
                MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
}

static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
{
        struct mdc_dma *mdma = mdesc->chan->mdma;
        struct mdc_hw_list_desc *curr, *next;
        dma_addr_t curr_phys, next_phys;

        curr = mdesc->list;
        curr_phys = mdesc->list_phys;
        while (curr) {
                next = curr->next_desc;
                next_phys = curr->node_addr;
                dma_pool_free(mdma->desc_pool, curr, curr_phys);
                curr = next;
                curr_phys = next_phys;
        }
}

static void mdc_desc_free(struct virt_dma_desc *vd)
{
        struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);

        mdc_list_desc_free(mdesc);
        kfree(mdesc);
}

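/*
 * A minimal sketch of how a client might drive the memcpy path through the
 * generic dmaengine API (illustrative only; "chan", "src", "dst" and "len"
 * are hypothetical caller state):
 *
 *      struct dma_async_tx_descriptor *tx;
 *
 *      tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *                                     DMA_PREP_INTERRUPT);
 *      if (tx) {
 *              dmaengine_submit(tx);
 *              dma_async_issue_pending(chan);
 *      }
 */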
static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
        unsigned long flags)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;
        struct mdc_tx_desc *mdesc;
        struct mdc_hw_list_desc *curr, *prev = NULL;
        dma_addr_t curr_phys;

        if (!len)
                return NULL;

        mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
        if (!mdesc)
                return NULL;
        mdesc->chan = mchan;
        mdesc->list_xfer_size = len;

        while (len > 0) {
                size_t xfer_size;

                curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
                if (!curr)
                        goto free_desc;

                if (prev) {
                        prev->node_addr = curr_phys;
                        prev->next_desc = curr;
                } else {
                        mdesc->list_phys = curr_phys;
                        mdesc->list = curr;
                }

                xfer_size = min_t(size_t, mdma->max_xfer_size, len);

                mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
                                     xfer_size);

                prev = curr;

                mdesc->list_len++;
                src += xfer_size;
                dest += xfer_size;
                len -= xfer_size;
        }

        return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
        mdc_desc_free(&mdesc->vd);

        return NULL;
}

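/*
 * Reject slave widths other than 1, 2, 4 or 8 bytes, as well as widths wider
 * than the system bus width probed from MDC_GLOBAL_CONFIG_A.
 */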
static int mdc_check_slave_width(struct mdc_chan *mchan,
                                 enum dma_transfer_direction dir)
{
        enum dma_slave_buswidth width;

        if (dir == DMA_MEM_TO_DEV)
                width = mchan->config.dst_addr_width;
        else
                width = mchan->config.src_addr_width;

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                break;
        default:
                return -EINVAL;
        }

        if (width > mchan->mdma->bus_width)
                return -EINVAL;

        return 0;
}

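/*
 * Cyclic transfers reuse the same list machinery as slave_sg, with two
 * twists: each period may be split into several hardware commands (counted
 * by list_period_len), and the final node's node_addr is pointed back at
 * list_phys so the hardware loops over the list indefinitely.
 */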
static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir,
        unsigned long flags)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;
        struct mdc_tx_desc *mdesc;
        struct mdc_hw_list_desc *curr, *prev = NULL;
        dma_addr_t curr_phys;

        if (!buf_len && !period_len)
                return NULL;

        if (!is_slave_direction(dir))
                return NULL;

        if (mdc_check_slave_width(mchan, dir) < 0)
                return NULL;

        mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
        if (!mdesc)
                return NULL;
        mdesc->chan = mchan;
        mdesc->cyclic = true;
        mdesc->list_xfer_size = buf_len;
        mdesc->list_period_len = DIV_ROUND_UP(period_len,
                                              mdma->max_xfer_size);

        while (buf_len > 0) {
                size_t remainder = min(period_len, buf_len);

                while (remainder > 0) {
                        size_t xfer_size;

                        curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
                                              &curr_phys);
                        if (!curr)
                                goto free_desc;

                        if (!prev) {
                                mdesc->list_phys = curr_phys;
                                mdesc->list = curr;
                        } else {
                                prev->node_addr = curr_phys;
                                prev->next_desc = curr;
                        }

                        xfer_size = min_t(size_t, mdma->max_xfer_size,
                                          remainder);

                        if (dir == DMA_MEM_TO_DEV) {
                                mdc_list_desc_config(mchan, curr, dir,
                                                     buf_addr,
                                                     mchan->config.dst_addr,
                                                     xfer_size);
                        } else {
                                mdc_list_desc_config(mchan, curr, dir,
                                                     mchan->config.src_addr,
                                                     buf_addr,
                                                     xfer_size);
                        }

                        prev = curr;

                        mdesc->list_len++;
                        buf_addr += xfer_size;
                        buf_len -= xfer_size;
                        remainder -= xfer_size;
                }
        }
        prev->node_addr = mdesc->list_phys;

        return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
        mdc_desc_free(&mdesc->vd);

        return NULL;
}

static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction dir,
        unsigned long flags, void *context)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;
        struct mdc_tx_desc *mdesc;
        struct scatterlist *sg;
        struct mdc_hw_list_desc *curr, *prev = NULL;
        dma_addr_t curr_phys;
        unsigned int i;

        if (!sgl)
                return NULL;

        if (!is_slave_direction(dir))
                return NULL;

        if (mdc_check_slave_width(mchan, dir) < 0)
                return NULL;

        mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
        if (!mdesc)
                return NULL;
        mdesc->chan = mchan;

        for_each_sg(sgl, sg, sg_len, i) {
                dma_addr_t buf = sg_dma_address(sg);
                size_t buf_len = sg_dma_len(sg);

                while (buf_len > 0) {
                        size_t xfer_size;

                        curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
                                              &curr_phys);
                        if (!curr)
                                goto free_desc;

                        if (!prev) {
                                mdesc->list_phys = curr_phys;
                                mdesc->list = curr;
                        } else {
                                prev->node_addr = curr_phys;
                                prev->next_desc = curr;
                        }

                        xfer_size = min_t(size_t, mdma->max_xfer_size,
                                          buf_len);

                        if (dir == DMA_MEM_TO_DEV) {
                                mdc_list_desc_config(mchan, curr, dir, buf,
                                                     mchan->config.dst_addr,
                                                     xfer_size);
                        } else {
                                mdc_list_desc_config(mchan, curr, dir,
                                                     mchan->config.src_addr,
                                                     buf, xfer_size);
                        }

                        prev = curr;

                        mdesc->list_len++;
                        mdesc->list_xfer_size += xfer_size;
                        buf += xfer_size;
                        buf_len -= xfer_size;
                }
        }

        return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
        mdc_desc_free(&mdesc->vd);

        return NULL;
}

static void mdc_issue_desc(struct mdc_chan *mchan)
{
        struct mdc_dma *mdma = mchan->mdma;
        struct virt_dma_desc *vd;
        struct mdc_tx_desc *mdesc;
        u32 val;

        vd = vchan_next_desc(&mchan->vc);
        if (!vd)
                return;

        list_del(&vd->node);

        mdesc = to_mdc_desc(&vd->tx);
        mchan->desc = mdesc;

        dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
                mchan->chan_nr);

        mdma->soc->enable_chan(mchan);

        val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
        val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
                MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
                MDC_GENERAL_CONFIG_PHYSICAL_R;
        mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
        val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
        mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
        mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
        val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
        val |= MDC_CONTROL_AND_STATUS_LIST_EN;
        mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
}

static void mdc_issue_pending(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&mchan->vc.lock, flags);
        if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
                mdc_issue_desc(mchan);
        spin_unlock_irqrestore(&mchan->vc.lock, flags);
}

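/*
 * Residue is computed by snapshotting CMDS_PROCESSED around a read of
 * MDC_ACTIVE_TRANSFER_SIZE (retrying until the two snapshots agree), walking
 * the list to subtract the sizes of the completed commands, and then
 * subtracting the progress of the in-flight command.
 */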
static enum dma_status mdc_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_tx_desc *mdesc;
        struct virt_dma_desc *vd;
        unsigned long flags;
        size_t bytes = 0;
        int ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        if (!txstate)
                return ret;

        spin_lock_irqsave(&mchan->vc.lock, flags);
        vd = vchan_find_desc(&mchan->vc, cookie);
        if (vd) {
                mdesc = to_mdc_desc(&vd->tx);
                bytes = mdesc->list_xfer_size;
        } else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
                struct mdc_hw_list_desc *ldesc;
                u32 val1, val2, done, processed, residue;
                int i, cmds;

                mdesc = mchan->desc;

                /*
                 * Determine the number of commands that haven't been
                 * processed (handled by the IRQ handler) yet.
                 */
                do {
                        val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
                                ~MDC_CMDS_PROCESSED_INT_ACTIVE;
                        residue = mdc_chan_readl(mchan,
                                                 MDC_ACTIVE_TRANSFER_SIZE);
                        val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
                                ~MDC_CMDS_PROCESSED_INT_ACTIVE;
                } while (val1 != val2);

                done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
                processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
                cmds = (done - processed) %
                        (MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);

                /*
                 * If the command loaded event hasn't been processed yet, then
                 * the difference above includes an extra command.
                 */
                if (!mdesc->cmd_loaded)
                        cmds--;
                else
                        cmds += mdesc->list_cmds_done;

                bytes = mdesc->list_xfer_size;
                ldesc = mdesc->list;
                for (i = 0; i < cmds; i++) {
                        bytes -= ldesc->xfer_size + 1;
                        ldesc = ldesc->next_desc;
                }
                if (ldesc) {
                        if (residue != MDC_TRANSFER_SIZE_MASK)
                                bytes -= ldesc->xfer_size - residue;
                        else
                                bytes -= ldesc->xfer_size + 1;
                }
        }
        spin_unlock_irqrestore(&mchan->vc.lock, flags);

        dma_set_residue(txstate, bytes);

        return ret;
}

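/*
 * Both CMDS_DONE and CMDS_PROCESSED are 6-bit counters, so they wrap modulo
 * 64. For example, with processed == 62 and done == 3, the channel has
 * completed (64 - 62) + 3 = 5 new commands since the last acknowledgment.
 */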
static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
{
        u32 val, processed, done1, done2;
        unsigned int ret;

        val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
        processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
                MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
        /*
         * CMDS_DONE may have incremented between reading CMDS_PROCESSED
         * and clearing INT_ACTIVE.  Re-read CMDS_PROCESSED to ensure we
         * didn't miss a command completion.
         */
        do {
                val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);

                done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_DONE_MASK;

                val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
                          MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
                         MDC_CMDS_PROCESSED_INT_ACTIVE);

                val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;

                mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);

                val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);

                done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
        } while (done1 != done2);

        if (done1 >= processed)
                ret = done1 - processed;
        else
                ret = ((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1) -
                        processed) + done1;

        return ret;
}

static int mdc_terminate_all(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&mchan->vc.lock, flags);

        mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
                        MDC_CONTROL_AND_STATUS);

        if (mchan->desc) {
                vchan_terminate_vdesc(&mchan->desc->vd);
                mchan->desc = NULL;
        }
        vchan_get_all_descriptors(&mchan->vc, &head);

        mdc_get_new_events(mchan);

        spin_unlock_irqrestore(&mchan->vc.lock, flags);

        vchan_dma_desc_free_list(&mchan->vc, &head);

        return 0;
}

static void mdc_synchronize(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);

        vchan_synchronize(&mchan->vc);
}

static int mdc_slave_config(struct dma_chan *chan,
                            struct dma_slave_config *config)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&mchan->vc.lock, flags);
        mchan->config = *config;
        spin_unlock_irqrestore(&mchan->vc.lock, flags);

        return 0;
}

static int mdc_alloc_chan_resources(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct device *dev = mdma2dev(mchan->mdma);

        return pm_runtime_get_sync(dev);
}

static void mdc_free_chan_resources(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;
        struct device *dev = mdma2dev(mdma);

        mdc_terminate_all(chan);
        mdma->soc->disable_chan(mchan);
        pm_runtime_put(dev);
}

static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
        struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
        struct mdc_tx_desc *mdesc;
        unsigned int i, new_events;

        spin_lock(&mchan->vc.lock);

        dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);

        new_events = mdc_get_new_events(mchan);

        if (!new_events)
                goto out;

        mdesc = mchan->desc;
        if (!mdesc) {
                dev_warn(mdma2dev(mchan->mdma),
                         "IRQ with no active descriptor on channel %d\n",
                         mchan->chan_nr);
                goto out;
        }

        for (i = 0; i < new_events; i++) {
                /*
                 * The first interrupt in a transfer indicates that the
                 * command list has been loaded, not that a command has
                 * been completed.
                 */
                if (!mdesc->cmd_loaded) {
                        mdesc->cmd_loaded = true;
                        continue;
                }

                mdesc->list_cmds_done++;
                if (mdesc->cyclic) {
                        mdesc->list_cmds_done %= mdesc->list_len;
                        if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
                                vchan_cyclic_callback(&mdesc->vd);
                } else if (mdesc->list_cmds_done == mdesc->list_len) {
                        mchan->desc = NULL;
                        vchan_cookie_complete(&mdesc->vd);
                        mdc_issue_desc(mchan);
                        break;
                }
        }
out:
        spin_unlock(&mchan->vc.lock);

        return IRQ_HANDLED;
}

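/*
 * Channels are requested with a three-cell specifier: the peripheral number,
 * a bitmask of acceptable channels, and the hardware thread to use. A client
 * node might therefore carry something like (illustrative values only):
 *
 *      dmas = <&mdc 14 0xffffffff 0>;
 */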
static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
                                     struct of_dma *ofdma)
{
        struct mdc_dma *mdma = ofdma->of_dma_data;
        struct dma_chan *chan;

        if (dma_spec->args_count != 3)
                return NULL;

        list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
                struct mdc_chan *mchan = to_mdc_chan(chan);

                if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
                        continue;
                if (dma_get_slave_channel(chan)) {
                        mchan->periph = dma_spec->args[0];
                        mchan->thread = dma_spec->args[2];
                        return chan;
                }
        }

        return NULL;
}

#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch)           (0x120 + 0x4 * ((ch) / 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch)     (8 * ((ch) % 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK          0x3f

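/*
 * Each 32-bit route register packs four 6-bit peripheral numbers, one per
 * channel, in 8-bit slots. Channel 5, for example, maps to register 0x124
 * (0x120 + 0x4 * (5 / 4)) at shift 8 (8 * (5 % 4)).
 */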
static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
{
        struct mdc_dma *mdma = mchan->mdma;

        regmap_update_bits(mdma->periph_regs,
                           PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
                           mchan->periph <<
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
}

static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
{
        struct mdc_dma *mdma = mchan->mdma;

        regmap_update_bits(mdma->periph_regs,
                           PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
                           0);
}

static const struct mdc_dma_soc_data pistachio_mdc_data = {
        .enable_chan = pistachio_mdc_enable_chan,
        .disable_chan = pistachio_mdc_disable_chan,
};

static const struct of_device_id mdc_dma_of_match[] = {
        { .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
        { },
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);

static int img_mdc_runtime_suspend(struct device *dev)
{
        struct mdc_dma *mdma = dev_get_drvdata(dev);

        clk_disable_unprepare(mdma->clk);

        return 0;
}

static int img_mdc_runtime_resume(struct device *dev)
{
        struct mdc_dma *mdma = dev_get_drvdata(dev);

        return clk_prepare_enable(mdma->clk);
}

static int mdc_dma_probe(struct platform_device *pdev)
{
        struct mdc_dma *mdma;
        unsigned int i;
        u32 val;
        int ret;

        mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
        if (!mdma)
                return -ENOMEM;
        platform_set_drvdata(pdev, mdma);

        mdma->soc = of_device_get_match_data(&pdev->dev);

        mdma->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(mdma->regs))
                return PTR_ERR(mdma->regs);

        mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                            "img,cr-periph");
        if (IS_ERR(mdma->periph_regs))
                return PTR_ERR(mdma->periph_regs);

        mdma->clk = devm_clk_get(&pdev->dev, "sys");
        if (IS_ERR(mdma->clk))
                return PTR_ERR(mdma->clk);

        dma_cap_zero(mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);

        val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
        mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
                MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
        mdma->nr_threads =
                1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
                      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
        mdma->bus_width =
                (1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
                       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
        /*
         * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
         * are supported, this makes it possible for the value reported in
         * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
         * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
         * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this
         * ambiguity, restrict transfer sizes to one bus-width less than the
         * actual maximum.
         */
        mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;

        of_property_read_u32(pdev->dev.of_node, "dma-channels",
                             &mdma->nr_channels);
        ret = of_property_read_u32(pdev->dev.of_node,
                                   "img,max-burst-multiplier",
                                   &mdma->max_burst_mult);
        if (ret)
                return ret;

        mdma->dma_dev.dev = &pdev->dev;
        mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
        mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
        mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
        mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
        mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
        mdma->dma_dev.device_tx_status = mdc_tx_status;
        mdma->dma_dev.device_issue_pending = mdc_issue_pending;
        mdma->dma_dev.device_terminate_all = mdc_terminate_all;
        mdma->dma_dev.device_synchronize = mdc_synchronize;
        mdma->dma_dev.device_config = mdc_slave_config;

        mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
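        /*
         * enum dma_slave_buswidth values equal the width in bytes, so setting
         * BIT(i) for each power-of-two i up to the bus width advertises
         * exactly the widths mdc_check_slave_width() will accept.
         */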
        for (i = 1; i <= mdma->bus_width; i <<= 1) {
                mdma->dma_dev.src_addr_widths |= BIT(i);
                mdma->dma_dev.dst_addr_widths |= BIT(i);
        }

        INIT_LIST_HEAD(&mdma->dma_dev.channels);
        for (i = 0; i < mdma->nr_channels; i++) {
                struct mdc_chan *mchan = &mdma->channels[i];

                mchan->mdma = mdma;
                mchan->chan_nr = i;
                mchan->irq = platform_get_irq(pdev, i);
                if (mchan->irq < 0)
                        return mchan->irq;

                ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
                                       IRQ_TYPE_LEVEL_HIGH,
                                       dev_name(&pdev->dev), mchan);
                if (ret < 0)
                        return ret;

                mchan->vc.desc_free = mdc_desc_free;
                vchan_init(&mchan->vc, &mdma->dma_dev);
        }

        mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
                                           sizeof(struct mdc_hw_list_desc),
                                           4, 0);
        if (!mdma->desc_pool)
                return -ENOMEM;

        pm_runtime_enable(&pdev->dev);
        if (!pm_runtime_enabled(&pdev->dev)) {
                ret = img_mdc_runtime_resume(&pdev->dev);
                if (ret)
                        return ret;
        }

        ret = dma_async_device_register(&mdma->dma_dev);
        if (ret)
                goto suspend;

        ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
        if (ret)
                goto unregister;

        dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
                 mdma->nr_channels, mdma->nr_threads);

        return 0;

unregister:
        dma_async_device_unregister(&mdma->dma_dev);
suspend:
        if (!pm_runtime_enabled(&pdev->dev))
                img_mdc_runtime_suspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return ret;
}

static int mdc_dma_remove(struct platform_device *pdev)
{
        struct mdc_dma *mdma = platform_get_drvdata(pdev);
        struct mdc_chan *mchan, *next;

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&mdma->dma_dev);

        list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
                                 vc.chan.device_node) {
                list_del(&mchan->vc.chan.device_node);

                devm_free_irq(&pdev->dev, mchan->irq, mchan);

                tasklet_kill(&mchan->vc.task);
        }

        pm_runtime_disable(&pdev->dev);
        if (!pm_runtime_status_suspended(&pdev->dev))
                img_mdc_runtime_suspend(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int img_mdc_suspend_late(struct device *dev)
{
        struct mdc_dma *mdma = dev_get_drvdata(dev);
        int i;

        /* Check that all channels are idle */
        for (i = 0; i < mdma->nr_channels; i++) {
                struct mdc_chan *mchan = &mdma->channels[i];

                if (unlikely(mchan->desc))
                        return -EBUSY;
        }

        return pm_runtime_force_suspend(dev);
}

static int img_mdc_resume_early(struct device *dev)
{
        return pm_runtime_force_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops img_mdc_pm_ops = {
        SET_RUNTIME_PM_OPS(img_mdc_runtime_suspend,
                           img_mdc_runtime_resume, NULL)
        SET_LATE_SYSTEM_SLEEP_PM_OPS(img_mdc_suspend_late,
                                     img_mdc_resume_early)
};

static struct platform_driver mdc_dma_driver = {
        .driver = {
                .name = "img-mdc-dma",
                .pm = &img_mdc_pm_ops,
                .of_match_table = of_match_ptr(mdc_dma_of_match),
        },
        .probe = mdc_dma_probe,
        .remove = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);

MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");