// SPDX-License-Identifier: GPL-2.0-only
/*
 * Core driver for the Intel integrated DMA 64-bit
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/dma/idma64.h>

#include "idma64.h"

/* For now we support only two channels */
#define IDMA64_NR_CHAN		2

/* ---------------------------------------------------------------------- */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

/* ---------------------------------------------------------------------- */

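/*
 * Disable the controller: clear the global DMA enable bit, mask all
 * per-channel interrupt sources, then poll (bounded by a simple retry
 * counter) until the hardware reports the enable bit as cleared.
 */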
static void idma64_off(struct idma64 *idma64)
{
	unsigned short count = 100;

	dma_writel(idma64, CFG, 0);

	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);

	do {
		cpu_relax();
	} while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
}

static void idma64_on(struct idma64 *idma64)
{
	dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
}

/* ---------------------------------------------------------------------- */

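/*
 * Program the per-channel configuration registers (handshake interface
 * numbers, default burst alignment), unmask the XFER and ERROR
 * interrupts for this channel and make sure the controller itself is
 * enabled.
 */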
static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
	u32 cfglo = 0;

	/* Set default burst alignment */
	cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;

	channel_writel(idma64c, CFG_LO, cfglo);
	channel_writel(idma64c, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(idma64, MASK(XFER), idma64c->mask);
	channel_set_bit(idma64, MASK(ERROR), idma64c->mask);

	/*
	 * Force the controller to be turned on.
	 *
	 * The iDMA is turned off in ->probe() and loses context during a
	 * system suspend / resume cycle. That's why we have to enable it
	 * each time we use it.
	 */
	idma64_on(idma64);
}

static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	channel_clear_bit(idma64, CH_EN, idma64c->mask);
}

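/*
 * Start a transfer on the channel: SAR/DAR are cleared because the
 * per-block addresses are taken from the linked-list items, CTL_LO and
 * CTL_HI are set up for linked-list operation, LLP is pointed at the
 * first hardware descriptor and the channel is enabled.
 */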
static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw = &desc->hw[0];

	channel_writeq(idma64c, SAR, 0);
	channel_writeq(idma64c, DAR, 0);

	channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
	channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);

	channel_writeq(idma64c, LLP, hw->llp);

	channel_set_bit(idma64, CH_EN, idma64c->mask);
}

static void idma64_stop_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);

	idma64_chan_stop(idma64, idma64c);
}

static void idma64_start_transfer(struct idma64_chan *idma64c)
{
	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&idma64c->vchan);
	if (!vdesc) {
		idma64c->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	idma64c->desc = to_idma64_desc(vdesc);

	/* Configure the channel */
	idma64_chan_init(idma64, idma64c);

	/* Start the channel with a new descriptor */
	idma64_chan_start(idma64, idma64c);
}

/* ---------------------------------------------------------------------- */

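/*
 * Per-channel interrupt handling: on an error the ERROR status is
 * cleared and the active descriptor is marked DMA_ERROR; on a transfer
 * completion the XFER status is cleared, the cookie is completed and
 * the next queued descriptor (if any) is started. The channel is
 * stopped when nothing is left to run or an error occurred.
 */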
static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
		u32 status_err, u32 status_xfer)
{
	struct idma64_chan *idma64c = &idma64->chan[c];
	struct idma64_desc *desc;

	spin_lock(&idma64c->vchan.lock);
	desc = idma64c->desc;
	if (desc) {
		if (status_err & (1 << c)) {
			dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
			desc->status = DMA_ERROR;
		} else if (status_xfer & (1 << c)) {
			dma_writel(idma64, CLEAR(XFER), idma64c->mask);
			desc->status = DMA_COMPLETE;
			vchan_cookie_complete(&desc->vdesc);
			idma64_start_transfer(idma64c);
		}

		/* idma64_start_transfer() updates idma64c->desc */
		if (idma64c->desc == NULL || desc->status == DMA_ERROR)
			idma64_stop_transfer(idma64c);
	}
	spin_unlock(&idma64c->vchan.lock);
}

static irqreturn_t idma64_irq(int irq, void *dev)
{
	struct idma64 *idma64 = dev;
	u32 status = dma_readl(idma64, STATUS_INT);
	u32 status_xfer;
	u32 status_err;
	unsigned short i;

	dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);

	/* Check if we have any interrupt from the DMA controller */
	if (!status)
		return IRQ_NONE;

	status_xfer = dma_readl(idma64, RAW(XFER));
	status_err = dma_readl(idma64, RAW(ERROR));

	for (i = 0; i < idma64->dma.chancnt; i++)
		idma64_chan_irq(idma64, i, status_err, status_xfer);

	return IRQ_HANDLED;
}

/* ---------------------------------------------------------------------- */

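/*
 * A transfer is described by a software descriptor (struct idma64_desc)
 * that carries an array of hardware descriptors; each hardware
 * descriptor owns one LLI block allocated from the per-channel DMA pool.
 */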
static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
{
	struct idma64_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
	if (!desc->hw) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void idma64_desc_free(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct idma64_hw_desc *hw;

	if (desc->ndesc) {
		unsigned int i = desc->ndesc;

		do {
			hw = &desc->hw[--i];
			dma_pool_free(idma64c->pool, hw->lli, hw->llp);
		} while (i);
	}

	kfree(desc->hw);
	kfree(desc);
}

static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
{
	struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);

	idma64_desc_free(idma64c, to_idma64_desc(vdesc));
}

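/*
 * Fill one hardware LLI: choose source/destination addresses and flow
 * control based on the transfer direction, and derive the memory-side
 * bus width from the lowest set bit of address | length | 4, which
 * caps it at 4 bytes while honouring the buffer alignment. The
 * peripheral-side width comes from the slave configuration.
 */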
static void idma64_hw_desc_fill(struct idma64_hw_desc *hw,
		struct dma_slave_config *config,
		enum dma_transfer_direction direction, u64 llp)
{
	struct idma64_lli *lli = hw->lli;
	u64 sar, dar;
	u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
	u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
	u32 src_width, dst_width;

	if (direction == DMA_MEM_TO_DEV) {
		sar = hw->phys;
		dar = config->dst_addr;
		ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
			 IDMA64C_CTLL_FC_M2P;
		src_width = __ffs(sar | hw->len | 4);
		dst_width = __ffs(config->dst_addr_width);
	} else {	/* DMA_DEV_TO_MEM */
		sar = config->src_addr;
		dar = hw->phys;
		ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
			 IDMA64C_CTLL_FC_P2M;
		src_width = __ffs(config->src_addr_width);
		dst_width = __ffs(dar | hw->len | 4);
	}

	lli->sar = sar;
	lli->dar = dar;

	lli->ctlhi = ctlhi;
	lli->ctllo = ctllo |
		     IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
		     IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
		     IDMA64C_CTLL_DST_WIDTH(dst_width) |
		     IDMA64C_CTLL_SRC_WIDTH(src_width);

	lli->llp = llp;
}

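/*
 * The hardware descriptors are filled from the last block to the first
 * so that each LLI can link (via llp) to the one that follows it; the
 * last block gets the interrupt-enable bit and has linked-list fetching
 * disabled to terminate the chain.
 */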
static void idma64_desc_fill(struct idma64_chan *idma64c,
		struct idma64_desc *desc)
{
	struct dma_slave_config *config = &idma64c->config;
	unsigned int i = desc->ndesc;
	struct idma64_hw_desc *hw = &desc->hw[i - 1];
	struct idma64_lli *lli = hw->lli;
	u64 llp = 0;

	/* Fill the hardware descriptors and link them to a list */
	do {
		hw = &desc->hw[--i];
		idma64_hw_desc_fill(hw, config, desc->direction, llp);
		llp = hw->llp;
		desc->length += hw->len;
	} while (i);

	/* Trigger an interrupt after the last block is transferred */
	lli->ctllo |= IDMA64C_CTLL_INT_EN;

	/* Disable LLP transfer in the last block */
	lli->ctllo &= ~(IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
}

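/*
 * Build a descriptor for a scatter-gather slave transfer: allocate one
 * hardware descriptor per SG entry (backed by the channel's DMA pool),
 * record its DMA address and length, then chain the blocks and hand
 * the result to the virt-dma layer.
 */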
static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct idma64_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = idma64_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct idma64_hw_desc *hw = &desc->hw[i];

		/* Allocate DMA capable memory for hardware descriptor */
		hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
		if (!hw->lli) {
			desc->ndesc = i;
			idma64_desc_free(idma64c, desc);
			return NULL;
		}

		hw->phys = sg_dma_address(sg);
		hw->len = sg_dma_len(sg);
	}

	desc->ndesc = sg_len;
	desc->direction = direction;
	desc->status = DMA_IN_PROGRESS;

	idma64_desc_fill(idma64c, desc);
	return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
}

static void idma64_issue_pending(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
		idma64_start_transfer(idma64c);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
}

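/*
 * Compute the residue of the active descriptor. Completed blocks are
 * subtracted from the total length by walking the chain up to the
 * block the hardware LLP register refers to; the block still in flight
 * is then accounted for by adding its length back and subtracting the
 * amount the hardware reports as already transferred in CTL_HI.
 */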
static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
{
	struct idma64_desc *desc = idma64c->desc;
	struct idma64_hw_desc *hw;
	size_t bytes = desc->length;
	u64 llp = channel_readq(idma64c, LLP);
	u32 ctlhi = channel_readl(idma64c, CTL_HI);
	unsigned int i = 0;

	do {
		hw = &desc->hw[i];
		if (hw->llp == llp)
			break;
		bytes -= hw->len;
	} while (++i < desc->ndesc);

	if (!i)
		return bytes;

	/* The current chunk is not fully transferred yet */
	bytes += desc->hw[--i].len;

	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
}

static enum dma_status idma64_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	vdesc = vchan_find_desc(&idma64c->vchan, cookie);
	if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
		bytes = idma64_active_desc_size(idma64c);
		dma_set_residue(state, bytes);
		status = idma64c->desc->status;
	} else if (vdesc) {
		bytes = to_idma64_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return status;
}

static void convert_burst(u32 *maxburst)
{
	if (*maxburst)
		*maxburst = __fls(*maxburst);
	else
		*maxburst = 0;
}

static int idma64_slave_config(struct dma_chan *chan,
		struct dma_slave_config *config)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	memcpy(&idma64c->config, config, sizeof(idma64c->config));

	convert_burst(&idma64c->config.src_maxburst);
	convert_burst(&idma64c->config.dst_maxburst);

	return 0;
}

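/*
 * Suspend the channel, optionally draining its FIFO, and poll (with a
 * bounded retry count) until the hardware reports the FIFO as empty.
 */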
static void idma64_chan_deactivate(struct idma64_chan *idma64c, bool drain)
{
	unsigned short count = 100;
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	if (drain)
		cfglo |= IDMA64C_CFGL_CH_DRAIN;
	else
		cfglo &= ~IDMA64C_CFGL_CH_DRAIN;

	channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
	do {
		udelay(1);
		cfglo = channel_readl(idma64c, CFG_LO);
	} while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
}

static void idma64_chan_activate(struct idma64_chan *idma64c)
{
	u32 cfglo;

	cfglo = channel_readl(idma64c, CFG_LO);
	channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
}

static int idma64_pause(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
		idma64_chan_deactivate(idma64c, false);
		idma64c->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}

static int idma64_resume(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
		idma64c->desc->status = DMA_IN_PROGRESS;
		idma64_chan_activate(idma64c);
	}
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	return 0;
}

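/*
 * Terminate everything on the channel: drain and stop the hardware,
 * drop the active descriptor and free all descriptors still queued in
 * the virt-dma layer.
 */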
static int idma64_terminate_all(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&idma64c->vchan.lock, flags);
	idma64_chan_deactivate(idma64c, true);
	idma64_stop_transfer(idma64c);
	if (idma64c->desc) {
		idma64_vdesc_free(&idma64c->desc->vdesc);
		idma64c->desc = NULL;
	}
	vchan_get_all_descriptors(&idma64c->vchan, &head);
	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);

	vchan_dma_desc_free_list(&idma64c->vchan, &head);
	return 0;
}

static void idma64_synchronize(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	vchan_synchronize(&idma64c->vchan);
}

static int idma64_alloc_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
					chan->device->dev,
					sizeof(struct idma64_lli), 8, 0);
	if (!idma64c->pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}

	return 0;
}

static void idma64_free_chan_resources(struct dma_chan *chan)
{
	struct idma64_chan *idma64c = to_idma64_chan(chan);

	vchan_free_chan_resources(to_virt_chan(chan));
	dma_pool_destroy(idma64c->pool);
	idma64c->pool = NULL;
}

/* ---------------------------------------------------------------------- */

#define IDMA64_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

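/*
 * Common probe path shared with the platform glue: allocate the driver
 * state, reset the controller, request the (shared) interrupt, set up
 * the virtual channels and register the dmaengine device with its
 * slave-only capabilities.
 */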
static int idma64_probe(struct idma64_chip *chip)
{
	struct idma64 *idma64;
	unsigned short nr_chan = IDMA64_NR_CHAN;
	unsigned short i;
	int ret;

	idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
	if (!idma64)
		return -ENOMEM;

	idma64->regs = chip->regs;
	chip->idma64 = idma64;

	idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
				    GFP_KERNEL);
	if (!idma64->chan)
		return -ENOMEM;

	idma64->all_chan_mask = (1 << nr_chan) - 1;

	/* Turn off iDMA controller */
	idma64_off(idma64);

	ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
			       dev_name(chip->dev), idma64);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&idma64->dma.channels);
	for (i = 0; i < nr_chan; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		idma64c->vchan.desc_free = idma64_vdesc_free;
		vchan_init(&idma64c->vchan, &idma64->dma);

		idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
		idma64c->mask = BIT(i);
	}

	dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);

	idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
	idma64->dma.device_free_chan_resources = idma64_free_chan_resources;

	idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;

	idma64->dma.device_issue_pending = idma64_issue_pending;
	idma64->dma.device_tx_status = idma64_tx_status;

	idma64->dma.device_config = idma64_slave_config;
	idma64->dma.device_pause = idma64_pause;
	idma64->dma.device_resume = idma64_resume;
	idma64->dma.device_terminate_all = idma64_terminate_all;
	idma64->dma.device_synchronize = idma64_synchronize;

	idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	idma64->dma.dev = chip->sysdev;

	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);

	ret = dma_async_device_register(&idma64->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
	return 0;
}

static int idma64_remove(struct idma64_chip *chip)
{
	struct idma64 *idma64 = chip->idma64;
	unsigned short i;

	dma_async_device_unregister(&idma64->dma);

	/*
	 * Explicitly call devm_free_irq() to avoid the side effects with
	 * the scheduled tasklets.
	 */
	devm_free_irq(chip->dev, chip->irq, idma64);

	for (i = 0; i < idma64->dma.chancnt; i++) {
		struct idma64_chan *idma64c = &idma64->chan[i];

		tasklet_kill(&idma64c->vchan.task);
	}

	return 0;
}

/* ---------------------------------------------------------------------- */

static int idma64_platform_probe(struct platform_device *pdev)
{
	struct idma64_chip *chip;
	struct device *dev = &pdev->dev;
	struct device *sysdev = dev->parent;
	struct resource *mem;
	int ret;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	chip->dev = dev;
	chip->sysdev = sysdev;

	ret = idma64_probe(chip);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, chip);
	return 0;
}

static int idma64_platform_remove(struct platform_device *pdev)
{
	struct idma64_chip *chip = platform_get_drvdata(pdev);

	return idma64_remove(chip);
}

#ifdef CONFIG_PM_SLEEP

static int idma64_pm_suspend(struct device *dev)
{
	struct idma64_chip *chip = dev_get_drvdata(dev);

	idma64_off(chip->idma64);
	return 0;
}

static int idma64_pm_resume(struct device *dev)
{
	struct idma64_chip *chip = dev_get_drvdata(dev);

	idma64_on(chip->idma64);
	return 0;
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops idma64_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
};

static struct platform_driver idma64_platform_driver = {
	.probe		= idma64_platform_probe,
	.remove		= idma64_platform_remove,
	.driver = {
		.name		= LPSS_IDMA64_DRIVER_NAME,
		.pm		= &idma64_dev_pm_ops,
	},
};

module_platform_driver(idma64_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("iDMA64 core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
MODULE_ALIAS("platform:" LPSS_IDMA64_DRIVER_NAME);