/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the
 * file called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * The PL080 and PL081 both have 16 sets of DMA signals that can be routed
 * to any channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry. Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented), transferring
 *    data if either is active. The LBREQ and LSREQ signals are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero). The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ. The DMAC
 *    will then move to the next LLI entry.
 *
 * Only the former works sanely with scatter lists, so we only implement
 * the DMAC flow control method. However, peripherals which use the LBREQ
 * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
 * these hardware restrictions prevents them from using scatter DMA.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/amba/bus.h>
#include <linux/dmaengine.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/hardware/pl080.h>
#include <asm/dma.h>
#include <asm/mach/dma.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>

#define DRIVER_NAME	"pl08xdmac"

/**
 * struct vendor_data - vendor-specific config parameters
 * for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not
 */
struct vendor_data {
        u8 channels;
        bool dualmaster;
};

/*
 * PL08X private data structures
 * An LLI struct - see the PL08x TRM.  Note that next uses bit[0] as a
 * bus bit; start & end do not - their bus bit info is in cctl.
 */
struct lli {
        dma_addr_t src;
        dma_addr_t dst;
        dma_addr_t next;
        u32 cctl;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
        struct dma_device slave;
        struct dma_device memcpy;
        void __iomem *base;
        struct amba_device *adev;
        struct vendor_data *vd;
        struct pl08x_platform_data *pd;
        struct pl08x_phy_chan *phy_chans;
        struct dma_pool *pool;
        int pool_ctr;
        spinlock_t lock;
};

/*
 * PL08X specific defines
 */

/*
 * Memory boundaries: the manual for PL08x says that the controller
 * cannot read past a 1KiB boundary, so these defines are used to
 * create transfer LLIs that do not cross such boundaries.
 */
#define PL08X_BOUNDARY_SHIFT	(10)	/* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE	(1 << PL08X_BOUNDARY_SHIFT)

/* Minimum period between work queue runs */
#define PL08X_WQ_PERIODMIN	20

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define PL08X_MAX_ALLOCS	0x40
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct lli))
#define PL08X_ALIGN		8
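
/*
 * With a 32-bit dma_addr_t a struct lli is 16 bytes, so the 0x2000 byte
 * buffer above holds 0x2000 / 16 == 512 LLIs per transfer (the figure
 * will differ on builds where dma_addr_t is wider).
 */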

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
        return container_of(chan, struct pl08x_dma_chan, chan);
}

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
        unsigned int val;

        val = readl(ch->base + PL080_CH_CONFIG);
        return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values, i.e. those for the first LLI.
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.
 */
static void pl08x_set_cregs(struct pl08x_driver_data *pl08x,
                            struct pl08x_phy_chan *ch)
{
        /* Wait for channel inactive */
        while (pl08x_phy_channel_busy(ch))
                ;

        dev_vdbg(&pl08x->adev->dev,
                 "WRITE channel %d: csrc=%08x, cdst=%08x, "
                 "cctl=%08x, clli=%08x, ccfg=%08x\n",
                 ch->id,
                 ch->csrc,
                 ch->cdst,
                 ch->cctl,
                 ch->clli,
                 ch->ccfg);

        writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR);
        writel(ch->cdst, ch->base + PL080_CH_DST_ADDR);
        writel(ch->clli, ch->base + PL080_CH_LLI);
        writel(ch->cctl, ch->base + PL080_CH_CONTROL);
        writel(ch->ccfg, ch->base + PL080_CH_CONFIG);
}

static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan)
{
        struct pl08x_channel_data *cd = plchan->cd;
        struct pl08x_phy_chan *phychan = plchan->phychan;
        struct pl08x_txd *txd = plchan->at;

        /* Copy the basic control register calculated at transfer config */
        phychan->csrc = txd->csrc;
        phychan->cdst = txd->cdst;
        phychan->clli = txd->clli;
        phychan->cctl = txd->cctl;

        /* Assign the signal to the proper control registers */
        phychan->ccfg = cd->ccfg;
        phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK;
        phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK;
        /* If it wasn't set from AMBA, ignore it */
        if (txd->direction == DMA_TO_DEVICE)
                /* Select signal as destination */
                phychan->ccfg |=
                        (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT);
        else if (txd->direction == DMA_FROM_DEVICE)
                /* Select signal as source */
                phychan->ccfg |=
                        (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT);
        /* Always enable error interrupts */
        phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK;
        /* Always enable terminal interrupts */
        phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK;
}

/*
 * Enable the DMA channel
 * Assumes all other configuration bits have been set
 * as desired before this code is called
 */
static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x,
                                  struct pl08x_phy_chan *ch)
{
        u32 val;

        /* Do not access config register until channel shows as disabled */
        while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id))
                ;

        /* Do not access config register until channel shows as inactive */
        val = readl(ch->base + PL080_CH_CONFIG);
        while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
                val = readl(ch->base + PL080_CH_CONFIG);

        writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG);
}

/*
 * Overall DMAC remains enabled always.
 *
 * Disabling individual channels could lose data.
 *
 * Disable the peripheral DMA after disabling the DMAC
 * in order to allow the DMAC FIFO to drain, and
 * hence allow the channel to show inactive
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;

        /* Set the HALT bit and wait for the FIFO to drain */
        val = readl(ch->base + PL080_CH_CONFIG);
        val |= PL080_CONFIG_HALT;
        writel(val, ch->base + PL080_CH_CONFIG);

        /* Wait for channel inactive */
        while (pl08x_phy_channel_busy(ch))
                ;
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;

        /* Clear the HALT bit */
        val = readl(ch->base + PL080_CH_CONFIG);
        val &= ~PL080_CONFIG_HALT;
        writel(val, ch->base + PL080_CH_CONFIG);
}

/* Stops the channel */
static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
{
        u32 val;

        pl08x_pause_phy_chan(ch);

        /* Disable channel */
        val = readl(ch->base + PL080_CH_CONFIG);
        val &= ~PL080_CONFIG_ENABLE;
        val &= ~PL080_CONFIG_ERR_IRQ_MASK;
        val &= ~PL080_CONFIG_TC_IRQ_MASK;
        writel(val, ch->base + PL080_CH_CONFIG);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
        /* The source width defines the number of bytes */
        u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

        switch ((cctl & PL080_CONTROL_SWIDTH_MASK) >>
                PL080_CONTROL_SWIDTH_SHIFT) {
        case PL080_WIDTH_8BIT:
                break;
        case PL080_WIDTH_16BIT:
                bytes *= 2;
                break;
        case PL080_WIDTH_32BIT:
                bytes *= 4;
                break;
        }
        return bytes;
}
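
/*
 * As an example of the encoding above: a CCTL value with a 32-bit source
 * width and a transfer size field of 0x80 describes 0x80 * 4 == 512
 * bytes in flight for that LLI.
 */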

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
        struct pl08x_phy_chan *ch;
        struct pl08x_txd *txdi = NULL;
        struct pl08x_txd *txd;
        unsigned long flags;
        u32 bytes = 0;

        spin_lock_irqsave(&plchan->lock, flags);

        ch = plchan->phychan;
        txd = plchan->at;

        /*
         * Next, follow the LLIs to get the number of pending bytes in the
         * currently active transaction.
         */
        if (ch && txd) {
                struct lli *llis_va = txd->llis_va;
                struct lli *llis_bus = (struct lli *) txd->llis_bus;
                u32 clli = readl(ch->base + PL080_CH_LLI);

                /* First get the bytes in the current active LLI */
                bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

                if (clli) {
                        int i = 0;

                        /* Forward to the LLI pointed to by clli */
                        while ((clli != (u32) &(llis_bus[i])) &&
                               (i < MAX_NUM_TSFR_LLIS))
                                i++;

                        while (clli) {
                                bytes += get_bytes_in_cctl(llis_va[i].cctl);
                                /*
                                 * A LLI pointer of 0 terminates the LLI list
                                 */
                                clli = llis_va[i].next;
                                i++;
                        }
                }
        }

        /* Sum up all queued transactions */
        if (!list_empty(&plchan->desc_list)) {
                list_for_each_entry(txdi, &plchan->desc_list, node) {
                        bytes += txdi->len;
                }
        }

        spin_unlock_irqrestore(&plchan->lock, flags);

        return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
                      struct pl08x_dma_chan *virt_chan)
{
        struct pl08x_phy_chan *ch = NULL;
        unsigned long flags;
        int i;

        /*
         * Try to locate a physical channel to be used for
         * this transfer. If all are taken return NULL and
         * the requester will have to cope by using some fallback
         * PIO mode or retrying later.
         */
        for (i = 0; i < pl08x->vd->channels; i++) {
                ch = &pl08x->phy_chans[i];

                spin_lock_irqsave(&ch->lock, flags);

                if (!ch->serving) {
                        ch->serving = virt_chan;
                        ch->signal = -1;
                        spin_unlock_irqrestore(&ch->lock, flags);
                        break;
                }

                spin_unlock_irqrestore(&ch->lock, flags);
        }

        if (i == pl08x->vd->channels) {
                /* No physical channel available, cope with it */
                return NULL;
        }

        return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
                                         struct pl08x_phy_chan *ch)
{
        unsigned long flags;

        /* Stop the channel and clear its interrupts */
        pl08x_stop_phy_chan(ch);
        writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
        writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);

        /* Mark it as free */
        spin_lock_irqsave(&ch->lock, flags);
        ch->serving = NULL;
        spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
        switch (coded) {
        case PL080_WIDTH_8BIT:
                return 1;
        case PL080_WIDTH_16BIT:
                return 2;
        case PL080_WIDTH_32BIT:
                return 4;
        default:
                break;
        }
        BUG();
        return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
                                  u32 tsize)
{
        u32 retbits = cctl;

        /* Remove all src, dst and transfer size bits */
        retbits &= ~PL080_CONTROL_DWIDTH_MASK;
        retbits &= ~PL080_CONTROL_SWIDTH_MASK;
        retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

        /* Then set the bits according to the parameters */
        switch (srcwidth) {
        case 1:
                retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        case 2:
                retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        case 4:
                retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
                break;
        default:
                BUG();
                break;
        }

        switch (dstwidth) {
        case 1:
                retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        case 2:
                retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        case 4:
                retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
                break;
        default:
                BUG();
                break;
        }

        retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
        return retbits;
}
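
/*
 * For instance, pl08x_cctl_bits(cctl, 4, 4, 0x10) re-encodes cctl for a
 * 32-bit source, a 32-bit destination and a transfer size of 0x10
 * elements, i.e. 0x10 * 4 == 64 bytes moved by that LLI.
 */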

/*
 * Autoselect a master bus to use for the transfer.
 * This prefers the destination bus if both are available;
 * if one bus carries a fixed (non-incrementing) address,
 * the other bus is chosen as master.
 */
static void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus,
        struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus,
        struct pl08x_bus_data **sbus, u32 cctl)
{
        if (!(cctl & PL080_CONTROL_DST_INCR)) {
                *mbus = src_bus;
                *sbus = dst_bus;
        } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
                *mbus = dst_bus;
                *sbus = src_bus;
        } else {
                if (dst_bus->buswidth == 4) {
                        *mbus = dst_bus;
                        *sbus = src_bus;
                } else if (src_bus->buswidth == 4) {
                        *mbus = src_bus;
                        *sbus = dst_bus;
                } else if (dst_bus->buswidth == 2) {
                        *mbus = dst_bus;
                        *sbus = src_bus;
                } else if (src_bus->buswidth == 2) {
                        *mbus = src_bus;
                        *sbus = dst_bus;
                } else {
                        /* src_bus->buswidth == 1 */
                        *mbus = dst_bus;
                        *sbus = src_bus;
                }
        }
}

/*
 * Fill in one LLI for a certain transfer descriptor
 * and advance the counter
 */
static int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
        struct pl08x_txd *txd, int num_llis, int len,
        u32 cctl, u32 *remainder)
{
        struct lli *llis_va = txd->llis_va;
        struct lli *llis_bus = (struct lli *) txd->llis_bus;

        BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

        llis_va[num_llis].cctl = cctl;
        llis_va[num_llis].src = txd->srcbus.addr;
        llis_va[num_llis].dst = txd->dstbus.addr;

        /*
         * On versions with dual masters, you can optionally OR on
         * PL080_LLI_LM_AHB2 in the LLI to tell the hardware to read
         * in new LLIs with that master, but we always try to
         * choose AHB1 to point into memory. The idea is to have AHB2
         * fixed on the peripheral and AHB1 messing around in the
         * memory. So we don't manipulate this bit currently.
         */

        llis_va[num_llis].next =
                (dma_addr_t)((u32) &(llis_bus[num_llis + 1]));

        if (cctl & PL080_CONTROL_SRC_INCR)
                txd->srcbus.addr += len;
        if (cctl & PL080_CONTROL_DST_INCR)
                txd->dstbus.addr += len;

        *remainder -= len;

        return num_llis + 1;
}

/*
 * Return number of bytes to fill to boundary, or len
 */
static inline u32 pl08x_pre_boundary(u32 addr, u32 len)
{
        u32 boundary;

        boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1)
                << PL08X_BOUNDARY_SHIFT;

        if (boundary < addr + len)
                return boundary - addr;
        else
                return len;
}
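
/*
 * For instance, pl08x_pre_boundary(0x3f8, 0x20) returns 8, since only
 * 8 bytes fit before the next 1KiB boundary at 0x400, while
 * pl08x_pre_boundary(0x400, 0x20) returns the full 0x20 because the
 * transfer starts on a boundary and stays within the next 1KiB.
 */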

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
                                    struct pl08x_txd *txd)
{
        struct pl08x_channel_data *cd = txd->cd;
        struct pl08x_bus_data *mbus, *sbus;
        u32 remainder;
        int num_llis = 0;
        u32 cctl;
        int max_bytes_per_lli;
        int total_bytes = 0;
        struct lli *llis_va;
        struct lli *llis_bus;

        if (!txd) {
                dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__);
                return 0;
        }

        txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
                                      &txd->llis_bus);
        if (!txd->llis_va) {
                dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
                return 0;
        }

        pl08x->pool_ctr++;

        /*
         * Initialize bus values for this transfer
         * from the passed optimal values
         */
        if (!cd) {
                dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__);
                return 0;
        }

        /* Get the default CCTL from the platform data */
        cctl = cd->cctl;

        /*
         * On the PL080 we have two bus masters and we
         * should select one for source and one for
         * destination. We try to use AHB2 for the
         * bus which does not increment (typically the
         * peripheral) else we just choose something.
         */
        cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
        if (pl08x->vd->dualmaster) {
                if (cctl & PL080_CONTROL_SRC_INCR)
                        /* Source increments, use AHB2 for destination */
                        cctl |= PL080_CONTROL_DST_AHB2;
                else if (cctl & PL080_CONTROL_DST_INCR)
                        /* Destination increments, use AHB2 for source */
                        cctl |= PL080_CONTROL_SRC_AHB2;
                else
                        /* Just pick something, source AHB1 dest AHB2 */
                        cctl |= PL080_CONTROL_DST_AHB2;
        }

        /* Find maximum width of the source bus */
        txd->srcbus.maxwidth =
                pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
                                         PL080_CONTROL_SWIDTH_SHIFT);

        /* Find maximum width of the destination bus */
        txd->dstbus.maxwidth =
                pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
                                         PL080_CONTROL_DWIDTH_SHIFT);

        /* Set up the bus widths to the maximum */
        txd->srcbus.buswidth = txd->srcbus.maxwidth;
        txd->dstbus.buswidth = txd->dstbus.maxwidth;
        dev_vdbg(&pl08x->adev->dev,
                 "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
                 __func__, txd->srcbus.buswidth, txd->dstbus.buswidth);

        /*
         * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
         */
        max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) *
                PL080_CONTROL_TRANSFER_SIZE_MASK;
        dev_vdbg(&pl08x->adev->dev,
                 "%s max bytes per lli = %d\n",
                 __func__, max_bytes_per_lli);
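
        /*
         * For two 32-bit buses this works out at 4 * 0xfff == 16380 bytes
         * per LLI, assuming the 12-bit transfer size field implied by
         * PL080_CONTROL_TRANSFER_SIZE_MASK.
         */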

        /* We need to count this down to zero */
        remainder = txd->len;
        dev_vdbg(&pl08x->adev->dev,
                 "%s remainder = %d\n",
                 __func__, remainder);

        /*
         * Choose bus to align to
         * - prefers destination bus if both available
         * - if fixed address on one bus chooses other
         * - modifies cctl to choose an appropriate master
         */
        pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus,
                                &mbus, &sbus, cctl);

        /*
         * The lowest bit of the LLI register
         * is also used to indicate which master to
         * use for reading the LLIs.
         */

        if (txd->len < mbus->buswidth) {
                /*
                 * Less than a bus width available
                 * - send as single bytes
                 */
                while (remainder) {
                        dev_vdbg(&pl08x->adev->dev,
                                 "%s single byte LLIs for a transfer of "
                                 "less than a bus width (remain %08x)\n",
                                 __func__, remainder);
                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
                        num_llis =
                                pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1,
                                        cctl, &remainder);
                        total_bytes++;
                }
        } else {
                /*
                 * Make one byte LLIs until master bus is aligned
                 * - slave will then be aligned also
                 */
                while ((mbus->addr) % (mbus->buswidth)) {
                        dev_vdbg(&pl08x->adev->dev,
                                 "%s adjustment lli for less than bus width "
                                 "(remain %08x)\n",
                                 __func__, remainder);
                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
                        num_llis = pl08x_fill_lli_for_desc
                                (pl08x, txd, num_llis, 1, cctl, &remainder);
                        total_bytes++;
                }

                /*
                 * Master now aligned
                 * - if slave is not then we must set its width down
                 */
                if (sbus->addr % sbus->buswidth) {
                        dev_dbg(&pl08x->adev->dev,
                                "%s set down bus width to one byte\n",
                                __func__);

                        sbus->buswidth = 1;
                }

                /*
                 * Make largest possible LLIs until less than one bus
                 * width left
                 */
                while (remainder > (mbus->buswidth - 1)) {
                        int lli_len, target_len;
                        int tsize;
                        int odd_bytes;

                        /*
                         * If enough left try to send max possible,
                         * otherwise try to send the remainder
                         */
                        target_len = remainder;
                        if (remainder > max_bytes_per_lli)
                                target_len = max_bytes_per_lli;

                        /*
                         * Set bus lengths for incrementing buses
                         * to number of bytes which fill to next memory
                         * boundary
                         */
                        if (cctl & PL080_CONTROL_SRC_INCR)
                                txd->srcbus.fill_bytes =
                                        pl08x_pre_boundary(
                                                txd->srcbus.addr,
                                                remainder);
                        else
                                txd->srcbus.fill_bytes =
                                        max_bytes_per_lli;

                        if (cctl & PL080_CONTROL_DST_INCR)
                                txd->dstbus.fill_bytes =
                                        pl08x_pre_boundary(
                                                txd->dstbus.addr,
                                                remainder);
                        else
                                txd->dstbus.fill_bytes =
                                        max_bytes_per_lli;

                        /* Find the nearest of the two boundaries */
                        lli_len = min(txd->srcbus.fill_bytes,
                                      txd->dstbus.fill_bytes);

                        BUG_ON(lli_len > remainder);

                        if (lli_len <= 0) {
                                dev_err(&pl08x->adev->dev,
                                        "%s lli_len is %d, <= 0\n",
                                        __func__, lli_len);
                                return 0;
                        }

                        if (lli_len == target_len) {
                                /*
                                 * Can send what we wanted.
                                 * Maintain alignment.
                                 */
                                lli_len = (lli_len/mbus->buswidth) *
                                                        mbus->buswidth;
                                odd_bytes = 0;
                        } else {
                                /*
                                 * So now we know how many bytes to transfer
                                 * to get to the nearest boundary. The next
                                 * LLI will cross the boundary - however, we
                                 * may be working to a boundary on the slave
                                 * bus. We need to ensure the master stays
                                 * aligned, and that we are working in
                                 * multiples of the bus widths.
                                 */
                                odd_bytes = lli_len % mbus->buswidth;
                                lli_len -= odd_bytes;

                        }

                        if (lli_len) {
                                /*
                                 * Check against minimum bus alignment:
                                 * calculate actual transfer size in relation
                                 * to bus width and get a maximum remainder of
                                 * the smallest bus width - 1
                                 */
                                /* FIXME: use round_down()? */
                                tsize = lli_len / min(mbus->buswidth,
                                                      sbus->buswidth);
                                lli_len = tsize * min(mbus->buswidth,
                                                      sbus->buswidth);

                                if (target_len != lli_len) {
                                        dev_vdbg(&pl08x->adev->dev,
                                        "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n",
                                        __func__, target_len, lli_len, txd->len);
                                }

                                cctl = pl08x_cctl_bits(cctl,
                                                       txd->srcbus.buswidth,
                                                       txd->dstbus.buswidth,
                                                       tsize);

                                dev_vdbg(&pl08x->adev->dev,
                                        "%s fill lli with single lli chunk of size %08x (remainder %08x)\n",
                                        __func__, lli_len, remainder);
                                num_llis = pl08x_fill_lli_for_desc(pl08x, txd,
                                                num_llis, lli_len, cctl,
                                                &remainder);
                                total_bytes += lli_len;
                        }

                        if (odd_bytes) {
                                /*
                                 * Creep past the boundary,
                                 * maintaining master alignment
                                 */
                                int j;
                                for (j = 0; (j < mbus->buswidth)
                                                && (remainder); j++) {
                                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
                                        dev_vdbg(&pl08x->adev->dev,
                                                "%s align with boundary, single byte (remain %08x)\n",
                                                __func__, remainder);
                                        num_llis =
                                                pl08x_fill_lli_for_desc(pl08x,
                                                        txd, num_llis, 1,
                                                        cctl, &remainder);
                                        total_bytes++;
                                }
                        }
                }

                /*
                 * Send any odd bytes
                 */
                if (remainder < 0) {
                        dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n",
                                __func__, remainder);
                        return 0;
                }

                while (remainder) {
                        cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
                        dev_vdbg(&pl08x->adev->dev,
                                "%s align with boundary, single odd byte (remain %d)\n",
                                __func__, remainder);
                        num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis,
                                        1, cctl, &remainder);
                        total_bytes++;
                }
        }
        if (total_bytes != txd->len) {
                dev_err(&pl08x->adev->dev,
                        "%s size of encoded LLIs doesn't match total txd, transferred 0x%08x from size 0x%08x\n",
                        __func__, total_bytes, txd->len);
                return 0;
        }

        if (num_llis >= MAX_NUM_TSFR_LLIS) {
                dev_err(&pl08x->adev->dev,
                        "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
                        __func__, (u32) MAX_NUM_TSFR_LLIS);
                return 0;
        }
        /*
         * Decide whether this is a loop or a terminated transfer
         */
        llis_va = txd->llis_va;
        llis_bus = (struct lli *) txd->llis_bus;

        if (cd->circular_buffer) {
                /*
                 * Loop the circular buffer so that the next element
                 * points back to the beginning of the LLI.
                 */
                llis_va[num_llis - 1].next =
                        (dma_addr_t)((unsigned int)&(llis_bus[0]));
        } else {
                /*
                 * On non-circular buffers, the final LLI terminates
                 * the list.
                 */
                llis_va[num_llis - 1].next = 0;
                /*
                 * The final LLI element shall also fire an interrupt
                 */
                llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
        }

        /* Now store the channel register values */
        txd->csrc = llis_va[0].src;
        txd->cdst = llis_va[0].dst;
        if (num_llis > 1)
                txd->clli = llis_va[0].next;
        else
                txd->clli = 0;

        txd->cctl = llis_va[0].cctl;
        /* ccfg will be set at physical channel allocation time */

#ifdef VERBOSE_DEBUG
        {
                int i;

                for (i = 0; i < num_llis; i++) {
                        dev_vdbg(&pl08x->adev->dev,
                                 "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n",
                                 i,
                                 &llis_va[i],
                                 llis_va[i].src,
                                 llis_va[i].dst,
                                 llis_va[i].cctl,
                                 llis_va[i].next
                                );
                }
        }
#endif

        return num_llis;
}

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
                           struct pl08x_txd *txd)
{
        if (!txd)
                dev_err(&pl08x->adev->dev,
                        "%s no descriptor to free\n",
                        __func__);

        /* Free the LLI */
        dma_pool_free(pl08x->pool, txd->llis_va,
                      txd->llis_bus);

        pl08x->pool_ctr--;

        kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
                                struct pl08x_dma_chan *plchan)
{
        struct pl08x_txd *txdi = NULL;
        struct pl08x_txd *next;

        if (!list_empty(&plchan->desc_list)) {
                list_for_each_entry_safe(txdi,
                                         next, &plchan->desc_list, node) {
                        list_del(&txdi->node);
                        pl08x_free_txd(pl08x, txdi);
                }
        }
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
        return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
                            struct pl08x_txd *txd)
{
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_phy_chan *ch;
        int ret;

        /* Check if we already have a channel */
        if (plchan->phychan)
                return 0;

        ch = pl08x_get_phy_channel(pl08x, plchan);
        if (!ch) {
                /* No physical channel available, cope with it */
                dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
                return -EBUSY;
        }

        /*
         * OK we have a physical channel: for memcpy() this is all we
         * need, but for slaves the physical signals may be muxed!
         * Can the platform allow us to use this channel?
         */
        if (plchan->slave &&
            ch->signal < 0 &&
            pl08x->pd->get_signal) {
                ret = pl08x->pd->get_signal(plchan);
                if (ret < 0) {
                        dev_dbg(&pl08x->adev->dev,
                                "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
                                ch->id, plchan->name);
                        /* Release physical channel & return */
                        pl08x_put_phy_channel(pl08x, ch);
                        return -EBUSY;
                }
                ch->signal = ret;
        }

        dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
                ch->id,
                ch->signal,
                plchan->name);

        plchan->phychan = ch;

        return 0;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);

        plchan->chan.cookie += 1;
        if (plchan->chan.cookie < 0)
                plchan->chan.cookie = 1;
        tx->cookie = plchan->chan.cookie;
        /* This unlock follows the lock in the prep() function */
        spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);

        return tx->cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
                struct dma_chan *chan, unsigned long flags)
{
        struct dma_async_tx_descriptor *retval = NULL;

        return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop
 * may give problems - could schedule where indicated.
 * If slaves are relying on interrupts to signal completion this
 * function must not be called with interrupts disabled
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
                    dma_cookie_t cookie,
                    struct dma_tx_state *txstate)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;
        u32 bytesleft = 0;

        last_used = plchan->chan.cookie;
        last_complete = plchan->lc;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS) {
                dma_set_tx_state(txstate, last_complete, last_used, 0);
                return ret;
        }

        /*
         * schedule(); could be inserted here
         */

        /*
         * This cookie not complete yet
         */
        last_used = plchan->chan.cookie;
        last_complete = plchan->lc;

        /* Get number of bytes left in the active transactions and queue */
        bytesleft = pl08x_getbytes_chan(plchan);

        dma_set_tx_state(txstate, last_complete, last_used,
                         bytesleft);

        if (plchan->state == PL08X_CHAN_PAUSED)
                return DMA_PAUSED;

        /* Whether waiting or running, we're in progress */
        return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
struct burst_table {
        int burstwords;
        u32 reg;
};

static const struct burst_table burst_sizes[] = {
        {
                .burstwords = 256,
                .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 128,
                .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 64,
                .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 32,
                .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 16,
                .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 8,
                .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 4,
                .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
        {
                .burstwords = 1,
                .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
        },
};
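
/*
 * Note that the table is deliberately kept in descending order:
 * dma_set_runtime_config() below walks it from the top and stops at the
 * first entry not larger than the requested maxburst, so e.g. a maxburst
 * of 20 words selects the 16-word burst encoding.
 */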

static void dma_set_runtime_config(struct dma_chan *chan,
                                   struct dma_slave_config *config)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_channel_data *cd = plchan->cd;
        enum dma_slave_buswidth addr_width;
        u32 maxburst;
        u32 cctl = 0;
        /* Mask out all except src and dst channel */
        u32 ccfg = cd->ccfg & 0x000003DEU;
        int i;

        /* Transfer direction */
        plchan->runtime_direction = config->direction;
        if (config->direction == DMA_TO_DEVICE) {
                plchan->runtime_addr = config->dst_addr;
                cctl |= PL080_CONTROL_SRC_INCR;
                ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
                addr_width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
        } else if (config->direction == DMA_FROM_DEVICE) {
                plchan->runtime_addr = config->src_addr;
                cctl |= PL080_CONTROL_DST_INCR;
                ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
                addr_width = config->src_addr_width;
                maxburst = config->src_maxburst;
        } else {
                dev_err(&pl08x->adev->dev,
                        "bad runtime_config: alien transfer direction\n");
                return;
        }

        switch (addr_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
                        (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
                        (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
                        (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
                break;
        default:
                dev_err(&pl08x->adev->dev,
                        "bad runtime_config: alien address width\n");
                return;
        }

        /*
         * Now decide on a maxburst:
         * If this channel will only request single transfers, set this
         * down to ONE element. Also select one element if no maxburst
         * is specified.
         */
        if (plchan->cd->single || maxburst == 0) {
                cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
                        (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
        } else {
                for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
                        if (burst_sizes[i].burstwords <= maxburst)
                                break;
                cctl |= burst_sizes[i].reg;
        }

        /* Access the cell in privileged mode, non-bufferable, non-cacheable */
        cctl &= ~PL080_CONTROL_PROT_MASK;
        cctl |= PL080_CONTROL_PROT_SYS;

        /* Modify the default channel data to fit PrimeCell request */
        cd->cctl = cctl;
        cd->ccfg = ccfg;

        dev_dbg(&pl08x->adev->dev,
                "configured channel %s (%s) for %s, data width %d, "
                "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
                dma_chan_name(chan), plchan->name,
                (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
                addr_width,
                maxburst,
                cctl, ccfg);
}
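
/*
 * A slave driver would typically configure a channel along these lines
 * before preparing transfers (a sketch only; the UART address, width and
 * burst values are made-up examples):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_TO_DEVICE,
 *		.dst_addr = uart_tx_fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst = 16,
 *	};
 *
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *				     (unsigned long)&cfg);
 */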

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        unsigned long flags;

        spin_lock_irqsave(&plchan->lock, flags);
        /* Something is already active, or we're waiting for a channel... */
        if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
                spin_unlock_irqrestore(&plchan->lock, flags);
                return;
        }

        /* Take the first element in the queue and execute it */
        if (!list_empty(&plchan->desc_list)) {
                struct pl08x_txd *next;

                next = list_first_entry(&plchan->desc_list,
                                        struct pl08x_txd,
                                        node);
                list_del(&next->node);
                plchan->at = next;
                plchan->state = PL08X_CHAN_RUNNING;

                /* Configure the physical channel for the active txd */
                pl08x_config_phychan_for_txd(plchan);
                pl08x_set_cregs(pl08x, plchan->phychan);
                pl08x_enable_phy_chan(pl08x, plchan->phychan);
        }

        spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
                                        struct pl08x_txd *txd)
{
        int num_llis;
        struct pl08x_driver_data *pl08x = plchan->host;
        int ret;

        num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
        if (!num_llis) {
                kfree(txd);
                return -EINVAL;
        }

        spin_lock_irqsave(&plchan->lock, plchan->lockflags);

        /*
         * If this device is not using a circular buffer then
         * queue this new descriptor for transfer.
         * The descriptor for a circular buffer continues
         * to be used until the channel is freed.
         */
        if (txd->cd->circular_buffer)
                dev_err(&pl08x->adev->dev,
                        "%s attempting to queue a circular buffer\n",
                        __func__);
        else
                list_add_tail(&txd->node,
                              &plchan->desc_list);

        /*
         * See if we already have a physical channel allocated,
         * else this is the time to try to get one.
         */
        ret = prep_phy_channel(plchan, txd);
        if (ret) {
                /*
                 * No physical channel was available.
                 *
                 * memcpy transfers will be stacked up until there is a
                 * channel available to handle them, whereas slave transfers
                 * may have been denied due to platform channel muxing
                 * restrictions. Since there is no guarantee that this will
                 * ever be resolved, and since the signal must be acquired
                 * AFTER acquiring the physical channel, we will let them be
                 * NACKed with -EBUSY here. The drivers can always retry the
                 * prep() call if they are eager on doing this using DMA.
                 */
                if (plchan->slave) {
                        pl08x_free_txd_list(pl08x, plchan);
                        spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
                        return -EBUSY;
                }
                /* Do this memcpy whenever there is a channel ready */
                plchan->state = PL08X_CHAN_WAITING;
                plchan->waiting = txd;
        } else
                /*
                 * Else we're all set, paused and ready to roll,
                 * status will switch to PL08X_CHAN_RUNNING when
                 * we call issue_pending(). If there is something
                 * running on the channel already we don't change
                 * its state.
                 */
                if (plchan->state == PL08X_CHAN_IDLE)
                        plchan->state = PL08X_CHAN_PAUSED;

        /*
         * Notice that we leave plchan->lock locked on purpose:
         * it will be unlocked in the subsequent tx_submit()
         * call. This is a consequence of the current API.
         */

        return 0;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
                struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_txd *txd;
        int ret;

        txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
        if (!txd) {
                dev_err(&pl08x->adev->dev,
                        "%s no memory for descriptor\n", __func__);
                return NULL;
        }

        dma_async_tx_descriptor_init(&txd->tx, chan);
        txd->direction = DMA_NONE;
        txd->srcbus.addr = src;
        txd->dstbus.addr = dest;

        /* Set platform data for m2m */
        txd->cd = &pl08x->pd->memcpy_channel;
        /* Both to be incremented or the code will break */
        txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
        txd->tx.tx_submit = pl08x_tx_submit;
        txd->tx.callback = NULL;
        txd->tx.callback_param = NULL;
        txd->len = len;

        INIT_LIST_HEAD(&txd->node);
        ret = pl08x_prep_channel_resources(plchan, txd);
        if (ret)
                return NULL;
        /*
         * NB: the channel lock is held at this point so tx_submit()
         * must be called in direct succession.
         */

        return &txd->tx;
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_data_direction direction,
                unsigned long flags)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_txd *txd;
        int ret;

        /*
         * Current implementation ASSUMES only one sg
         */
        if (sg_len != 1) {
                dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
                        __func__);
                BUG();
        }

        dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
                __func__, sgl->length, plchan->name);

        txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
        if (!txd) {
                dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
                return NULL;
        }

        dma_async_tx_descriptor_init(&txd->tx, chan);

        if (direction != plchan->runtime_direction)
                dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
                        "the direction configured for the PrimeCell\n",
                        __func__);

        /*
         * Set up addresses, the PrimeCell configured address
         * will take precedence since this may configure the
         * channel target address dynamically at runtime.
         */
        txd->direction = direction;
        if (direction == DMA_TO_DEVICE) {
                txd->srcbus.addr = sgl->dma_address;
                if (plchan->runtime_addr)
                        txd->dstbus.addr = plchan->runtime_addr;
                else
                        txd->dstbus.addr = plchan->cd->addr;
        } else if (direction == DMA_FROM_DEVICE) {
                if (plchan->runtime_addr)
                        txd->srcbus.addr = plchan->runtime_addr;
                else
                        txd->srcbus.addr = plchan->cd->addr;
                txd->dstbus.addr = sgl->dma_address;
        } else {
                dev_err(&pl08x->adev->dev,
                        "%s direction unsupported\n", __func__);
                return NULL;
        }
        txd->cd = plchan->cd;
        txd->tx.tx_submit = pl08x_tx_submit;
        txd->tx.callback = NULL;
        txd->tx.callback_param = NULL;
        txd->len = sgl->length;
        INIT_LIST_HEAD(&txd->node);

        ret = pl08x_prep_channel_resources(plchan, txd);
        if (ret)
                return NULL;
        /*
         * NB: the channel lock is held at this point so tx_submit()
         * must be called in direct succession.
         */

        return &txd->tx;
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                         unsigned long arg)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        unsigned long flags;
        int ret = 0;

        /* Controls applicable to inactive channels */
        if (cmd == DMA_SLAVE_CONFIG) {
                dma_set_runtime_config(chan,
                                       (struct dma_slave_config *)
                                       arg);
                return 0;
        }

        /*
         * Anything succeeds on channels with no physical allocation and
         * no queued transfers.
         */
        spin_lock_irqsave(&plchan->lock, flags);
        if (!plchan->phychan && !plchan->at) {
                spin_unlock_irqrestore(&plchan->lock, flags);
                return 0;
        }

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                plchan->state = PL08X_CHAN_IDLE;

                if (plchan->phychan) {
                        pl08x_stop_phy_chan(plchan->phychan);

                        /*
                         * Mark physical channel as free and free any slave
                         * signal
                         */
                        if ((plchan->phychan->signal >= 0) &&
                            pl08x->pd->put_signal) {
                                pl08x->pd->put_signal(plchan);
                                plchan->phychan->signal = -1;
                        }
                        pl08x_put_phy_channel(pl08x, plchan->phychan);
                        plchan->phychan = NULL;
                }
                /* Dequeue jobs and free LLIs */
                if (plchan->at) {
                        pl08x_free_txd(pl08x, plchan->at);
                        plchan->at = NULL;
                }
                /* Dequeue jobs not yet fired as well */
                pl08x_free_txd_list(pl08x, plchan);
                break;
        case DMA_PAUSE:
                pl08x_pause_phy_chan(plchan->phychan);
                plchan->state = PL08X_CHAN_PAUSED;
                break;
        case DMA_RESUME:
                pl08x_resume_phy_chan(plchan->phychan);
                plchan->state = PL08X_CHAN_RUNNING;
                break;
        default:
                /* Unknown command */
                ret = -ENXIO;
                break;
        }

        spin_unlock_irqrestore(&plchan->lock, flags);

        return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        char *name = chan_id;

        /* Check that the channel is not taken! */
        if (!strcmp(plchan->name, name))
                return true;

        return false;
}
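
/*
 * Clients are expected to pass this as the filter function to
 * dma_request_channel(), matching on the channel name set up in the
 * platform data; a sketch (the "uart0_tx" name is a made-up example):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 */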
1615 | ||
1616 | /* | |
1617 | * Make sure the device is present and enabled. | |
1618 | * TODO: toggle the enable bit depending on the number of | |
1619 | * physical channels actually in use; if that number drops | |
1620 | * to zero, shut the block off and gate its clock at the | |
1621 | * same time to save power. | |
1622 | */ | |
1623 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) | |
1624 | { | |
1625 | u32 val; | |
1626 | ||
1627 | val = readl(pl08x->base + PL080_CONFIG); | |
1628 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); | |
e8b5e11d | 1629 | /* Clearing the M1/M2 endianness bits selects little-endian transfers */ |
e8689e63 LW |
1630 | val |= PL080_CONFIG_ENABLE; |
1631 | writel(val, pl08x->base + PL080_CONFIG); | |
1632 | } | |
1633 | ||
1634 | static void pl08x_tasklet(unsigned long data) | |
1635 | { | |
1636 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; | |
1637 | struct pl08x_phy_chan *phychan; | |
1638 | struct pl08x_driver_data *pl08x; | |
bf072af4 | 1639 | unsigned long flags; |
e8689e63 LW |
1640 | BUG_ON(!plchan); /* check before the dereferences below */ | |
1641 | phychan = plchan->phychan; | |
1642 | pl08x = plchan->host; | |
1643 | ||
bf072af4 | 1644 | spin_lock_irqsave(&plchan->lock, flags); |
e8689e63 LW |
1645 | |
1646 | if (plchan->at) { | |
1647 | dma_async_tx_callback callback = | |
1648 | plchan->at->tx.callback; | |
1649 | void *callback_param = | |
1650 | plchan->at->tx.callback_param; | |
1651 | ||
1652 | /* | |
1653 | * Update last completed | |
1654 | */ | |
91aa5fad | 1655 | plchan->lc = plchan->at->tx.cookie; |
e8689e63 LW |
1656 | |
1657 | /* | |
1658 | * Callback to signal completion | |
1659 | */ | |
1660 | if (callback) | |
1661 | callback(callback_param); | |
1662 | ||
1663 | /* | |
1664 | * Device callbacks should NOT clear | |
1665 | * the current transaction on the channel | |
1666 | * Linus: sometimes they should? | |
1667 | */ | |
1668 | BUG_ON(!plchan->at); | |
1670 | ||
1671 | /* | |
1672 | * Free the descriptor if it's not for a device | |
1673 | * using a circular buffer | |
1674 | */ | |
1675 | if (!plchan->at->cd->circular_buffer) { | |
1676 | pl08x_free_txd(pl08x, plchan->at); | |
1677 | plchan->at = NULL; | |
1678 | } | |
1679 | /* | |
1680 | * Descriptors for devices using circular | |
1681 | * buffers are only freed when the client | |
1682 | * has disabled DMA | |
1683 | */ | |
1684 | } | |
1685 | /* | |
1686 | * If a new descriptor is queued, set it up | |
1687 | * plchan->at is NULL here | |
1688 | */ | |
1689 | if (!list_empty(&plchan->desc_list)) { | |
1690 | struct pl08x_txd *next; | |
1691 | ||
1692 | next = list_first_entry(&plchan->desc_list, | |
1693 | struct pl08x_txd, | |
1694 | node); | |
1695 | list_del(&next->node); | |
1696 | plchan->at = next; | |
1697 | /* Configure the physical channel for the next txd */ | |
1698 | pl08x_config_phychan_for_txd(plchan); | |
1699 | pl08x_set_cregs(pl08x, plchan->phychan); | |
1700 | pl08x_enable_phy_chan(pl08x, plchan->phychan); | |
1701 | } else { | |
1702 | struct pl08x_dma_chan *waiting = NULL; | |
1703 | ||
1704 | /* | |
1705 | * No more jobs, so free up the physical channel | |
1706 | * Free any allocated signal on slave transfers too | |
1707 | */ | |
1708 | if ((phychan->signal >= 0) && pl08x->pd->put_signal) { | |
1709 | pl08x->pd->put_signal(plchan); | |
1710 | phychan->signal = -1; | |
1711 | } | |
1712 | pl08x_put_phy_channel(pl08x, phychan); | |
1713 | plchan->phychan = NULL; | |
1714 | plchan->state = PL08X_CHAN_IDLE; | |
1715 | ||
1716 | /* | |
1717 | * Before anyone else can grab the freed-up | |
1718 | * physical channel, check whether a memcpy | |
1719 | * job is waiting that got queued up while | |
1720 | * all the physical channels were busy | |
1721 | * with data. | |
1722 | */ | |
1723 | list_for_each_entry(waiting, &pl08x->memcpy.channels, | |
1724 | chan.device_node) { | |
1725 | if (waiting->state == PL08X_CHAN_WAITING && | |
1726 | waiting->waiting != NULL) { | |
1727 | int ret; | |
1728 | ||
1729 | /* This must not fail now that a physical channel is free */ | |
1730 | ret = prep_phy_channel(waiting, | |
1731 | waiting->waiting); | |
1732 | BUG_ON(ret); | |
1733 | waiting->state = PL08X_CHAN_RUNNING; | |
1734 | waiting->waiting = NULL; | |
1735 | pl08x_issue_pending(&waiting->chan); | |
1736 | break; | |
1737 | } | |
1738 | } | |
1739 | } | |
1740 | ||
bf072af4 | 1741 | spin_unlock_irqrestore(&plchan->lock, flags); |
e8689e63 LW |
1742 | } |
1743 | ||
1744 | static irqreturn_t pl08x_irq(int irq, void *dev) | |
1745 | { | |
1746 | struct pl08x_driver_data *pl08x = dev; | |
1747 | u32 mask = 0; | |
1748 | u32 val; | |
1749 | int i; | |
1750 | ||
1751 | val = readl(pl08x->base + PL080_ERR_STATUS); | |
1752 | if (val) { | |
1753 | /* | |
1754 | * An error interrupt (on one or more channels) | |
1755 | */ | |
1756 | dev_err(&pl08x->adev->dev, | |
1757 | "%s error interrupt, register value 0x%08x\n", | |
1758 | __func__, val); | |
1759 | /* | |
1760 | * Simply clear ALL PL08X error interrupts, | |
1761 | * regardless of channel and cause | |
1762 | * FIXME: should be 0x00000003 on PL081 really. | |
1763 | */ | |
1764 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | |
1765 | } | |
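/*
 * Editorial sketch for the FIXME above (untested; assumes vd->channels
 * reflects the variant): clear only the error bits that actually exist,
 * i.e.
 *
 *	writel((1 << pl08x->vd->channels) - 1,
 *	       pl08x->base + PL080_ERR_CLEAR);
 *
 * which yields 0x03 on a two-channel PL081 and 0xFF on a PL080.
 */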
1766 | val = readl(pl08x->base + PL080_INT_STATUS); | |
1767 | for (i = 0; i < pl08x->vd->channels; i++) { | |
1768 | if ((1 << i) & val) { | |
1769 | /* Locate physical channel */ | |
1770 | struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; | |
1771 | struct pl08x_dma_chan *plchan = phychan->serving; | |
1772 | ||
1773 | /* Schedule tasklet on this channel */ | |
1774 | tasklet_schedule(&plchan->tasklet); | |
1775 | ||
1776 | mask |= (1 << i); | |
1777 | } | |
1778 | } | |
1779 | /* | |
1780 | * Clear only the terminal interrupts on channels we processed | |
1781 | */ | |
1782 | writel(mask, pl08x->base + PL080_TC_CLEAR); | |
1783 | ||
1784 | return mask ? IRQ_HANDLED : IRQ_NONE; | |
1785 | } | |
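/*
 * Editorial note: the hard IRQ handler above only acknowledges terminal
 * count interrupts and schedules the per-channel tasklet; completion
 * callbacks, descriptor recycling and physical channel hand-over all
 * happen later in pl08x_tasklet(), outside hard interrupt context.
 */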
1786 | ||
1787 | /* | |
1788 | * Initialise the DMAC memcpy/slave channels: each virtual | |
1789 | * channel gets a local wrapper holding the data it requires. | |
1790 | */ | |
1791 | static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | |
1792 | struct dma_device *dmadev, | |
1793 | unsigned int channels, | |
1794 | bool slave) | |
1795 | { | |
1796 | struct pl08x_dma_chan *chan; | |
1797 | int i; | |
1798 | ||
1799 | INIT_LIST_HEAD(&dmadev->channels); | |
1800 | /* | |
1801 | * Register as many memcpy channels as we have physical channels; | |
1802 | * we won't always be able to use them all, but the code has to | |
1803 | * cope with that situation. | |
1804 | */ | |
1805 | for (i = 0; i < channels; i++) { | |
1806 | chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); | |
1807 | if (!chan) { | |
1808 | dev_err(&pl08x->adev->dev, | |
1809 | "%s no memory for channel\n", __func__); | |
1810 | return -ENOMEM; | |
1811 | } | |
1812 | ||
1813 | chan->host = pl08x; | |
1814 | chan->state = PL08X_CHAN_IDLE; | |
1815 | ||
1816 | if (slave) { | |
1817 | chan->slave = true; | |
1818 | chan->name = pl08x->pd->slave_channels[i].bus_id; | |
1819 | chan->cd = &pl08x->pd->slave_channels[i]; | |
1820 | } else { | |
1821 | chan->cd = &pl08x->pd->memcpy_channel; | |
1822 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); | |
1823 | if (!chan->name) { | |
1824 | kfree(chan); | |
1825 | return -ENOMEM; | |
1826 | } | |
1827 | } | |
1828 | dev_info(&pl08x->adev->dev, | |
1829 | "initialize virtual channel \"%s\"\n", | |
1830 | chan->name); | |
1831 | ||
1832 | chan->chan.device = dmadev; | |
91aa5fad RKAL |
1833 | chan->chan.cookie = 0; |
1834 | chan->lc = 0; | |
e8689e63 LW |
1835 | |
1836 | spin_lock_init(&chan->lock); | |
1837 | INIT_LIST_HEAD(&chan->desc_list); | |
1838 | tasklet_init(&chan->tasklet, pl08x_tasklet, | |
1839 | (unsigned long) chan); | |
1840 | ||
1841 | list_add_tail(&chan->chan.device_node, &dmadev->channels); | |
1842 | } | |
1843 | dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", | |
1844 | i, slave ? "slave" : "memcpy"); | |
1845 | return i; | |
1846 | } | |
1847 | ||
1848 | static void pl08x_free_virtual_channels(struct dma_device *dmadev) | |
1849 | { | |
1850 | struct pl08x_dma_chan *chan = NULL; | |
1851 | struct pl08x_dma_chan *next; | |
1852 | ||
1853 | list_for_each_entry_safe(chan, | |
1854 | next, &dmadev->channels, chan.device_node) { | |
1855 | list_del(&chan->chan.device_node); | |
1856 | kfree(chan); | |
1857 | } | |
1858 | } | |
1859 | ||
1860 | #ifdef CONFIG_DEBUG_FS | |
1861 | static const char *pl08x_state_str(enum pl08x_dma_chan_state state) | |
1862 | { | |
1863 | switch (state) { | |
1864 | case PL08X_CHAN_IDLE: | |
1865 | return "idle"; | |
1866 | case PL08X_CHAN_RUNNING: | |
1867 | return "running"; | |
1868 | case PL08X_CHAN_PAUSED: | |
1869 | return "paused"; | |
1870 | case PL08X_CHAN_WAITING: | |
1871 | return "waiting"; | |
1872 | default: | |
1873 | break; | |
1874 | } | |
1875 | return "UNKNOWN STATE"; | |
1876 | } | |
1877 | ||
1878 | static int pl08x_debugfs_show(struct seq_file *s, void *data) | |
1879 | { | |
1880 | struct pl08x_driver_data *pl08x = s->private; | |
1881 | struct pl08x_dma_chan *chan; | |
1882 | struct pl08x_phy_chan *ch; | |
1883 | unsigned long flags; | |
1884 | int i; | |
1885 | ||
1886 | seq_printf(s, "PL08x physical channels:\n"); | |
1887 | seq_printf(s, "CHANNEL:\tUSER:\n"); | |
1888 | seq_printf(s, "--------\t-----\n"); | |
1889 | for (i = 0; i < pl08x->vd->channels; i++) { | |
1890 | struct pl08x_dma_chan *virt_chan; | |
1891 | ||
1892 | ch = &pl08x->phy_chans[i]; | |
1893 | ||
1894 | spin_lock_irqsave(&ch->lock, flags); | |
1895 | virt_chan = ch->serving; | |
1896 | ||
1897 | seq_printf(s, "%d\t\t%s\n", | |
1898 | ch->id, virt_chan ? virt_chan->name : "(none)"); | |
1899 | ||
1900 | spin_unlock_irqrestore(&ch->lock, flags); | |
1901 | } | |
1902 | ||
1903 | seq_printf(s, "\nPL08x virtual memcpy channels:\n"); | |
1904 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | |
1905 | seq_printf(s, "--------\t------\n"); | |
1906 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { | |
3e2a037c | 1907 | seq_printf(s, "%s\t\t%s\n", chan->name, |
e8689e63 LW |
1908 | pl08x_state_str(chan->state)); |
1909 | } | |
1910 | ||
1911 | seq_printf(s, "\nPL08x virtual slave channels:\n"); | |
1912 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | |
1913 | seq_printf(s, "--------\t------\n"); | |
1914 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { | |
3e2a037c | 1915 | seq_printf(s, "%s\t\t%s\n", chan->name, |
e8689e63 LW |
1916 | pl08x_state_str(chan->state)); |
1917 | } | |
1918 | ||
1919 | return 0; | |
1920 | } | |
1921 | ||
1922 | static int pl08x_debugfs_open(struct inode *inode, struct file *file) | |
1923 | { | |
1924 | return single_open(file, pl08x_debugfs_show, inode->i_private); | |
1925 | } | |
1926 | ||
1927 | static const struct file_operations pl08x_debugfs_operations = { | |
1928 | .open = pl08x_debugfs_open, | |
1929 | .read = seq_read, | |
1930 | .llseek = seq_lseek, | |
1931 | .release = single_release, | |
1932 | }; | |
1933 | ||
1934 | static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | |
1935 | { | |
1936 | /* Expose a simple debugfs interface to view channel and state info */ | |
1937 | (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, | |
1938 | NULL, pl08x, | |
1939 | &pl08x_debugfs_operations); | |
1940 | } | |
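/*
 * Editorial usage note: with debugfs mounted, the file is created in the
 * debugfs root under the AMBA device's name, so channel state can be
 * inspected with something like (the actual name is platform-dependent):
 *
 *	cat /sys/kernel/debug/<amba-device-name>
 */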
1941 | ||
1942 | #else | |
1943 | static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | |
1944 | { | |
1945 | } | |
1946 | #endif | |
1947 | ||
1948 | static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | |
1949 | { | |
1950 | struct pl08x_driver_data *pl08x; | |
1951 | struct vendor_data *vd = id->data; | |
1952 | int ret = 0; | |
1953 | int i; | |
1954 | ||
1955 | ret = amba_request_regions(adev, NULL); | |
1956 | if (ret) | |
1957 | return ret; | |
1958 | ||
1959 | /* Create the driver state holder */ | |
1960 | pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); | |
1961 | if (!pl08x) { | |
1962 | ret = -ENOMEM; | |
1963 | goto out_no_pl08x; | |
1964 | } | |
1965 | ||
1966 | /* Initialize memcpy engine */ | |
1967 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); | |
1968 | pl08x->memcpy.dev = &adev->dev; | |
1969 | pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources; | |
1970 | pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; | |
1971 | pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; | |
1972 | pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; | |
1973 | pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; | |
1974 | pl08x->memcpy.device_issue_pending = pl08x_issue_pending; | |
1975 | pl08x->memcpy.device_control = pl08x_control; | |
1976 | ||
1977 | /* Initialize slave engine */ | |
1978 | dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); | |
1979 | pl08x->slave.dev = &adev->dev; | |
1980 | pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; | |
1981 | pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; | |
1982 | pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; | |
1983 | pl08x->slave.device_tx_status = pl08x_dma_tx_status; | |
1984 | pl08x->slave.device_issue_pending = pl08x_issue_pending; | |
1985 | pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; | |
1986 | pl08x->slave.device_control = pl08x_control; | |
1987 | ||
1988 | /* Get the platform data */ | |
1989 | pl08x->pd = dev_get_platdata(&adev->dev); | |
1990 | if (!pl08x->pd) { | |
1991 | dev_err(&adev->dev, "no platform data supplied\n"); | |
1992 | ret = -EINVAL; | |
1993 | goto out_no_platdata; | |
1994 | } | |
1995 | /* Assign useful pointers to the driver state */ | |
1996 | pl08x->adev = adev; | |
1997 | pl08x->vd = vd; | |
1998 | ||
1999 | /* A DMA memory pool for LLIs, aligned to PL08X_ALIGN bytes */ | |
2000 | pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, | |
2001 | PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); | |
2002 | if (!pl08x->pool) { | |
2003 | ret = -ENOMEM; | |
2004 | goto out_no_lli_pool; | |
2005 | } | |
2006 | ||
2007 | spin_lock_init(&pl08x->lock); | |
2008 | ||
2009 | pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); | |
2010 | if (!pl08x->base) { | |
2011 | ret = -ENOMEM; | |
2012 | goto out_no_ioremap; | |
2013 | } | |
2014 | ||
2015 | /* Turn on the PL08x */ | |
2016 | pl08x_ensure_on(pl08x); | |
2017 | ||
2018 | /* | |
2019 | * Clear any pending interrupts, then attach the interrupt handler | |
2020 | */ | |
2021 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | |
2022 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); | |
2023 | ||
2024 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, | |
b05cd8f4 | 2025 | DRIVER_NAME, pl08x); |
e8689e63 LW |
2026 | if (ret) { |
2027 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", | |
2028 | __func__, adev->irq[0]); | |
2029 | goto out_no_irq; | |
2030 | } | |
2031 | ||
2032 | /* Initialize physical channels */ | |
2033 | pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), | |
2034 | GFP_KERNEL); | |
2035 | if (!pl08x->phy_chans) { | |
2036 | dev_err(&adev->dev, "%s failed to allocate " | |
2037 | "physical channel holders\n", __func__); | |
2038 | ret = -ENOMEM; | |
2039 | goto out_no_phychans; | |
2040 | } | |
2041 | ||
2042 | for (i = 0; i < vd->channels; i++) { | |
2043 | struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; | |
2044 | ||
2045 | ch->id = i; | |
2046 | ch->base = pl08x->base + PL080_Cx_BASE(i); | |
2047 | spin_lock_init(&ch->lock); | |
2048 | ch->serving = NULL; | |
2049 | ch->signal = -1; | |
2050 | dev_info(&adev->dev, | |
2051 | "physical channel %d is %s\n", i, | |
2052 | pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); | |
2053 | } | |
2054 | ||
2055 | /* Register as many memcpy channels as there are physical channels */ | |
2056 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, | |
2057 | pl08x->vd->channels, false); | |
2058 | if (ret <= 0) { | |
2059 | dev_warn(&pl08x->adev->dev, | |
2060 | "%s failed to enumerate memcpy channels - %d\n", | |
2061 | __func__, ret); | |
2062 | goto out_no_memcpy; | |
2063 | } | |
2064 | pl08x->memcpy.chancnt = ret; | |
2065 | ||
2066 | /* Register slave channels */ | |
2067 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, | |
2068 | pl08x->pd->num_slave_channels, | |
2069 | true); | |
2070 | if (ret <= 0) { | |
2071 | dev_warn(&pl08x->adev->dev, | |
2072 | "%s failed to enumerate slave channels - %d\n", | |
2073 | __func__, ret); | |
2074 | goto out_no_slave; | |
2075 | } | |
2076 | pl08x->slave.chancnt = ret; | |
2077 | ||
2078 | ret = dma_async_device_register(&pl08x->memcpy); | |
2079 | if (ret) { | |
2080 | dev_warn(&pl08x->adev->dev, | |
2081 | "%s failed to register memcpy as an async device - %d\n", | |
2082 | __func__, ret); | |
2083 | goto out_no_memcpy_reg; | |
2084 | } | |
2085 | ||
2086 | ret = dma_async_device_register(&pl08x->slave); | |
2087 | if (ret) { | |
2088 | dev_warn(&pl08x->adev->dev, | |
2089 | "%s failed to register slave as an async device - %d\n", | |
2090 | __func__, ret); | |
2091 | goto out_no_slave_reg; | |
2092 | } | |
2093 | ||
2094 | amba_set_drvdata(adev, pl08x); | |
2095 | init_pl08x_debugfs(pl08x); | |
b05cd8f4 RKAL |
2096 | dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", |
2097 | amba_part(adev), amba_rev(adev), | |
2098 | (unsigned long long)adev->res.start, adev->irq[0]); | |
e8689e63 LW |
2099 | return 0; |
2100 | ||
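/*
 * Editorial note: the error path below unwinds in exactly the reverse
 * order of the allocations above, so each label undoes only what had
 * already succeeded when the corresponding failure occurred.
 */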
2101 | out_no_slave_reg: | |
2102 | dma_async_device_unregister(&pl08x->memcpy); | |
2103 | out_no_memcpy_reg: | |
2104 | pl08x_free_virtual_channels(&pl08x->slave); | |
2105 | out_no_slave: | |
2106 | pl08x_free_virtual_channels(&pl08x->memcpy); | |
2107 | out_no_memcpy: | |
2108 | kfree(pl08x->phy_chans); | |
2109 | out_no_phychans: | |
2110 | free_irq(adev->irq[0], pl08x); | |
2111 | out_no_irq: | |
2112 | iounmap(pl08x->base); | |
2113 | out_no_ioremap: | |
2114 | dma_pool_destroy(pl08x->pool); | |
2115 | out_no_lli_pool: | |
2116 | out_no_platdata: | |
2117 | kfree(pl08x); | |
2118 | out_no_pl08x: | |
2119 | amba_release_regions(adev); | |
2120 | return ret; | |
2121 | } | |
2122 | ||
2123 | /* The PL080 has 8 channels and the PL081 has just 2 */ | |
2124 | static struct vendor_data vendor_pl080 = { | |
e8689e63 LW |
2125 | .channels = 8, |
2126 | .dualmaster = true, | |
2127 | }; | |
2128 | ||
2129 | static struct vendor_data vendor_pl081 = { | |
e8689e63 LW |
2130 | .channels = 2, |
2131 | .dualmaster = false, | |
2132 | }; | |
2133 | ||
2134 | static struct amba_id pl08x_ids[] = { | |
2135 | /* PL080 */ | |
2136 | { | |
2137 | .id = 0x00041080, | |
2138 | .mask = 0x000fffff, | |
2139 | .data = &vendor_pl080, | |
2140 | }, | |
2141 | /* PL081 */ | |
2142 | { | |
2143 | .id = 0x00041081, | |
2144 | .mask = 0x000fffff, | |
2145 | .data = &vendor_pl081, | |
2146 | }, | |
2147 | /* Nomadik 8815 PL080 variant */ | |
2148 | { | |
2149 | .id = 0x00280880, | |
2150 | .mask = 0x00ffffff, | |
2151 | .data = &vendor_pl080, | |
2152 | }, | |
2153 | { 0, 0 }, | |
2154 | }; | |
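/*
 * Editorial note: AMBA matching tests (periphid & mask) == id. The
 * 0x000fffff masks above match the part number and designer fields while
 * ignoring the revision nibble; the Nomadik entry's wider 0x00ffffff mask
 * pins the revision field of that vendor-specific variant as well.
 */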
2155 | ||
2156 | static struct amba_driver pl08x_amba_driver = { | |
2157 | .drv.name = DRIVER_NAME, | |
2158 | .id_table = pl08x_ids, | |
2159 | .probe = pl08x_probe, | |
2160 | }; | |
2161 | ||
2162 | static int __init pl08x_init(void) | |
2163 | { | |
2164 | int retval; | |
2165 | retval = amba_driver_register(&pl08x_amba_driver); | |
2166 | if (retval) | |
2167 | printk(KERN_WARNING DRIVER_NAME | |
e8b5e11d | 2168 | ": failed to register as an AMBA device (%d)\n", |
e8689e63 LW |
2169 | retval); |
2170 | return retval; | |
2171 | } | |
2172 | subsys_initcall(pl08x_init); |