// SPDX-License-Identifier: GPL-2.0-only
/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18
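
/*
 * Worked example, combining the offsets above with the membase math in
 * td_probe(): channel 0 (RX) lives at instance base 0x00, channel 1 (TX)
 * at 0x18, channel 2 (RX) at 0x40, channel 3 (TX) at 0x58, and so on --
 * each RX/TX pair shares one 0x40-byte register instance.
 */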

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* Bytes per row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14

#define TIMB_DMA_DESC_SIZE	8

struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_transfer_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[];
};
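
/*
 * td_probe() allocates struct timb_dma and all of its channels in a single
 * block (struct_size()), so channels[] follows the struct directly in
 * memory. tdchantotd() below relies on exactly this layout to get from a
 * channel pointer back to the owning struct timb_dma.
 */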

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;

	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Must be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}
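
/*
 * Hardware descriptor layout, as implied by the byte packing in
 * td_fill_desc() below (all fields little-endian):
 *
 *	byte 0:    control; 0x21 = transfer + valid, bit 0x02 marks the
 *	           last element of a chain
 *	byte 1:    unused, written as zero
 *	bytes 2-3: transfer length in bytes (hence the USHRT_MAX limit)
 *	bytes 4-7: 32-bit bus address of the data buffer
 */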

static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_DEV_TO_MEM) {
		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}
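
/*
 * DHAR, the high half of the descriptor address, is always written as zero
 * above. That matches td_probe() programming TIMBDMA_32BIT_ADDR into the
 * ACR: the FPGA is only ever handed 32-bit bus addresses.
 */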

static void __td_finish(struct timb_dma_chan *td_chan)
{
	struct dmaengine_desc_callback cb;
	struct dma_async_tx_descriptor *txd;
	struct timb_dma_desc *td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_DEV_TO_MEM)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	/* Currently no support for stopping TX transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
	*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	dmaengine_desc_get_callback(txd, &cb);

	list_move(&td_desc->desc_node, &td_chan->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	dmaengine_desc_callback_invoke(&cb, NULL);
}

static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;

		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
					struct timb_dma_desc, desc_node);

			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}
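
/*
 * Interrupts are opt-in per descriptor: only transfers prepared with
 * DMA_PREP_INTERRUPT set td_desc->interrupt, and only those channels get
 * their bit raised in the IER (see __td_enable_chan_irq() and the mask
 * computed by __td_ier_mask() above). Transfers without the flag are
 * completed when the channel is polled through td_issue_pending().
 */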

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}

static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}
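
/*
 * A minimal client-side sketch (hypothetical code, not part of this
 * driver) of how a transfer reaches td_tx_submit() through the generic
 * dmaengine API; "sg" and "nents" stand for a DMA-mapped scatterlist the
 * client owns:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_slave_sg(chan, sg, nents, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(txd);		<-- ends up in td_tx_submit()
 *	dma_async_issue_pending(chan);		<-- ends up in td_issue_pending()
 */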

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc)
		goto out;

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list)
		goto err;

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}
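
/*
 * Descriptor pool lifecycle: td_alloc_chan_resources() fills free_list
 * with td_chan->descs preallocated descriptors, and td_desc_get() /
 * td_desc_put() recycle them across transfers. A descriptor is only
 * reused once the client has ACKed it (async_tx_test_ack() above),
 * because the client may still be inspecting the completed txd.
 */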

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);

		if (!td_desc) {
			if (i)
				break;

			dev_err(chan2dev(chan),
				"Couldn't allocate any descriptors\n");
			return -ENOMEM;
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;

		/* each sg element consumes one hardware descriptor */
		if (desc_usage + TIMB_DMA_DESC_SIZE > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}

static int td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running transfer */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}
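
/*
 * Interrupt handling is split in two: td_irq() below masks all channel
 * interrupts and schedules the tasklet; td_tasklet() then acks the
 * pending ISR bits, completes finished transfers, starts queued ones and
 * finally recomputes and restores the IER mask.
 */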

static void td_tasklet(struct tasklet_struct *t)
{
	struct timb_dma *td = from_tasklet(td, t, tasklet);
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;

			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(struct_size(td, channels, pdata->nr_channels),
		     GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_setup(&td->tasklet, td_tasklet);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_tx_status = td_tx_status;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}
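
/*
 * A minimal sketch of the platform data this driver expects from its MFD
 * parent (timberdale), using only the fields referenced above. The values
 * are made up for illustration; the real channel tables live with the
 * timberdale MFD code. Note the invariant checked in td_probe(): even
 * channel indices must be RX, odd indices TX.
 *
 *	static struct timb_dma_platform_data example_pdata = {
 *		.nr_channels = 2,
 *		.channels = {
 *			{ .rx = true,  .descriptors = 2,
 *			  .descriptor_elements = 4, .bytes_per_line = 0 },
 *			{ .rx = false, .descriptors = 2,
 *			  .descriptor_elements = 4 },
 *		},
 *	};
 */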

static int td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
	},
	.probe	= td_probe,
	.remove	= td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);