// SPDX-License-Identifier: GPL-2.0
/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

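/*
 * Descriptor life cycle, as implemented below (a cyclic descriptor is
 * re-queued as DESC_SUBMITTED instead of going idle):
 *
 *	DESC_IDLE      -> DESC_PREPARED   in shdma_add_desc()
 *	DESC_PREPARED  -> DESC_SUBMITTED  in shdma_tx_submit()
 *	DESC_SUBMITTED -> DESC_COMPLETED  in chan_irqt()
 *	DESC_COMPLETED -> DESC_WAITING    in __ld_cleanup(), callback pending
 *	DESC_WAITING   -> DESC_IDLE       in __ld_cleanup(), once acked
 */
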
#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can use only a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .xfer_setup() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with schan->chan_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;
	} else {
		match = schan->real_slave_id;
	}

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;

	return 0;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		schan->real_slave_id = slave->slave_id;
		ret = shdma_setup_slave(schan, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		/* Normal mode: real_slave_id was set by filter */
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

/*
 * This is the standard shdma filter function to be used as a replacement to the
 * "old" method, using the .private pointer.
 * You always have to pass a valid slave ID as the argument; old drivers that
 * pass ERR_PTR(-EINVAL) as the filter parameter and set the slave ID up in
 * dma_slave_config need to be updated, so that the slave_id field can be
 * removed from dma_slave_config. If this filter is used, the slave driver,
 * after calling dma_request_channel(), will also have to call
 * dmaengine_slave_config() with .direction, and either .src_addr or .dst_addr
 * set.
 *
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers, using
 * these services, would have to provide their own filters, which first would
 * check the device driver, similar to how other DMAC drivers, e.g.,
 * sa11x0-dma.c, do this, and only then, in case of a match, call this common
 * filter.
 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
 * In that case the MID-RID value is used for slave channel filtering and is
 * passed to this function in the "arg" parameter.
 */
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int slave_id = (long)arg;
	int ret;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)
		return false;

	schan = to_shdma_chan(chan);
	sdev = to_shdma_dev(chan->device);

	/*
	 * For DT, the schan->slave_id field is generated by the
	 * set_slave function from the slave ID that is passed in
	 * from xlate. For the non-DT case, the slave ID is
	 * directly passed into the filter function by the driver.
	 */
	if (schan->dev->of_node) {
		ret = sdev->ops->set_slave(schan, slave_id, 0, true);
		if (ret < 0)
			return false;

		schan->real_slave_id = schan->slave_id;
		return true;
	}

	if (slave_id < 0) {
		/* No slave requested - arbitrary channel */
		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
		return true;
	}

	if (slave_id >= slave_num)
		return false;

	ret = sdev->ops->set_slave(schan, slave_id, 0, true);
	if (ret < 0)
		return false;

	schan->real_slave_id = slave_id;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);

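/*
 * A minimal usage sketch (illustrative, not part of this library): a client
 * driver could request a channel through this filter roughly as follows,
 * where MY_SLAVE_ID is a hypothetical, platform-specific request line:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(long)MY_SLAVE_ID);
 *
 * As noted above, the client must then call dmaengine_slave_config() with
 * .direction and either .src_addr or .dst_addr set before preparing
 * transfers.
 */
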
static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	struct dmaengine_desc_callback cb;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				fallthrough;
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			} else if (schan->pm_state == SHDMA_PM_PENDING) {
				shdma_chan_xfer_ld_queue(schan);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of the DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	schan->real_slave_id = 0;

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor on success or NULL on error
 * Locks: called with schan->chan_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element, which points at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 * cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}

#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	struct dma_async_tx_descriptor *desc;
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist *sgl;
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d\n",
			sg_len, SHDMA_MAX_SG_LEN);
		return NULL;
	}

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	/*
	 * Allocate the sg list dynamically as it would otherwise consume too
	 * much stack space.
	 */
	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);

	kfree(sgl);
	return desc;
}

static int shdma_terminate_all(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);

	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
		/* Record partial transfer */
		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
						struct shdma_desc, node);
		desc->partial = ops->get_partial(schan, desc);
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);

	return 0;
}

static int shdma_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	/*
	 * Only .direction and the matching .src_addr or .dst_addr are used
	 * here; slave drivers are encouraged to set all three fields.
	 */
	if (!config)
		return -EINVAL;

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel while using it...
	 */
	return shdma_setup_slave(schan,
				 config->direction == DMA_DEV_TO_MEM ?
				 config->src_addr : config->dst_addr);
}

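/*
 * A minimal configuration sketch (illustrative only, assuming a hypothetical
 * device FIFO at fifo_dma_addr): for a device-to-memory channel the slave
 * driver would pass
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_dma_addr,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * which ends up in shdma_config() above and reserves the slave via
 * shdma_setup_slave().
 */
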
static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error
	 */
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;

			sdesc->mark = DESC_IDLE;
			dmaengine_desc_get_callback_invoke(tx, NULL);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);

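/*
 * Interrupt handling is split in two stages: chan_irq() runs in hard-IRQ
 * context and only queries the hardware driver whether this channel raised
 * the interrupt, while chan_irqt(), the threaded half, marks the completed
 * descriptor, kicks off the next queued transfer and runs descriptor
 * clean-up.
 */
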
static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
			   unsigned long flags, const char *name)
{
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return ret;
}
EXPORT_SYMBOL(shdma_request_irq);

void shdma_chan_probe(struct shdma_dev *sdev,
			   struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[id] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);

void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now, they can trivially be made optional
	 * later as required
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
	dma_dev->device_config = shdma_config;
	dma_dev->device_terminate_all = shdma_terminate_all;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);

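/*
 * A registration sketch for a hardware glue driver (illustrative only;
 * my_ops, my_sdev, my_schan and struct my_desc are hypothetical driver
 * objects, following how shdma.c uses this library):
 *
 *	my_sdev->shdma_dev.ops = &my_ops;
 *	my_sdev->shdma_dev.desc_size = sizeof(struct my_desc);
 *	err = shdma_init(&pdev->dev, &my_sdev->shdma_dev, nr_channels);
 *	if (err < 0)
 *		return err;
 *
 *	for (i = 0; i < nr_channels; i++)
 *		shdma_chan_probe(&my_sdev->shdma_dev,
 *				 &my_schan[i]->shdma_chan, i);
 *
 *	err = dma_async_device_register(&my_sdev->shdma_dev.dma_dev);
 */
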
void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
	shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	bitmap_free(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");