// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, which is
 * protected by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel().  Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
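
/*
 * A minimal client-side sketch of the opportunistic (non-exclusive) API
 * described above, assuming a DMA_MEMCPY-capable provider is registered;
 * descriptor preparation and error handling are elided:
 *
 *        dmaengine_get();                        (pin providers, take refs)
 *        chan = dma_find_channel(DMA_MEMCPY);    (lockless per-cpu lookup)
 *        if (chan) {
 *                ... prepare and submit descriptors on chan ...
 *                dma_async_issue_pending(chan);
 *        }
 *        dmaengine_put();                        (drop refs, allow unload)
 */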

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
        &dev_attr_memcpy_count.attr,
        &dev_attr_bytes_transferred.attr,
        &dev_attr_in_use.attr,
        NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                ida_free(&dma_ida, chan_dev->dev_id);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}

static struct class dma_devclass = {
        .name = "dma",
        .dev_groups = dma_dev_groups,
        .dev_release = chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
                            const dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);
        int ret;

        /* The channel is already in use, update client count */
        if (chan->client_count) {
                __module_get(owner);
                goto out;
        }

        if (!try_module_get(owner))
                return -ENODEV;

        /* allocate upon first client reference */
        if (chan->device->device_alloc_chan_resources) {
                ret = chan->device->device_alloc_chan_resources(chan);
                if (ret < 0)
                        goto err_out;
        }

        if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                balance_ref_count(chan);

out:
        chan->client_count++;
        return 0;

err_out:
        module_put(owner);
        return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        /* This channel is not in use, bail out */
        if (!chan->client_count)
                return;

        chan->client_count--;
        module_put(dma_chan_to_owner(chan));

        /* This channel is not in use anymore, free it */
        if (!chan->client_count && chan->device->device_free_chan_resources) {
                /* Make sure all operations have completed */
                dmaengine_synchronize(chan);
                chan->device->device_free_chan_resources(chan);
        }

        /* If the channel is used via a DMA request router, free the mapping */
        if (chan->router && chan->router->route_free) {
                chan->router->route_free(chan->router->dev, chan->route_data);
                chan->router = NULL;
                chan->route_data = NULL;
        }
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        dev_err(chan->device->dev, "%s: timeout!\n", __func__);
                        return DMA_ERROR;
                }
                if (status != DMA_IN_PROGRESS)
                        break;
                cpu_relax();
        } while (1);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);

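/*
 * Sketch of the intended dma_sync_wait() usage, assuming "chan" came from
 * dma_find_channel(DMA_MEMCPY) and "tx" is a prepared descriptor (both
 * placeholders here):
 *
 *        cookie = dmaengine_submit(tx);
 *        if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *                ... report failure ...
 *
 * The helper issues pending work itself and busy-waits with a 5 second
 * timeout, so it is only appropriate on slow paths.
 */
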
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
        int node = dev_to_node(chan->device->dev);

        return node == NUMA_NO_NODE ||
                cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *min = NULL;
        struct dma_chan *localmin = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min || chan->table_count < min->table_count)
                                min = chan;

                        if (dma_chan_is_local(chan, cpu))
                                if (!localmin ||
                                    chan->table_count < localmin->table_count)
                                        localmin = chan;
                }
        }

        chan = localmin ? localmin : min;

        if (chan)
                chan->table_count++;

        return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        chan = min_chan(cap, cpu);
                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
        struct dma_device *device;

        if (!chan || !caps)
                return -EINVAL;

        device = chan->device;

        /* check if the channel supports slave transactions */
        if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
              test_bit(DMA_CYCLIC, device->cap_mask.bits)))
                return -ENXIO;

        /*
         * Check whether it reports it uses the generic slave
         * capabilities, if not, that means it doesn't support any
         * kind of slave capabilities reporting.
         */
        if (!device->directions)
                return -ENXIO;

        caps->src_addr_widths = device->src_addr_widths;
        caps->dst_addr_widths = device->dst_addr_widths;
        caps->directions = device->directions;
        caps->max_burst = device->max_burst;
        caps->residue_granularity = device->residue_granularity;
        caps->descriptor_reuse = device->descriptor_reuse;
        caps->cmd_pause = !!device->device_pause;
        caps->cmd_resume = !!device->device_resume;
        caps->cmd_terminate = !!device->device_terminate_all;

        return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);

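/*
 * Usage sketch for dma_get_slave_caps(), e.g. gating use of
 * dmaengine_pause() on reported support; "chan" is assumed to be a
 * channel obtained via dma_request_chan():
 *
 *        struct dma_slave_caps caps;
 *
 *        if (!dma_get_slave_caps(chan, &caps) && caps.cmd_pause)
 *                ... pause/resume can be used on this channel ...
 */
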
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
                                          struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (mask && !__dma_device_satisfies_mask(dev, mask)) {
                dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        dev_dbg(dev->dev, "%s: %s busy\n",
                                __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        dev_dbg(dev->dev, "%s: %s filter said false\n",
                                __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
                                       const dma_cap_mask_t *mask,
                                       dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
        int err;

        if (chan) {
                /* Found a suitable channel, try to grab, prep, and return it.
                 * We first set DMA_PRIVATE to disable balance_ref_count as this
                 * channel will not be published in the general-purpose
                 * allocator
                 */
                dma_cap_set(DMA_PRIVATE, device->cap_mask);
                device->privatecnt++;
                err = dma_chan_get(chan);

                if (err) {
                        if (err == -ENODEV) {
                                dev_dbg(device->dev, "%s: %s module removed\n",
                                        __func__, dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else
                                dev_dbg(device->dev,
                                        "%s: failed to get %s: (%d)\n",
                                        __func__, dma_chan_name(chan), err);

                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);

                        chan = ERR_PTR(err);
                }
        }

        return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
        int err = -EBUSY;

        /* lock against __dma_request_channel */
        mutex_lock(&dma_list_mutex);

        if (chan->client_count == 0) {
                struct dma_device *device = chan->device;

                dma_cap_set(DMA_PRIVATE, device->cap_mask);
                device->privatecnt++;
                err = dma_chan_get(chan);
                if (err) {
                        dev_dbg(chan->device->dev,
                                "%s: failed to get %s: (%d)\n",
                                __func__, dma_chan_name(chan), err);
                        chan = NULL;
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                }
        } else
                chan = NULL;

        mutex_unlock(&dma_list_mutex);

        return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* lock against __dma_request_channel */
        mutex_lock(&dma_list_mutex);

        chan = find_candidate(device, &mask, NULL, NULL);

        mutex_unlock(&dma_list_mutex);

        return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
                                       dma_filter_fn fn, void *fn_param,
                                       struct device_node *np)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                /* Finds a DMA controller with matching device node */
                if (np && device->dev->of_node && np != device->dev->of_node)
                        continue;

                chan = find_candidate(device, mask, fn, fn_param);
                if (!IS_ERR(chan))
                        break;

                chan = NULL;
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n",
                 __func__,
                 chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

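/*
 * Sketch of an exclusive request using a filter callback.  The filter and
 * its match token are hypothetical; real filters usually test a
 * driver-private property such as chan->private:
 *
 *        static bool my_filter(struct dma_chan *chan, void *param)
 *        {
 *                return chan->private == param;
 *        }
 *
 *        dma_cap_mask_t mask;
 *
 *        dma_cap_zero(mask);
 *        dma_cap_set(DMA_SLAVE, mask);
 *        chan = __dma_request_channel(&mask, my_filter, match_token, NULL);
 */
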
static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
                                                    const char *name,
                                                    struct device *dev)
{
        int i;

        if (!device->filter.mapcnt)
                return NULL;

        for (i = 0; i < device->filter.mapcnt; i++) {
                const struct dma_slave_map *map = &device->filter.map[i];

                if (!strcmp(map->devname, dev_name(dev)) &&
                    !strcmp(map->slave, name))
                        return map;
        }

        return NULL;
}

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
        struct dma_device *d, *_d;
        struct dma_chan *chan = NULL;

        /* If device-tree is present get slave info from here */
        if (dev->of_node)
                chan = of_dma_request_slave_channel(dev->of_node, name);

        /* If device was enumerated by ACPI get slave info from here */
        if (has_acpi_companion(dev) && !chan)
                chan = acpi_dma_request_slave_chan_by_name(dev, name);

        if (chan) {
                /* Valid channel found or requester needs to be deferred */
                if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
                        return chan;
        }

        /* Try to find the channel via the DMA filter map(s) */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
                dma_cap_mask_t mask;
                const struct dma_slave_map *map = dma_filter_match(d, name, dev);

                if (!map)
                        continue;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                chan = find_candidate(d, &mask, d->filter.fn, map->param);
                if (!IS_ERR(chan))
                        break;
        }
        mutex_unlock(&dma_list_mutex);

        return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(dma_request_chan);

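/*
 * Typical slave-channel lifecycle in a client driver's probe path; the
 * channel name "rx" is illustrative and would come from DT/ACPI or a
 * dma_slave_map entry:
 *
 *        chan = dma_request_chan(&pdev->dev, "rx");
 *        if (IS_ERR(chan))
 *                return PTR_ERR(chan);        (may be -EPROBE_DEFER)
 *        ...
 *        dma_release_channel(chan);           (on remove or error unwind)
 */
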
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
                                           const char *name)
{
        struct dma_chan *ch = dma_request_chan(dev, name);

        if (IS_ERR(ch))
                return NULL;

        return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
        struct dma_chan *chan;

        if (!mask)
                return ERR_PTR(-ENODEV);

        chan = __dma_request_channel(mask, NULL, NULL, NULL);
        if (!chan) {
                mutex_lock(&dma_list_mutex);
                if (list_empty(&dma_device_list))
                        chan = ERR_PTR(-EPROBE_DEFER);
                else
                        chan = ERR_PTR(-ENODEV);
                mutex_unlock(&dma_list_mutex);
        }

        return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

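/*
 * Sketch of requesting any channel by capability alone, e.g. a memcpy
 * engine for bulk copy offload (the chosen capability is up to the
 * caller):
 *
 *        dma_cap_mask_t mask;
 *
 *        dma_cap_zero(mask);
 *        dma_cap_set(DMA_MEMCPY, mask);
 *        chan = dma_request_chan_by_mask(&mask);
 *        if (IS_ERR(chan))
 *                return PTR_ERR(chan);
 */
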
void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                dev_dbg(chan->device->dev,
                                        "%s: failed to get %s: (%d)\n",
                                        __func__, dma_chan_name(chan), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
        /* A device that satisfies this test has channels that will never cause
         * an async_tx channel switch event as all possible operation types can
         * be handled.
         */
        #ifdef CONFIG_ASYNC_TX_DMA
        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                return false;
        #endif

        #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                return false;
        #endif

        #if IS_ENABLED(CONFIG_ASYNC_XOR)
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        #if IS_ENABLED(CONFIG_ASYNC_PQ)
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        return true;
}

static int get_dma_id(struct dma_device *device)
{
        int rc = ida_alloc(&dma_ida, GFP_KERNEL);

        if (rc < 0)
                return rc;
        device->dev_id = rc;
        return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        int chancnt = 0, rc;
        struct dma_chan *chan;
        atomic_t *idr_ref;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        if (!device->dev) {
                pr_err("DMA device must have dev\n");
                return -EIO;
        }

        if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_MEMCPY");
                return -EIO;
        }

        if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_XOR");
                return -EIO;
        }

        if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_XOR_VAL");
                return -EIO;
        }

        if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_PQ");
                return -EIO;
        }

        if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_PQ_VAL");
                return -EIO;
        }

        if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_MEMSET");
                return -EIO;
        }

        if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_INTERRUPT");
                return -EIO;
        }

        if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_CYCLIC");
                return -EIO;
        }

        if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
                dev_err(device->dev,
                        "Device claims capability %s, but op is not defined\n",
                        "DMA_INTERLEAVE");
                return -EIO;
        }

        if (!device->device_tx_status) {
                dev_err(device->dev, "Device tx_status is not defined\n");
                return -EIO;
        }

        if (!device->device_issue_pending) {
                dev_err(device->dev, "Device issue_pending is not defined\n");
                return -EIO;
        }

        /* note: this only matters in the
         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
        if (!idr_ref)
                return -ENOMEM;
        rc = get_dma_id(device);
        if (rc != 0) {
                kfree(idr_ref);
                return rc;
        }

        atomic_set(idr_ref, 0);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                rc = -ENOMEM;
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        goto err_out;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                atomic_inc(idr_ref);
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        kfree(chan->dev);
                        atomic_dec(idr_ref);
                        goto err_out;
                }
                chan->client_count = 0;
        }

        if (!chancnt) {
                dev_err(device->dev, "%s: device has no channels!\n", __func__);
                rc = -ENODEV;
                goto err_out;
        }

        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                device->privatecnt++;   /* Always private */
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        /* if we never registered a channel just release the idr */
        if (atomic_read(idr_ref) == 0) {
                ida_free(&dma_ida, device->dev_id);
                kfree(idr_ref);
                return rc;
        }

        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

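/*
 * Rough shape of a provider's registration, for a hypothetical driver
 * "foo" offering slave transfers; the two callbacks shown are among the
 * mandatory ones validated above:
 *
 *        dma_cap_set(DMA_SLAVE, foo->ddev.cap_mask);
 *        foo->ddev.device_tx_status = foo_tx_status;
 *        foo->ddev.device_issue_pending = foo_issue_pending;
 *        INIT_LIST_HEAD(&foo->ddev.channels);
 *        ... add each foo channel to foo->ddev.channels ...
 *        rc = dma_async_device_register(&foo->ddev);
 */
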
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmam_device_release(struct device *dev, void *res)
{
        struct dma_device *device;

        device = *(struct dma_device **)res;
        dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
        void *p;
        int ret;

        p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        ret = dma_async_device_register(device);
        if (!ret) {
                *(struct dma_device **)p = device;
                devres_add(device->dev, p);
        } else {
                devres_free(p);
        }

        return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);

struct dmaengine_unmap_pool {
        struct kmem_cache *cache;
        const char *name;
        mempool_t *pool;
        size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
        __UNMAP_POOL(2),
        #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
        __UNMAP_POOL(16),
        __UNMAP_POOL(128),
        __UNMAP_POOL(256),
        #endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
        int order = get_count_order(nr);

        switch (order) {
        case 0 ... 1:
                return &unmap_pool[0];
        #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
        case 2 ... 4:
                return &unmap_pool[1];
        case 5 ... 7:
                return &unmap_pool[2];
        case 8:
                return &unmap_pool[3];
        #endif
        default:
                BUG();
                return NULL;
        }
}

static void dmaengine_unmap(struct kref *kref)
{
        struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
        struct device *dev = unmap->dev;
        int cnt, i;

        cnt = unmap->to_cnt;
        for (i = 0; i < cnt; i++)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_TO_DEVICE);
        cnt += unmap->from_cnt;
        for (; i < cnt; i++)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_FROM_DEVICE);
        cnt += unmap->bidi_cnt;
        for (; i < cnt; i++) {
                if (unmap->addr[i] == 0)
                        continue;
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_BIDIRECTIONAL);
        }
        cnt = unmap->map_cnt;
        mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
        if (unmap)
                kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
                struct dmaengine_unmap_pool *p = &unmap_pool[i];

                mempool_destroy(p->pool);
                p->pool = NULL;
                kmem_cache_destroy(p->cache);
                p->cache = NULL;
        }
}

static int __init dmaengine_init_unmap_pool(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
                struct dmaengine_unmap_pool *p = &unmap_pool[i];
                size_t size;

                size = sizeof(struct dmaengine_unmap_data) +
                       sizeof(dma_addr_t) * p->size;

                p->cache = kmem_cache_create(p->name, size, 0,
                                             SLAB_HWCACHE_ALIGN, NULL);
                if (!p->cache)
                        break;
                p->pool = mempool_create_slab_pool(1, p->cache);
                if (!p->pool)
                        break;
        }

        if (i == ARRAY_SIZE(unmap_pool))
                return 0;

        dmaengine_destroy_unmap_pool();
        return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
        struct dmaengine_unmap_data *unmap;

        unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
        if (!unmap)
                return NULL;

        memset(unmap, 0, sizeof(*unmap));
        kref_init(&unmap->kref);
        unmap->dev = dev;
        unmap->map_cnt = nr;

        return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

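/*
 * Sketch of the unmap-data lifecycle as used by offload clients such as
 * async_tx; "nr", "src" and "len" are placeholders.  The caller maps
 * pages into unmap->addr[], records the counts, and drops its reference
 * once done:
 *
 *        unmap = dmaengine_get_unmap_data(dev, nr, GFP_NOWAIT);
 *        if (!unmap)
 *                return -ENOMEM;
 *        unmap->len = len;
 *        unmap->to_cnt = 1;
 *        unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
 *        ...
 *        dmaengine_unmap_put(unmap);   (drops the kref taken at allocation)
 */
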
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan)
{
        tx->chan = chan;
        #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        spin_lock_init(&tx->lock);
        #endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        if (!tx)
                return DMA_COMPLETE;

        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        dev_err(tx->chan->device->dev,
                                "%s timeout waiting for descriptor submission\n",
                                __func__);
                        return DMA_ERROR;
                }
                cpu_relax();
        }
        return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *      (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = txd_next(tx);
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        /* we'll submit tx->next now, so clear the link */
        txd_clear_next(tx);
        chan = dep->chan;

        /* keep submitting up until a channel switch is detected
         * in that case we will be called again as a result of
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                txd_lock(dep);
                txd_clear_parent(dep);
                dep_next = txd_next(dep);
                if (dep_next && dep_next->chan == chan)
                        txd_clear_next(dep); /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                txd_unlock(dep);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
        int err = dmaengine_init_unmap_pool();

        if (err)
                return err;
        return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);