Commit | Line | Data |
---|---|---|
9ab65aff | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
c13c8260 CL |
2 | /* |
3 | * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. | |
c13c8260 CL |
4 | */ |
5 | ||
6 | /* | |
7 | * This code implements the DMA subsystem. It provides a HW-neutral interface | |
8 | * for other kernel code to use asynchronous memory copy capabilities, | |
9 | * if present, and allows different HW DMA drivers to register as providing | |
10 | * this capability. | |
11 | * | |
12 | * Because we are accelerating what is already a relatively fast |
13 | * operation, the code goes to great lengths to avoid additional overhead, | |
14 | * such as locking. | |
15 | * | |
16 | * LOCKING: | |
17 | * | |
aa1e6f1a DW |
18 | * The subsystem keeps a global list of dma_device structs, which is protected |
19 | * by a mutex, dma_list_mutex. |
c13c8260 | 20 | * |
f27c580c DW |
21 | * A subsystem can get access to a channel by calling dmaengine_get() followed |
22 | * by dma_find_channel(); if it needs an exclusive channel it can call |
23 | * dma_request_channel() instead. Once a channel is allocated, a reference is |
24 | * taken against its corresponding driver to prevent removal. |
25 | * | |
c13c8260 CL |
26 | * Each device has a channels list, which runs unlocked but is never modified |
27 | * once the device is registered; it is simply set up by the driver. |
28 | * | |
44348e8a | 29 | * See Documentation/driver-api/dmaengine for more details |
c13c8260 CL |
30 | */ |
31 | ||
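
Example (illustrative, not part of this file): the two access models described above, side by side. The function name `example_clients()` is an assumption; the calls themselves (`dmaengine_get()`, `dma_find_channel()`, `dma_request_channel()`, `dma_release_channel()`) are the real client API.

```c
#include <linux/dmaengine.h>

/* Illustrative only: contrast the shared and exclusive channel models. */
static void example_clients(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	/* Shared model: register interest, then use the per-CPU table. */
	dmaengine_get();
	chan = dma_find_channel(DMA_MEMCPY);
	if (chan) {
		/* ... issue memcpy descriptors on chan ... */
	}
	dmaengine_put();

	/* Exclusive model: grab a private channel matching a cap mask. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (chan)
		dma_release_channel(chan);
}
```
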
63433250 JP |
32 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
33 | ||
a8135d0d | 34 | #include <linux/platform_device.h> |
b7f080cf | 35 | #include <linux/dma-mapping.h> |
c13c8260 CL |
36 | #include <linux/init.h> |
37 | #include <linux/module.h> | |
7405f74b | 38 | #include <linux/mm.h> |
c13c8260 CL |
39 | #include <linux/device.h> |
40 | #include <linux/dmaengine.h> | |
41 | #include <linux/hardirq.h> | |
42 | #include <linux/spinlock.h> | |
43 | #include <linux/percpu.h> | |
44 | #include <linux/rcupdate.h> | |
45 | #include <linux/mutex.h> | |
7405f74b | 46 | #include <linux/jiffies.h> |
2ba05622 | 47 | #include <linux/rculist.h> |
864498aa | 48 | #include <linux/idr.h> |
5a0e3ad6 | 49 | #include <linux/slab.h> |
4e82f5dd AS |
50 | #include <linux/acpi.h> |
51 | #include <linux/acpi_dma.h> | |
9a6cecc8 | 52 | #include <linux/of_dma.h> |
45c463ae | 53 | #include <linux/mempool.h> |
98fa15f3 | 54 | #include <linux/numa.h> |
c13c8260 | 55 | |
833d88f3 AS |
56 | #include "dmaengine.h" |
57 | ||
c13c8260 | 58 | static DEFINE_MUTEX(dma_list_mutex); |
adc064cd | 59 | static DEFINE_IDA(dma_ida); |
c13c8260 | 60 | static LIST_HEAD(dma_device_list); |
6f49a57a | 61 | static long dmaengine_ref_count; |
c13c8260 | 62 | |
e937cc1d PU |
63 | /* --- debugfs implementation --- */ |
64 | #ifdef CONFIG_DEBUG_FS | |
65 | #include <linux/debugfs.h> | |
66 | ||
26cf132d PU |
67 | static struct dentry *rootdir; |
68 | ||
69 | static void dmaengine_debug_register(struct dma_device *dma_dev) | |
70 | { | |
71 | dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev), | |
72 | rootdir); | |
73 | if (IS_ERR(dma_dev->dbg_dev_root)) | |
74 | dma_dev->dbg_dev_root = NULL; | |
75 | } | |
76 | ||
77 | static void dmaengine_debug_unregister(struct dma_device *dma_dev) | |
78 | { | |
79 | debugfs_remove_recursive(dma_dev->dbg_dev_root); | |
80 | dma_dev->dbg_dev_root = NULL; | |
81 | } | |
82 | ||
e937cc1d PU |
83 | static void dmaengine_dbg_summary_show(struct seq_file *s, |
84 | struct dma_device *dma_dev) | |
85 | { | |
86 | struct dma_chan *chan; | |
87 | ||
88 | list_for_each_entry(chan, &dma_dev->channels, device_node) { | |
89 | if (chan->client_count) { | |
90 | seq_printf(s, " %-13s| %s", dma_chan_name(chan), | |
91 | chan->dbg_client_name ?: "in-use"); | |
92 | ||
93 | if (chan->router) | |
94 | seq_printf(s, " (via router: %s)\n", | |
95 | dev_name(chan->router->dev)); | |
96 | else | |
97 | seq_puts(s, "\n"); | |
98 | } | |
99 | } | |
100 | } | |
101 | ||
102 | static int dmaengine_summary_show(struct seq_file *s, void *data) | |
103 | { | |
104 | struct dma_device *dma_dev = NULL; | |
105 | ||
106 | mutex_lock(&dma_list_mutex); | |
107 | list_for_each_entry(dma_dev, &dma_device_list, global_node) { | |
108 | seq_printf(s, "dma%d (%s): number of channels: %u\n", | |
109 | dma_dev->dev_id, dev_name(dma_dev->dev), | |
110 | dma_dev->chancnt); | |
111 | ||
112 | if (dma_dev->dbg_summary_show) | |
113 | dma_dev->dbg_summary_show(s, dma_dev); | |
114 | else | |
115 | dmaengine_dbg_summary_show(s, dma_dev); | |
116 | ||
117 | if (!list_is_last(&dma_dev->global_node, &dma_device_list)) | |
118 | seq_puts(s, "\n"); | |
119 | } | |
120 | mutex_unlock(&dma_list_mutex); | |
121 | ||
122 | return 0; | |
123 | } | |
124 | DEFINE_SHOW_ATTRIBUTE(dmaengine_summary); | |
125 | ||
126 | static void __init dmaengine_debugfs_init(void) | |
127 | { | |
26cf132d | 128 | rootdir = debugfs_create_dir("dmaengine", NULL); |
e937cc1d PU |
129 | |
130 | /* /sys/kernel/debug/dmaengine/summary */ | |
131 | debugfs_create_file("summary", 0444, rootdir, NULL, | |
132 | &dmaengine_summary_fops); | |
133 | } | |
134 | #else | |
135 | static inline void dmaengine_debugfs_init(void) { } | |
26cf132d PU |
136 | static inline void dmaengine_debug_register(struct dma_device *dma_dev) { } |
140 | ||
141 | static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { } | |
e937cc1d PU |
142 | #endif /* CONFIG_DEBUG_FS */ |
143 | ||
c13c8260 CL |
144 | /* --- sysfs implementation --- */ |
145 | ||
71723a96 GU |
146 | #define DMA_SLAVE_NAME "slave" |
147 | ||
41d5e59c | 148 | /** |
fe333389 | 149 | * dev_to_dma_chan - convert a device pointer to its sysfs container object |
9872e23d | 150 | * @dev: device node |
41d5e59c | 151 | * |
9872e23d | 152 | * Must be called under dma_list_mutex. |
41d5e59c DW |
153 | */ |
154 | static struct dma_chan *dev_to_dma_chan(struct device *dev) | |
155 | { | |
156 | struct dma_chan_dev *chan_dev; | |
157 | ||
158 | chan_dev = container_of(dev, typeof(*chan_dev), device); | |
159 | return chan_dev->chan; | |
160 | } | |
161 | ||
58b267d3 GKH |
162 | static ssize_t memcpy_count_show(struct device *dev, |
163 | struct device_attribute *attr, char *buf) | |
c13c8260 | 164 | { |
41d5e59c | 165 | struct dma_chan *chan; |
c13c8260 CL |
166 | unsigned long count = 0; |
167 | int i; | |
41d5e59c | 168 | int err; |
c13c8260 | 169 | |
41d5e59c DW |
170 | mutex_lock(&dma_list_mutex); |
171 | chan = dev_to_dma_chan(dev); | |
172 | if (chan) { | |
173 | for_each_possible_cpu(i) | |
174 | count += per_cpu_ptr(chan->local, i)->memcpy_count; | |
175 | err = sprintf(buf, "%lu\n", count); | |
176 | } else | |
177 | err = -ENODEV; | |
178 | mutex_unlock(&dma_list_mutex); | |
c13c8260 | 179 | |
41d5e59c | 180 | return err; |
c13c8260 | 181 | } |
58b267d3 | 182 | static DEVICE_ATTR_RO(memcpy_count); |
c13c8260 | 183 | |
58b267d3 GKH |
184 | static ssize_t bytes_transferred_show(struct device *dev, |
185 | struct device_attribute *attr, char *buf) | |
c13c8260 | 186 | { |
41d5e59c | 187 | struct dma_chan *chan; |
c13c8260 CL |
188 | unsigned long count = 0; |
189 | int i; | |
41d5e59c | 190 | int err; |
c13c8260 | 191 | |
41d5e59c DW |
192 | mutex_lock(&dma_list_mutex); |
193 | chan = dev_to_dma_chan(dev); | |
194 | if (chan) { | |
195 | for_each_possible_cpu(i) | |
196 | count += per_cpu_ptr(chan->local, i)->bytes_transferred; | |
197 | err = sprintf(buf, "%lu\n", count); | |
198 | } else | |
199 | err = -ENODEV; | |
200 | mutex_unlock(&dma_list_mutex); | |
c13c8260 | 201 | |
41d5e59c | 202 | return err; |
c13c8260 | 203 | } |
58b267d3 | 204 | static DEVICE_ATTR_RO(bytes_transferred); |
c13c8260 | 205 | |
58b267d3 GKH |
206 | static ssize_t in_use_show(struct device *dev, struct device_attribute *attr, |
207 | char *buf) | |
c13c8260 | 208 | { |
41d5e59c DW |
209 | struct dma_chan *chan; |
210 | int err; | |
c13c8260 | 211 | |
41d5e59c DW |
212 | mutex_lock(&dma_list_mutex); |
213 | chan = dev_to_dma_chan(dev); | |
214 | if (chan) | |
215 | err = sprintf(buf, "%d\n", chan->client_count); | |
216 | else | |
217 | err = -ENODEV; | |
218 | mutex_unlock(&dma_list_mutex); | |
219 | ||
220 | return err; | |
c13c8260 | 221 | } |
58b267d3 | 222 | static DEVICE_ATTR_RO(in_use); |
c13c8260 | 223 | |
58b267d3 GKH |
224 | static struct attribute *dma_dev_attrs[] = { |
225 | &dev_attr_memcpy_count.attr, | |
226 | &dev_attr_bytes_transferred.attr, | |
227 | &dev_attr_in_use.attr, | |
228 | NULL, | |
c13c8260 | 229 | }; |
58b267d3 | 230 | ATTRIBUTE_GROUPS(dma_dev); |
c13c8260 | 231 | |
41d5e59c DW |
232 | static void chan_dev_release(struct device *dev) |
233 | { | |
234 | struct dma_chan_dev *chan_dev; | |
235 | ||
236 | chan_dev = container_of(dev, typeof(*chan_dev), device); | |
237 | kfree(chan_dev); | |
238 | } | |
239 | ||
c13c8260 | 240 | static struct class dma_devclass = { |
891f78ea | 241 | .name = "dma", |
58b267d3 | 242 | .dev_groups = dma_dev_groups, |
41d5e59c | 243 | .dev_release = chan_dev_release, |
c13c8260 CL |
244 | }; |
245 | ||
246 | /* --- client and device registration --- */ | |
247 | ||
9872e23d | 248 | /* enable iteration over all operation types */ |
11a0fd2b LG |
249 | static dma_cap_mask_t dma_cap_mask_all; |
250 | ||
251 | /** | |
9872e23d AS |
252 | * struct dma_chan_tbl_ent - tracks channel allocations per core/operation |
253 | * @chan: associated channel for this entry | |
11a0fd2b LG |
254 | */ |
255 | struct dma_chan_tbl_ent { | |
256 | struct dma_chan *chan; | |
257 | }; | |
258 | ||
9872e23d | 259 | /* percpu lookup table for memory-to-memory offload providers */ |
11a0fd2b LG |
260 | static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; |
261 | ||
262 | static int __init dma_channel_table_init(void) | |
263 | { | |
264 | enum dma_transaction_type cap; | |
265 | int err = 0; | |
266 | ||
267 | bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); | |
268 | ||
269 | /* 'interrupt', 'private', and 'slave' are channel capabilities, | |
270 | * but are not associated with an operation so they do not need | |
271 | * an entry in the channel_table | |
272 | */ | |
273 | clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); | |
274 | clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); | |
275 | clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); | |
276 | ||
277 | for_each_dma_cap_mask(cap, dma_cap_mask_all) { | |
278 | channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); | |
279 | if (!channel_table[cap]) { | |
280 | err = -ENOMEM; | |
281 | break; | |
282 | } | |
283 | } | |
284 | ||
285 | if (err) { | |
08baca42 | 286 | pr_err("dmaengine dma_channel_table_init failure: %d\n", err); |
11a0fd2b LG |
287 | for_each_dma_cap_mask(cap, dma_cap_mask_all) |
288 | free_percpu(channel_table[cap]); | |
289 | } | |
290 | ||
291 | return err; | |
292 | } | |
293 | arch_initcall(dma_channel_table_init); | |
294 | ||
295 | /** | |
9872e23d AS |
296 | * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU |
297 | * @chan: DMA channel to test | |
298 | * @cpu: CPU index which the channel should be close to | |
299 | * | |
300 | * Returns true if the channel is in the same NUMA-node as the CPU. | |
11a0fd2b LG |
301 | */ |
302 | static bool dma_chan_is_local(struct dma_chan *chan, int cpu) | |
303 | { | |
304 | int node = dev_to_node(chan->device->dev); | |
305 | return node == NUMA_NO_NODE || | |
306 | cpumask_test_cpu(cpu, cpumask_of_node(node)); | |
307 | } | |
308 | ||
309 | /** | |
9872e23d AS |
310 | * min_chan - finds the channel with min count and in the same NUMA-node as the CPU |
311 | * @cap: capability to match | |
312 | * @cpu: CPU index which the channel should be close to | |
11a0fd2b | 313 | * |
9872e23d AS |
314 | * If some channels are close to the given CPU, the one with the lowest |
315 | * reference count is returned. Otherwise, CPU is ignored and only the | |
11a0fd2b | 316 | * reference count is taken into account. |
9872e23d | 317 | * |
11a0fd2b LG |
318 | * Must be called under dma_list_mutex. |
319 | */ | |
320 | static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) | |
321 | { | |
322 | struct dma_device *device; | |
323 | struct dma_chan *chan; | |
324 | struct dma_chan *min = NULL; | |
325 | struct dma_chan *localmin = NULL; | |
326 | ||
327 | list_for_each_entry(device, &dma_device_list, global_node) { | |
328 | if (!dma_has_cap(cap, device->cap_mask) || | |
329 | dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
330 | continue; | |
331 | list_for_each_entry(chan, &device->channels, device_node) { | |
332 | if (!chan->client_count) | |
333 | continue; | |
334 | if (!min || chan->table_count < min->table_count) | |
335 | min = chan; | |
336 | ||
337 | if (dma_chan_is_local(chan, cpu)) | |
338 | if (!localmin || | |
339 | chan->table_count < localmin->table_count) | |
340 | localmin = chan; | |
341 | } | |
342 | } | |
343 | ||
344 | chan = localmin ? localmin : min; | |
345 | ||
346 | if (chan) | |
347 | chan->table_count++; | |
348 | ||
349 | return chan; | |
350 | } | |
351 | ||
352 | /** | |
353 | * dma_channel_rebalance - redistribute the available channels | |
354 | * | |
9872e23d AS |
355 | * Optimize for CPU isolation (each CPU gets a dedicated channel for an |
356 | * operation type) in the SMP case, and operation isolation (avoid | |
357 | * multi-tasking channels) in the non-SMP case. | |
358 | * | |
359 | * Must be called under dma_list_mutex. | |
11a0fd2b LG |
360 | */ |
361 | static void dma_channel_rebalance(void) | |
362 | { | |
363 | struct dma_chan *chan; | |
364 | struct dma_device *device; | |
365 | int cpu; | |
366 | int cap; | |
367 | ||
368 | /* undo the last distribution */ | |
369 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | |
370 | for_each_possible_cpu(cpu) | |
371 | per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; | |
372 | ||
373 | list_for_each_entry(device, &dma_device_list, global_node) { | |
374 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
375 | continue; | |
376 | list_for_each_entry(chan, &device->channels, device_node) | |
377 | chan->table_count = 0; | |
378 | } | |
379 | ||
380 | /* don't populate the channel_table if no clients are available */ | |
381 | if (!dmaengine_ref_count) | |
382 | return; | |
383 | ||
384 | /* redistribute available channels */ | |
385 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | |
386 | for_each_online_cpu(cpu) { | |
387 | chan = min_chan(cap, cpu); | |
388 | per_cpu_ptr(channel_table[cap], cpu)->chan = chan; | |
389 | } | |
390 | } | |
391 | ||
69b1189b GU |
392 | static int dma_device_satisfies_mask(struct dma_device *device, |
393 | const dma_cap_mask_t *want) | |
d379b01e DW |
394 | { |
395 | dma_cap_mask_t has; | |
396 | ||
59b5ec21 | 397 | bitmap_and(has.bits, want->bits, device->cap_mask.bits, |
d379b01e DW |
398 | DMA_TX_TYPE_END); |
399 | return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); | |
400 | } | |
401 | ||
6f49a57a DW |
402 | static struct module *dma_chan_to_owner(struct dma_chan *chan) |
403 | { | |
dae7a589 | 404 | return chan->device->owner; |
6f49a57a DW |
405 | } |
406 | ||
407 | /** | |
408 | * balance_ref_count - catch up the channel reference count | |
9872e23d | 409 | * @chan: channel to balance ->client_count versus dmaengine_ref_count |
6f49a57a | 410 | * |
9872e23d | 411 | * Must be called under dma_list_mutex. |
6f49a57a DW |
412 | */ |
413 | static void balance_ref_count(struct dma_chan *chan) | |
414 | { | |
415 | struct module *owner = dma_chan_to_owner(chan); | |
416 | ||
417 | while (chan->client_count < dmaengine_ref_count) { | |
418 | __module_get(owner); | |
419 | chan->client_count++; | |
420 | } | |
421 | } | |
422 | ||
8ad342a8 LG |
423 | static void dma_device_release(struct kref *ref) |
424 | { | |
425 | struct dma_device *device = container_of(ref, struct dma_device, ref); | |
426 | ||
427 | list_del_rcu(&device->global_node); | |
428 | dma_channel_rebalance(); | |
429 | ||
430 | if (device->device_release) | |
431 | device->device_release(device); | |
432 | } | |
433 | ||
434 | static void dma_device_put(struct dma_device *device) | |
435 | { | |
436 | lockdep_assert_held(&dma_list_mutex); | |
437 | kref_put(&device->ref, dma_device_release); | |
438 | } | |
439 | ||
6f49a57a | 440 | /** |
9872e23d AS |
441 | * dma_chan_get - try to grab a DMA channel's parent driver module |
442 | * @chan: channel to grab | |
6f49a57a | 443 | * |
9872e23d | 444 | * Must be called under dma_list_mutex. |
6f49a57a DW |
445 | */ |
446 | static int dma_chan_get(struct dma_chan *chan) | |
447 | { | |
6f49a57a | 448 | struct module *owner = dma_chan_to_owner(chan); |
d2f4f99d | 449 | int ret; |
6f49a57a | 450 | |
d2f4f99d | 451 | /* The channel is already in use, update client count */ |
6f49a57a DW |
452 | if (chan->client_count) { |
453 | __module_get(owner); | |
d2f4f99d MR |
454 | goto out; |
455 | } | |
6f49a57a | 456 | |
d2f4f99d MR |
457 | if (!try_module_get(owner)) |
458 | return -ENODEV; | |
6f49a57a | 459 | |
8ad342a8 LG |
460 | ret = kref_get_unless_zero(&chan->device->ref); |
461 | if (!ret) { | |
462 | ret = -ENODEV; | |
463 | goto module_put_out; | |
464 | } | |
465 | ||
6f49a57a | 466 | /* allocate upon first client reference */ |
c4b54a64 MR |
467 | if (chan->device->device_alloc_chan_resources) { |
468 | ret = chan->device->device_alloc_chan_resources(chan); | |
469 | if (ret < 0) | |
470 | goto err_out; | |
471 | } | |
6f49a57a | 472 | |
d2f4f99d MR |
473 | if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) |
474 | balance_ref_count(chan); | |
475 | ||
476 | out: | |
477 | chan->client_count++; | |
478 | return 0; | |
479 | ||
480 | err_out: | |
8ad342a8 LG |
481 | dma_device_put(chan->device); |
482 | module_put_out: | |
d2f4f99d MR |
483 | module_put(owner); |
484 | return ret; | |
6f49a57a DW |
485 | } |
486 | ||
487 | /** | |
9872e23d AS |
488 | * dma_chan_put - drop a reference to a DMA channel's parent driver module |
489 | * @chan: channel to release | |
6f49a57a | 490 | * |
9872e23d | 491 | * Must be called under dma_list_mutex. |
6f49a57a DW |
492 | */ |
493 | static void dma_chan_put(struct dma_chan *chan) | |
494 | { | |
c4b54a64 | 495 | /* This channel is not in use, bail out */ |
6f49a57a | 496 | if (!chan->client_count) |
c4b54a64 MR |
497 | return; |
498 | ||
6f49a57a | 499 | chan->client_count--; |
c4b54a64 MR |
500 | |
501 | /* This channel is not in use anymore, free it */ | |
b36f09c3 LPC |
502 | if (!chan->client_count && chan->device->device_free_chan_resources) { |
503 | /* Make sure all operations have completed */ | |
504 | dmaengine_synchronize(chan); | |
6f49a57a | 505 | chan->device->device_free_chan_resources(chan); |
b36f09c3 | 506 | } |
56f13c0d PU |
507 | |
508 | /* If the channel is used via a DMA request router, free the mapping */ | |
509 | if (chan->router && chan->router->route_free) { | |
510 | chan->router->route_free(chan->router->dev, chan->route_data); | |
511 | chan->router = NULL; | |
512 | chan->route_data = NULL; | |
513 | } | |
83c77940 VK |
514 | |
515 | dma_device_put(chan->device); | |
516 | module_put(dma_chan_to_owner(chan)); | |
6f49a57a DW |
517 | } |
518 | ||
7405f74b DW |
519 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) |
520 | { | |
521 | enum dma_status status; | |
522 | unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); | |
523 | ||
524 | dma_async_issue_pending(chan); | |
525 | do { | |
526 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | |
527 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | |
ef859312 | 528 | dev_err(chan->device->dev, "%s: timeout!\n", __func__); |
7405f74b DW |
529 | return DMA_ERROR; |
530 | } | |
2cbe7feb BZ |
531 | if (status != DMA_IN_PROGRESS) |
532 | break; | |
533 | cpu_relax(); | |
534 | } while (1); | |
7405f74b DW |
535 | |
536 | return status; | |
537 | } | |
538 | EXPORT_SYMBOL(dma_sync_wait); | |
539 | ||
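
Example (illustrative): how a polling client might pair `dma_sync_wait()` with a prepared descriptor. The helper name and error values are assumptions, and `dst`/`src` are presumed already DMA-mapped.

```c
#include <linux/dmaengine.h>

/* Illustrative polling client: submit one memcpy and spin-wait on it. */
static int example_memcpy_sync(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* dma_sync_wait() issues pending work and polls the cookie */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}
```
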
bec08513 DW |
540 | /** |
541 | * dma_find_channel - find a channel to carry out the operation | |
9872e23d | 542 | * @tx_type: transaction type |
bec08513 DW |
543 | */ |
544 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | |
545 | { | |
e7dcaa47 | 546 | return this_cpu_read(channel_table[tx_type]->chan); |
bec08513 DW |
547 | } |
548 | EXPORT_SYMBOL(dma_find_channel); | |
a2bd1140 | 549 | |
2ba05622 DW |
550 | /** |
551 | * dma_issue_pending_all - flush all pending operations across all channels | |
552 | */ | |
553 | void dma_issue_pending_all(void) | |
554 | { | |
555 | struct dma_device *device; | |
556 | struct dma_chan *chan; | |
557 | ||
2ba05622 | 558 | rcu_read_lock(); |
59b5ec21 DW |
559 | list_for_each_entry_rcu(device, &dma_device_list, global_node) { |
560 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
561 | continue; | |
2ba05622 DW |
562 | list_for_each_entry(chan, &device->channels, device_node) |
563 | if (chan->client_count) | |
564 | device->device_issue_pending(chan); | |
59b5ec21 | 565 | } |
2ba05622 DW |
566 | rcu_read_unlock(); |
567 | } | |
568 | EXPORT_SYMBOL(dma_issue_pending_all); | |
569 | ||
0d5484b1 LP |
570 | int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) |
571 | { | |
572 | struct dma_device *device; | |
573 | ||
574 | if (!chan || !caps) | |
575 | return -EINVAL; | |
576 | ||
577 | device = chan->device; | |
578 | ||
579 | /* check if the channel supports slave transactions */ | |
dd4e91d5 AS |
580 | if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || |
581 | test_bit(DMA_CYCLIC, device->cap_mask.bits))) | |
0d5484b1 LP |
582 | return -ENXIO; |
583 | ||
584 | /* | |
585 | * Check whether the device reports the generic slave |
586 | * capabilities; if not, it does not support any kind of |
587 | * slave capability reporting. |
588 | */ | |
589 | if (!device->directions) | |
590 | return -ENXIO; | |
591 | ||
592 | caps->src_addr_widths = device->src_addr_widths; | |
593 | caps->dst_addr_widths = device->dst_addr_widths; | |
594 | caps->directions = device->directions; | |
d97758e0 | 595 | caps->min_burst = device->min_burst; |
6d5bbed3 | 596 | caps->max_burst = device->max_burst; |
b1b40b8f | 597 | caps->max_sg_burst = device->max_sg_burst; |
0d5484b1 | 598 | caps->residue_granularity = device->residue_granularity; |
9eeacd3a | 599 | caps->descriptor_reuse = device->descriptor_reuse; |
d8095f94 MS |
600 | caps->cmd_pause = !!device->device_pause; |
601 | caps->cmd_resume = !!device->device_resume; | |
0d5484b1 LP |
602 | caps->cmd_terminate = !!device->device_terminate_all; |
603 | ||
3b6d694e SS |
604 | /* |
605 | * DMA engine device might be configured with non-uniformly | |
606 | * distributed slave capabilities per device channels. In this | |
607 | * case the corresponding driver may provide the device_caps | |
608 | * callback to override the generic capabilities with | |
609 | * channel-specific ones. | |
610 | */ | |
611 | if (device->device_caps) | |
612 | device->device_caps(chan, caps); | |
613 | ||
0d5484b1 LP |
614 | return 0; |
615 | } | |
616 | EXPORT_SYMBOL_GPL(dma_get_slave_caps); | |
617 | ||
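
Example (illustrative): one way a client could consume the reported capabilities; the helper name and the particular fields checked are assumptions.

```c
#include <linux/dmaengine.h>

/* Illustrative: can this channel pause/resume and report residue at
 * least at burst granularity? */
static bool example_chan_can_pause(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	return caps.cmd_pause && caps.cmd_resume &&
	       caps.residue_granularity >= DMA_RESIDUE_GRANULARITY_BURST;
}
```
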
a53e28da LPC |
618 | static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, |
619 | struct dma_device *dev, | |
e2346677 | 620 | dma_filter_fn fn, void *fn_param) |
59b5ec21 DW |
621 | { |
622 | struct dma_chan *chan; | |
59b5ec21 | 623 | |
69b1189b | 624 | if (mask && !dma_device_satisfies_mask(dev, mask)) { |
ef859312 | 625 | dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); |
59b5ec21 DW |
626 | return NULL; |
627 | } | |
628 | /* devices with multiple channels need special handling as we need to | |
629 | * ensure that all channels are either private or public. | |
630 | */ | |
631 | if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) | |
632 | list_for_each_entry(chan, &dev->channels, device_node) { | |
633 | /* some channels are already publicly allocated */ | |
634 | if (chan->client_count) | |
635 | return NULL; | |
636 | } | |
637 | ||
638 | list_for_each_entry(chan, &dev->channels, device_node) { | |
639 | if (chan->client_count) { | |
ef859312 | 640 | dev_dbg(dev->dev, "%s: %s busy\n", |
41d5e59c | 641 | __func__, dma_chan_name(chan)); |
59b5ec21 DW |
642 | continue; |
643 | } | |
e2346677 | 644 | if (fn && !fn(chan, fn_param)) { |
ef859312 | 645 | dev_dbg(dev->dev, "%s: %s filter said false\n", |
e2346677 DW |
646 | __func__, dma_chan_name(chan)); |
647 | continue; | |
648 | } | |
649 | return chan; | |
59b5ec21 DW |
650 | } |
651 | ||
e2346677 | 652 | return NULL; |
59b5ec21 DW |
653 | } |
654 | ||
7bd903c5 PU |
655 | static struct dma_chan *find_candidate(struct dma_device *device, |
656 | const dma_cap_mask_t *mask, | |
657 | dma_filter_fn fn, void *fn_param) | |
658 | { | |
659 | struct dma_chan *chan = private_candidate(mask, device, fn, fn_param); | |
660 | int err; | |
661 | ||
662 | if (chan) { | |
663 | /* Found a suitable channel, try to grab, prep, and return it. | |
664 | * We first set DMA_PRIVATE to disable balance_ref_count as this | |
665 | * channel will not be published in the general-purpose | |
666 | * allocator | |
667 | */ | |
668 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | |
669 | device->privatecnt++; | |
670 | err = dma_chan_get(chan); | |
671 | ||
672 | if (err) { | |
673 | if (err == -ENODEV) { | |
ef859312 JN |
674 | dev_dbg(device->dev, "%s: %s module removed\n", |
675 | __func__, dma_chan_name(chan)); | |
7bd903c5 PU |
676 | list_del_rcu(&device->global_node); |
677 | } else | |
ef859312 JN |
678 | dev_dbg(device->dev, |
679 | "%s: failed to get %s: (%d)\n", | |
7bd903c5 PU |
680 | __func__, dma_chan_name(chan), err); |
681 | ||
682 | if (--device->privatecnt == 0) | |
683 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); | |
684 | ||
685 | chan = ERR_PTR(err); | |
686 | } | |
687 | } | |
688 | ||
689 | return chan ? chan : ERR_PTR(-EPROBE_DEFER); | |
690 | } | |
691 | ||
59b5ec21 | 692 | /** |
19d643d6 | 693 | * dma_get_slave_channel - try to get a specific channel exclusively |
9872e23d | 694 | * @chan: target channel |
7bb587f4 ZG |
695 | */ |
696 | struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) | |
697 | { | |
698 | int err = -EBUSY; | |
699 | ||
700 | /* lock against __dma_request_channel */ | |
701 | mutex_lock(&dma_list_mutex); | |
702 | ||
d9a6c8f5 | 703 | if (chan->client_count == 0) { |
214fc4e4 PU |
704 | struct dma_device *device = chan->device; |
705 | ||
706 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | |
707 | device->privatecnt++; | |
7bb587f4 | 708 | err = dma_chan_get(chan); |
214fc4e4 | 709 | if (err) { |
ef859312 JN |
710 | dev_dbg(chan->device->dev, |
711 | "%s: failed to get %s: (%d)\n", | |
d9a6c8f5 | 712 | __func__, dma_chan_name(chan), err); |
214fc4e4 PU |
713 | chan = NULL; |
714 | if (--device->privatecnt == 0) | |
715 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); | |
716 | } | |
d9a6c8f5 | 717 | } else |
7bb587f4 ZG |
718 | chan = NULL; |
719 | ||
720 | mutex_unlock(&dma_list_mutex); | |
721 | ||
7bb587f4 ZG |
723 | return chan; | |
724 | } | |
725 | EXPORT_SYMBOL_GPL(dma_get_slave_channel); | |
726 | ||
8010dad5 SW |
727 | struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) |
728 | { | |
729 | dma_cap_mask_t mask; | |
730 | struct dma_chan *chan; | |
8010dad5 SW |
731 | |
732 | dma_cap_zero(mask); | |
733 | dma_cap_set(DMA_SLAVE, mask); | |
734 | ||
735 | /* lock against __dma_request_channel */ | |
736 | mutex_lock(&dma_list_mutex); | |
737 | ||
7bd903c5 | 738 | chan = find_candidate(device, &mask, NULL, NULL); |
8010dad5 SW |
739 | |
740 | mutex_unlock(&dma_list_mutex); | |
741 | ||
7bd903c5 | 742 | return IS_ERR(chan) ? NULL : chan; |
8010dad5 SW |
743 | } |
744 | EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); | |
745 | ||
59b5ec21 | 746 | /** |
6b9019a7 | 747 | * __dma_request_channel - try to allocate an exclusive channel |
9872e23d AS |
748 | * @mask: capabilities that the channel must satisfy |
749 | * @fn: optional callback to filter available channels |
750 | * @fn_param: opaque parameter to pass to dma_filter_fn() | |
751 | * @np: device node to look for DMA channels | |
0ad7c000 SW |
752 | * |
753 | * Returns pointer to appropriate DMA channel on success or NULL. | |
59b5ec21 | 754 | */ |
a53e28da | 755 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
f5151311 BW |
756 | dma_filter_fn fn, void *fn_param, |
757 | struct device_node *np) | |
59b5ec21 DW |
758 | { |
759 | struct dma_device *device, *_d; | |
760 | struct dma_chan *chan = NULL; | |
59b5ec21 DW |
761 | |
762 | /* Find a channel */ | |
763 | mutex_lock(&dma_list_mutex); | |
764 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { | |
f5151311 BW |
765 | /* Finds a DMA controller with matching device node */ |
766 | if (np && device->dev->of_node && np != device->dev->of_node) | |
767 | continue; | |
768 | ||
7bd903c5 PU |
769 | chan = find_candidate(device, mask, fn, fn_param); |
770 | if (!IS_ERR(chan)) | |
771 | break; | |
59b5ec21 | 772 | |
7bd903c5 | 773 | chan = NULL; |
59b5ec21 DW |
774 | } |
775 | mutex_unlock(&dma_list_mutex); | |
776 | ||
4c4d7f87 | 777 | pr_debug("%s: %s (%s)\n", |
63433250 JP |
778 | __func__, |
779 | chan ? "success" : "fail", | |
41d5e59c | 780 | chan ? dma_chan_name(chan) : NULL); |
59b5ec21 DW |
781 | |
782 | return chan; | |
783 | } | |
784 | EXPORT_SYMBOL_GPL(__dma_request_channel); | |
785 | ||
a8135d0d PU |
786 | static const struct dma_slave_map *dma_filter_match(struct dma_device *device, |
787 | const char *name, | |
788 | struct device *dev) | |
789 | { | |
790 | int i; | |
791 | ||
792 | if (!device->filter.mapcnt) | |
793 | return NULL; | |
794 | ||
795 | for (i = 0; i < device->filter.mapcnt; i++) { | |
796 | const struct dma_slave_map *map = &device->filter.map[i]; | |
797 | ||
798 | if (!strcmp(map->devname, dev_name(dev)) && | |
799 | !strcmp(map->slave, name)) | |
800 | return map; | |
801 | } | |
802 | ||
803 | return NULL; | |
804 | } | |
805 | ||
9a6cecc8 | 806 | /** |
a8135d0d | 807 | * dma_request_chan - try to allocate an exclusive slave channel |
9a6cecc8 JH |
808 | * @dev: pointer to client device structure |
809 | * @name: slave channel name | |
0ad7c000 SW |
810 | * |
811 | * Returns pointer to appropriate DMA channel on success or an error pointer. | |
9a6cecc8 | 812 | */ |
a8135d0d | 813 | struct dma_chan *dma_request_chan(struct device *dev, const char *name) |
9a6cecc8 | 814 | { |
a8135d0d PU |
815 | struct dma_device *d, *_d; |
816 | struct dma_chan *chan = NULL; | |
817 | ||
9a6cecc8 JH |
818 | /* If device-tree is present get slave info from here */ |
819 | if (dev->of_node) | |
a8135d0d | 820 | chan = of_dma_request_slave_channel(dev->of_node, name); |
9a6cecc8 | 821 | |
4e82f5dd | 822 | /* If device was enumerated by ACPI get slave info from here */ |
a8135d0d PU |
823 | if (has_acpi_companion(dev) && !chan) |
824 | chan = acpi_dma_request_slave_chan_by_name(dev, name); | |
825 | ||
71723a96 GU |
826 | if (PTR_ERR(chan) == -EPROBE_DEFER) |
827 | return chan; | |
828 | ||
829 | if (!IS_ERR_OR_NULL(chan)) | |
830 | goto found; | |
a8135d0d PU |
831 | |
832 | /* Try to find the channel via the DMA filter map(s) */ | |
833 | mutex_lock(&dma_list_mutex); | |
834 | list_for_each_entry_safe(d, _d, &dma_device_list, global_node) { | |
835 | dma_cap_mask_t mask; | |
836 | const struct dma_slave_map *map = dma_filter_match(d, name, dev); | |
4e82f5dd | 837 | |
a8135d0d PU |
838 | if (!map) |
839 | continue; | |
840 | ||
841 | dma_cap_zero(mask); | |
842 | dma_cap_set(DMA_SLAVE, mask); | |
4e82f5dd | 843 | |
a8135d0d PU |
844 | chan = find_candidate(d, &mask, d->filter.fn, map->param); |
845 | if (!IS_ERR(chan)) | |
846 | break; | |
847 | } | |
848 | mutex_unlock(&dma_list_mutex); | |
849 | ||
5d7e816e AS |
850 | if (IS_ERR(chan)) |
851 | return chan; | |
852 | if (!chan) | |
853 | return ERR_PTR(-EPROBE_DEFER); | |
71723a96 GU |
854 | |
855 | found: | |
e937cc1d PU |
856 | #ifdef CONFIG_DEBUG_FS |
857 | chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), | |
858 | name); | |
859 | #endif | |
860 | ||
71723a96 GU |
861 | chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); |
862 | if (!chan->name) | |
bad83565 PU |
863 | return chan; |
864 | chan->slave = dev; | |
71723a96 GU |
865 | |
866 | if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, | |
867 | DMA_SLAVE_NAME)) | |
bad83565 | 868 | dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME); |
71723a96 | 869 | if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) |
bad83565 PU |
870 | dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name); |
871 | ||
71723a96 | 872 | return chan; |
0ad7c000 | 873 | } |
a8135d0d | 874 | EXPORT_SYMBOL_GPL(dma_request_chan); |
0ad7c000 | 875 | |
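
Example (illustrative): a typical probe-time request. The channel name "rx" is an assumption standing in for whatever the client's DT/ACPI binding or dma_slave_map entries define.

```c
#include <linux/dmaengine.h>

/* Illustrative probe path for a slave-DMA client driver. */
static int example_get_rx_chan(struct device *dev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	/* ... dmaengine_slave_config(chan, &cfg), prep, submit ... */

	dma_release_channel(chan);
	return 0;
}
```
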
a8135d0d PU |
876 | /** |
877 | * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities | |
9872e23d | 878 | * @mask: capabilities that the channel must satisfy |
a8135d0d PU |
879 | * |
880 | * Returns pointer to appropriate DMA channel on success or an error pointer. | |
881 | */ | |
882 | struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask) | |
883 | { | |
884 | struct dma_chan *chan; | |
885 | ||
886 | if (!mask) | |
887 | return ERR_PTR(-ENODEV); | |
888 | ||
f5151311 | 889 | chan = __dma_request_channel(mask, NULL, NULL, NULL); |
ec8ca8e3 PU |
890 | if (!chan) { |
891 | mutex_lock(&dma_list_mutex); | |
892 | if (list_empty(&dma_device_list)) | |
893 | chan = ERR_PTR(-EPROBE_DEFER); | |
894 | else | |
895 | chan = ERR_PTR(-ENODEV); | |
896 | mutex_unlock(&dma_list_mutex); | |
897 | } | |
a8135d0d PU |
898 | |
899 | return chan; | |
900 | } | |
901 | EXPORT_SYMBOL_GPL(dma_request_chan_by_mask); | |
902 | ||
59b5ec21 DW |
903 | void dma_release_channel(struct dma_chan *chan) |
904 | { | |
905 | mutex_lock(&dma_list_mutex); | |
906 | WARN_ONCE(chan->client_count != 1, | |
907 | "chan reference count %d != 1\n", chan->client_count); | |
908 | dma_chan_put(chan); | |
0f571515 AN |
909 | /* drop PRIVATE cap enabled by __dma_request_channel() */ |
910 | if (--chan->device->privatecnt == 0) | |
911 | dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); | |
bad83565 | 912 | |
71723a96 | 913 | if (chan->slave) { |
bad83565 | 914 | sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); |
71723a96 GU |
915 | sysfs_remove_link(&chan->slave->kobj, chan->name); |
916 | kfree(chan->name); | |
917 | chan->name = NULL; | |
918 | chan->slave = NULL; | |
919 | } | |
e937cc1d PU |
920 | |
921 | #ifdef CONFIG_DEBUG_FS | |
922 | kfree(chan->dbg_client_name); | |
923 | chan->dbg_client_name = NULL; | |
924 | #endif | |
59b5ec21 DW |
925 | mutex_unlock(&dma_list_mutex); |
926 | } | |
927 | EXPORT_SYMBOL_GPL(dma_release_channel); | |
928 | ||
d379b01e | 929 | /** |
209b84a8 | 930 | * dmaengine_get - register interest in dma_channels |
d379b01e | 931 | */ |
209b84a8 | 932 | void dmaengine_get(void) |
d379b01e | 933 | { |
6f49a57a DW |
934 | struct dma_device *device, *_d; |
935 | struct dma_chan *chan; | |
936 | int err; | |
937 | ||
c13c8260 | 938 | mutex_lock(&dma_list_mutex); |
6f49a57a DW |
939 | dmaengine_ref_count++; |
940 | ||
941 | /* try to grab channels */ | |
59b5ec21 DW |
942 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { |
943 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
944 | continue; | |
6f49a57a DW |
945 | list_for_each_entry(chan, &device->channels, device_node) { |
946 | err = dma_chan_get(chan); | |
947 | if (err == -ENODEV) { | |
948 | /* module removed before we could use it */ | |
2ba05622 | 949 | list_del_rcu(&device->global_node); |
6f49a57a DW |
950 | break; |
951 | } else if (err) | |
ef859312 JN |
952 | dev_dbg(chan->device->dev, |
953 | "%s: failed to get %s: (%d)\n", | |
954 | __func__, dma_chan_name(chan), err); | |
6f49a57a | 955 | } |
59b5ec21 | 956 | } |
6f49a57a | 957 | |
bec08513 DW |
958 | /* if this is the first reference and there were channels |
959 | * waiting we need to rebalance to get those channels | |
960 | * incorporated into the channel table | |
961 | */ | |
962 | if (dmaengine_ref_count == 1) | |
963 | dma_channel_rebalance(); | |
c13c8260 | 964 | mutex_unlock(&dma_list_mutex); |
c13c8260 | 965 | } |
209b84a8 | 966 | EXPORT_SYMBOL(dmaengine_get); |
c13c8260 CL |
967 | |
968 | /** | |
9872e23d | 969 | * dmaengine_put - let DMA drivers be removed when ref_count == 0 |
c13c8260 | 970 | */ |
209b84a8 | 971 | void dmaengine_put(void) |
c13c8260 | 972 | { |
8ad342a8 | 973 | struct dma_device *device, *_d; |
c13c8260 CL |
974 | struct dma_chan *chan; |
975 | ||
c13c8260 | 976 | mutex_lock(&dma_list_mutex); |
6f49a57a DW |
977 | dmaengine_ref_count--; |
978 | BUG_ON(dmaengine_ref_count < 0); | |
979 | /* drop channel references */ | |
8ad342a8 | 980 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { |
59b5ec21 DW |
981 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
982 | continue; | |
6f49a57a DW |
983 | list_for_each_entry(chan, &device->channels, device_node) |
984 | dma_chan_put(chan); | |
59b5ec21 | 985 | } |
c13c8260 | 986 | mutex_unlock(&dma_list_mutex); |
c13c8260 | 987 | } |
209b84a8 | 988 | EXPORT_SYMBOL(dmaengine_put); |
c13c8260 | 989 | |
138f4c35 DW |
990 | static bool device_has_all_tx_types(struct dma_device *device) |
991 | { | |
992 | /* A device that satisfies this test has channels that will never cause | |
993 | * an async_tx channel switch event as all possible operation types can | |
994 | * be handled. | |
995 | */ | |
996 | #ifdef CONFIG_ASYNC_TX_DMA | |
997 | if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) | |
998 | return false; | |
999 | #endif | |
1000 | ||
d57d3a48 | 1001 | #if IS_ENABLED(CONFIG_ASYNC_MEMCPY) |
138f4c35 DW |
1002 | if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) |
1003 | return false; | |
1004 | #endif | |
1005 | ||
d57d3a48 | 1006 | #if IS_ENABLED(CONFIG_ASYNC_XOR) |
138f4c35 DW |
1007 | if (!dma_has_cap(DMA_XOR, device->cap_mask)) |
1008 | return false; | |
7b3cc2b1 DW |
1009 | |
1010 | #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA | |
4499a24d DW |
1011 | if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) |
1012 | return false; | |
138f4c35 | 1013 | #endif |
7b3cc2b1 | 1014 | #endif |
138f4c35 | 1015 | |
d57d3a48 | 1016 | #if IS_ENABLED(CONFIG_ASYNC_PQ) |
138f4c35 DW |
1017 | if (!dma_has_cap(DMA_PQ, device->cap_mask)) |
1018 | return false; | |
7b3cc2b1 DW |
1019 | |
1020 | #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA | |
4499a24d DW |
1021 | if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) |
1022 | return false; | |
138f4c35 | 1023 | #endif |
7b3cc2b1 | 1024 | #endif |
138f4c35 DW |
1025 | |
1026 | return true; | |
1027 | } | |
1028 | ||
257b17ca DW |
1029 | static int get_dma_id(struct dma_device *device) |
1030 | { | |
485258b4 | 1031 | int rc = ida_alloc(&dma_ida, GFP_KERNEL); |
69ee266b | 1032 | |
485258b4 MW |
1033 | if (rc < 0) |
1034 | return rc; | |
1035 | device->dev_id = rc; | |
1036 | return 0; | |
257b17ca DW |
1037 | } |
1038 | ||
d2fb0a04 | 1039 | static int __dma_async_device_channel_register(struct dma_device *device, |
08210094 | 1040 | struct dma_chan *chan) |
d2fb0a04 | 1041 | { |
7e4be129 | 1042 | int rc; |
d2fb0a04 DJ |
1043 | |
1044 | chan->local = alloc_percpu(typeof(*chan->local)); | |
1045 | if (!chan->local) | |
7e4be129 | 1046 | return -ENOMEM; |
d2fb0a04 DJ |
1047 | chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); |
1048 | if (!chan->dev) { | |
7e4be129 DC |
1049 | rc = -ENOMEM; |
1050 | goto err_free_local; | |
d2fb0a04 DJ |
1051 | } |
1052 | ||
1053 | /* | |
1054 | * When the chan_id is a negative value, we are dynamically adding | |
1055 | * the channel. Otherwise we are statically enumerating. |
1056 | */ | |
08210094 DJ |
1057 | mutex_lock(&device->chan_mutex); |
1058 | chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); | |
1059 | mutex_unlock(&device->chan_mutex); | |
1060 | if (chan->chan_id < 0) { | |
1061 | pr_err("%s: unable to alloc ida for chan: %d\n", | |
1062 | __func__, chan->chan_id); | |
7e4be129 DC |
1063 | rc = chan->chan_id; |
1064 | goto err_free_dev; | |
08210094 DJ |
1065 | } |
1066 | ||
d2fb0a04 DJ |
1067 | chan->dev->device.class = &dma_devclass; |
1068 | chan->dev->device.parent = device->dev; | |
1069 | chan->dev->chan = chan; | |
d2fb0a04 | 1070 | chan->dev->dev_id = device->dev_id; |
d2fb0a04 DJ |
1071 | dev_set_name(&chan->dev->device, "dma%dchan%d", |
1072 | device->dev_id, chan->chan_id); | |
d2fb0a04 DJ |
1073 | rc = device_register(&chan->dev->device); |
1074 | if (rc) | |
08210094 | 1075 | goto err_out_ida; |
d2fb0a04 | 1076 | chan->client_count = 0; |
08210094 | 1077 | device->chancnt++; |
d2fb0a04 DJ |
1078 | |
1079 | return 0; | |
1080 | ||
08210094 DJ |
1081 | err_out_ida: |
1082 | mutex_lock(&device->chan_mutex); | |
1083 | ida_free(&device->chan_ida, chan->chan_id); | |
1084 | mutex_unlock(&device->chan_mutex); | |
7e4be129 | 1085 | err_free_dev: |
d2fb0a04 | 1086 | kfree(chan->dev); |
7e4be129 DC |
1087 | err_free_local: |
1088 | free_percpu(chan->local); | |
d2fb0a04 DJ |
1089 | return rc; |
1090 | } | |
1091 | ||
e81274cd DJ |
1092 | int dma_async_device_channel_register(struct dma_device *device, |
1093 | struct dma_chan *chan) | |
1094 | { | |
1095 | int rc; | |
1096 | ||
08210094 | 1097 | rc = __dma_async_device_channel_register(device, chan); |
e81274cd DJ |
1098 | if (rc < 0) |
1099 | return rc; | |
1100 | ||
1101 | dma_channel_rebalance(); | |
1102 | return 0; | |
1103 | } | |
1104 | EXPORT_SYMBOL_GPL(dma_async_device_channel_register); | |
1105 | ||
d2fb0a04 DJ |
1106 | static void __dma_async_device_channel_unregister(struct dma_device *device, |
1107 | struct dma_chan *chan) | |
1108 | { | |
1109 | WARN_ONCE(!device->device_release && chan->client_count, | |
1110 | "%s called while %d clients hold a reference\n", | |
1111 | __func__, chan->client_count); | |
1112 | mutex_lock(&dma_list_mutex); | |
e81274cd DJ |
1113 | list_del(&chan->device_node); |
1114 | device->chancnt--; | |
d2fb0a04 DJ |
1115 | chan->dev->chan = NULL; |
1116 | mutex_unlock(&dma_list_mutex); | |
08210094 DJ |
1117 | mutex_lock(&device->chan_mutex); |
1118 | ida_free(&device->chan_ida, chan->chan_id); | |
1119 | mutex_unlock(&device->chan_mutex); | |
d2fb0a04 DJ |
1120 | device_unregister(&chan->dev->device); |
1121 | free_percpu(chan->local); | |
1122 | } | |
1123 | ||
e81274cd DJ |
1124 | void dma_async_device_channel_unregister(struct dma_device *device, |
1125 | struct dma_chan *chan) | |
1126 | { | |
1127 | __dma_async_device_channel_unregister(device, chan); | |
1128 | dma_channel_rebalance(); | |
1129 | } | |
1130 | EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); | |
1131 | ||
c13c8260 | 1132 | /** |
6508871e | 1133 | * dma_async_device_register - registers DMA devices found |
9872e23d | 1134 | * @device: pointer to &struct dma_device |
8ad342a8 LG |
1135 | * |
1136 | * After calling this routine the structure should not be freed except in the | |
1137 | * device_release() callback which will be called after | |
1138 | * dma_async_device_unregister() is called and no further references are taken. | |
c13c8260 CL |
1139 | */ |
1140 | int dma_async_device_register(struct dma_device *device) | |
1141 | { | |
08210094 | 1142 | int rc; |
c13c8260 CL |
1143 | struct dma_chan *chan; |
1144 | ||
1145 | if (!device) | |
1146 | return -ENODEV; | |
1147 | ||
7405f74b | 1148 | /* validate device routines */ |
3eeb5156 VK |
1149 | if (!device->dev) { |
1150 | pr_err("DMAdevice must have dev\n"); | |
1151 | return -EIO; | |
1152 | } | |
1153 | ||
dae7a589 LG |
1154 | device->owner = device->dev->driver->owner; |
1155 | ||
3eeb5156 VK |
1156 | if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { |
1157 | dev_err(device->dev, | |
1158 | "Device claims capability %s, but op is not defined\n", | |
1159 | "DMA_MEMCPY"); | |
1160 | return -EIO; | |
1161 | } | |
1162 | ||
1163 | if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) { | |
1164 | dev_err(device->dev, | |
1165 | "Device claims capability %s, but op is not defined\n", | |
1166 | "DMA_XOR"); | |
1167 | return -EIO; | |
1168 | } | |
1169 | ||
1170 | if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) { | |
1171 | dev_err(device->dev, | |
1172 | "Device claims capability %s, but op is not defined\n", | |
1173 | "DMA_XOR_VAL"); | |
1174 | return -EIO; | |
1175 | } | |
1176 | ||
1177 | if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) { | |
1178 | dev_err(device->dev, | |
1179 | "Device claims capability %s, but op is not defined\n", | |
1180 | "DMA_PQ"); | |
1181 | return -EIO; | |
1182 | } | |
1183 | ||
1184 | if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) { | |
1185 | dev_err(device->dev, | |
1186 | "Device claims capability %s, but op is not defined\n", | |
1187 | "DMA_PQ_VAL"); | |
1188 | return -EIO; | |
1189 | } | |
1190 | ||
1191 | if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) { | |
1192 | dev_err(device->dev, | |
1193 | "Device claims capability %s, but op is not defined\n", | |
1194 | "DMA_MEMSET"); | |
1195 | return -EIO; | |
1196 | } | |
1197 | ||
1198 | if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) { | |
1199 | dev_err(device->dev, | |
1200 | "Device claims capability %s, but op is not defined\n", | |
1201 | "DMA_INTERRUPT"); | |
1202 | return -EIO; | |
1203 | } | |
1204 | ||
1205 | if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) { | |
1206 | dev_err(device->dev, | |
1207 | "Device claims capability %s, but op is not defined\n", | |
1208 | "DMA_CYCLIC"); | |
1209 | return -EIO; | |
1210 | } | |
1211 | ||
1212 | if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) { | |
1213 | dev_err(device->dev, | |
1214 | "Device claims capability %s, but op is not defined\n", | |
1215 | "DMA_INTERLEAVE"); | |
1216 | return -EIO; | |
1217 | } | |
1218 | ||
1220 | if (!device->device_tx_status) { | |
1221 | dev_err(device->dev, "Device tx_status is not defined\n"); | |
1222 | return -EIO; | |
1223 | } | |
1224 | ||
1226 | if (!device->device_issue_pending) { | |
1227 | dev_err(device->dev, "Device issue_pending is not defined\n"); | |
1228 | return -EIO; | |
1229 | } | |
7405f74b | 1230 | |
8ad342a8 | 1231 | if (!device->device_release) |
f91da3bd | 1232 | dev_dbg(device->dev, |
8ad342a8 LG |
1233 | "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); |
1234 | ||
1235 | kref_init(&device->ref); | |
1236 | ||
138f4c35 | 1237 | /* note: this only matters in the |
5fc6d897 | 1238 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case |
138f4c35 DW |
1239 | */ |
1240 | if (device_has_all_tx_types(device)) | |
1241 | dma_cap_set(DMA_ASYNC_TX, device->cap_mask); | |
1242 | ||
257b17ca | 1243 | rc = get_dma_id(device); |
d2fb0a04 | 1244 | if (rc != 0) |
864498aa | 1245 | return rc; |
c13c8260 | 1246 | |
08210094 DJ |
1247 | mutex_init(&device->chan_mutex); |
1248 | ida_init(&device->chan_ida); | |
1249 | ||
c13c8260 CL |
1250 | /* represent channels in sysfs. Probably want devs too */ |
1251 | list_for_each_entry(chan, &device->channels, device_node) { | |
08210094 | 1252 | rc = __dma_async_device_channel_register(device, chan); |
d2fb0a04 | 1253 | if (rc < 0) |
ff487fb7 | 1254 | goto err_out; |
c13c8260 | 1255 | } |
76d7b84b | 1256 | |
c13c8260 | 1257 | mutex_lock(&dma_list_mutex); |
59b5ec21 DW |
1258 | /* take references on public channels */ |
1259 | if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
6f49a57a DW |
1260 | list_for_each_entry(chan, &device->channels, device_node) { |
1261 | /* if clients are already waiting for channels we need | |
1262 | * to take references on their behalf | |
1263 | */ | |
1264 | if (dma_chan_get(chan) == -ENODEV) { | |
1265 | /* note we can only get here for the first | |
1266 | * channel as the remaining channels are | |
1267 | * guaranteed to get a reference | |
1268 | */ | |
1269 | rc = -ENODEV; | |
1270 | mutex_unlock(&dma_list_mutex); | |
1271 | goto err_out; | |
1272 | } | |
1273 | } | |
2ba05622 | 1274 | list_add_tail_rcu(&device->global_node, &dma_device_list); |
0f571515 AN |
1275 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) |
1276 | device->privatecnt++; /* Always private */ | |
bec08513 | 1277 | dma_channel_rebalance(); |
c13c8260 CL |
1278 | mutex_unlock(&dma_list_mutex); |
1279 | ||
26cf132d PU |
1280 | dmaengine_debug_register(device); |
1281 | ||
c13c8260 | 1282 | return 0; |
ff487fb7 JG |
1283 | |
1284 | err_out: | |
257b17ca | 1285 | /* if we never registered a channel just release the ida */ |
d2fb0a04 | 1286 | if (!device->chancnt) { |
485258b4 | 1287 | ida_free(&dma_ida, device->dev_id); |
257b17ca DW |
1288 | return rc; |
1289 | } | |
1290 | ||
ff487fb7 JG |
1291 | list_for_each_entry(chan, &device->channels, device_node) { |
1292 | if (chan->local == NULL) | |
1293 | continue; | |
41d5e59c DW |
1294 | mutex_lock(&dma_list_mutex); |
1295 | chan->dev->chan = NULL; | |
1296 | mutex_unlock(&dma_list_mutex); | |
1297 | device_unregister(&chan->dev->device); | |
ff487fb7 JG |
1298 | free_percpu(chan->local); |
1299 | } | |
1300 | return rc; | |
c13c8260 | 1301 | } |
765e3d8a | 1302 | EXPORT_SYMBOL(dma_async_device_register); |
c13c8260 | 1303 | |
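
Example (illustrative): a minimal provider-side sketch, assuming a hypothetical driver-private `struct my_dma` and `my_*` callbacks (prototypes shown, bodies elided), showing the fields the validation above insists on.

```c
#include <linux/dmaengine.h>

/* Hypothetical driver callbacks; bodies elided in this sketch. */
static struct dma_async_tx_descriptor *my_prep_memcpy(struct dma_chan *c,
		dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags);
static enum dma_status my_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *state);
static void my_issue_pending(struct dma_chan *c);
static void my_release(struct dma_device *dd);

struct my_dma {
	struct dma_device ddev;
	struct dma_chan chan;
};

static int example_register(struct my_dma *md, struct device *parent)
{
	struct dma_device *dd = &md->ddev;

	dd->dev = parent;	/* dma_async_device_register() requires this */
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dd->device_prep_dma_memcpy = my_prep_memcpy;
	dd->device_tx_status = my_tx_status;
	dd->device_issue_pending = my_issue_pending;
	dd->device_release = my_release;	/* allows safe unbind */

	INIT_LIST_HEAD(&dd->channels);
	md->chan.device = dd;
	list_add_tail(&md->chan.device_node, &dd->channels);

	return dma_async_device_register(dd);
}
```
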
6508871e | 1304 | /** |
6f49a57a | 1305 | * dma_async_device_unregister - unregister a DMA device |
9872e23d | 1306 | * @device: pointer to &struct dma_device |
f27c580c DW |
1307 | * |
1308 | * This routine is called by DMA driver exit routines; dmaengine holds module |
1309 | * references to prevent it being called while channels are in use. |
6508871e RD |
1310 | */ |
1311 | void dma_async_device_unregister(struct dma_device *device) | |
c13c8260 | 1312 | { |
e81274cd | 1313 | struct dma_chan *chan, *n; |
c13c8260 | 1314 | |
26cf132d PU |
1315 | dmaengine_debug_unregister(device); |
1316 | ||
e81274cd | 1317 | list_for_each_entry_safe(chan, n, &device->channels, device_node) |
d2fb0a04 | 1318 | __dma_async_device_channel_unregister(device, chan); |
8ad342a8 LG |
1319 | |
1320 | mutex_lock(&dma_list_mutex); | |
1321 | /* | |
1322 | * setting DMA_PRIVATE ensures the device being torn down will not | |
1323 | * be used in the channel_table | |
1324 | */ | |
1325 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | |
1326 | dma_channel_rebalance(); | |
08210094 | 1327 | ida_free(&dma_ida, device->dev_id); |
8ad342a8 LG |
1328 | dma_device_put(device); |
1329 | mutex_unlock(&dma_list_mutex); | |
c13c8260 | 1330 | } |
765e3d8a | 1331 | EXPORT_SYMBOL(dma_async_device_unregister); |
c13c8260 | 1332 | |
f39b948d HS |
1333 | static void dmam_device_release(struct device *dev, void *res) |
1334 | { | |
1335 | struct dma_device *device; | |
1336 | ||
1337 | device = *(struct dma_device **)res; | |
1338 | dma_async_device_unregister(device); | |
1339 | } | |
1340 | ||
1341 | /** | |
1342 | * dmaenginem_async_device_register - registers DMA devices found | |
9872e23d | 1343 | * @device: pointer to &struct dma_device |
f39b948d HS |
1344 | * |
1345 | * The operation is managed and will be undone on driver detach. | |
1346 | */ | |
1347 | int dmaenginem_async_device_register(struct dma_device *device) | |
1348 | { | |
1349 | void *p; | |
1350 | int ret; | |
1351 | ||
1352 | p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL); | |
1353 | if (!p) | |
1354 | return -ENOMEM; | |
1355 | ||
1356 | ret = dma_async_device_register(device); | |
1357 | if (!ret) { | |
1358 | *(struct dma_device **)p = device; | |
1359 | devres_add(device->dev, p); | |
1360 | } else { | |
1361 | devres_free(p); | |
1362 | } | |
1363 | ||
1364 | return ret; | |
1365 | } | |
1366 | EXPORT_SYMBOL(dmaenginem_async_device_register); | |
1367 | ||
45c463ae DW |
1368 | struct dmaengine_unmap_pool { |
1369 | struct kmem_cache *cache; | |
1370 | const char *name; | |
1371 | mempool_t *pool; | |
1372 | size_t size; | |
1373 | }; | |
7405f74b | 1374 | |
45c463ae DW |
1375 | #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } |
1376 | static struct dmaengine_unmap_pool unmap_pool[] = { | |
1377 | __UNMAP_POOL(2), | |
3cc377b9 | 1378 | #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) |
45c463ae DW |
1379 | __UNMAP_POOL(16), |
1380 | __UNMAP_POOL(128), | |
1381 | __UNMAP_POOL(256), | |
1382 | #endif | |
1383 | }; | |
0036731c | 1384 | |
45c463ae DW |
1385 | static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) |
1386 | { | |
1387 | int order = get_count_order(nr); | |
1388 | ||
1389 | switch (order) { | |
1390 | case 0 ... 1: | |
1391 | return &unmap_pool[0]; | |
23f963e9 | 1392 | #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) |
45c463ae DW |
1393 | case 2 ... 4: |
1394 | return &unmap_pool[1]; | |
1395 | case 5 ... 7: | |
1396 | return &unmap_pool[2]; | |
1397 | case 8: | |
1398 | return &unmap_pool[3]; | |
23f963e9 | 1399 | #endif |
45c463ae DW |
1400 | default: |
1401 | BUG(); | |
1402 | return NULL; | |
0036731c | 1403 | } |
45c463ae | 1404 | } |
7405f74b | 1405 | |
45c463ae DW |
1406 | static void dmaengine_unmap(struct kref *kref) |
1407 | { | |
1408 | struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); | |
1409 | struct device *dev = unmap->dev; | |
1410 | int cnt, i; | |
1411 | ||
1412 | cnt = unmap->to_cnt; | |
1413 | for (i = 0; i < cnt; i++) | |
1414 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | |
1415 | DMA_TO_DEVICE); | |
1416 | cnt += unmap->from_cnt; | |
1417 | for (; i < cnt; i++) | |
1418 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | |
1419 | DMA_FROM_DEVICE); | |
1420 | cnt += unmap->bidi_cnt; | |
7476bd79 DW |
1421 | for (; i < cnt; i++) { |
1422 | if (unmap->addr[i] == 0) | |
1423 | continue; | |
45c463ae DW |
1424 | dma_unmap_page(dev, unmap->addr[i], unmap->len, |
1425 | DMA_BIDIRECTIONAL); | |
7476bd79 | 1426 | } |
c1f43dd9 | 1427 | cnt = unmap->map_cnt; |
45c463ae DW |
1428 | mempool_free(unmap, __get_unmap_pool(cnt)->pool); |
1429 | } | |
7405f74b | 1430 | |
45c463ae DW |
1431 | void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) |
1432 | { | |
1433 | if (unmap) | |
1434 | kref_put(&unmap->kref, dmaengine_unmap); | |
1435 | } | |
1436 | EXPORT_SYMBOL_GPL(dmaengine_unmap_put); | |
7405f74b | 1437 | |
45c463ae DW |
1438 | static void dmaengine_destroy_unmap_pool(void) |
1439 | { | |
1440 | int i; | |
1441 | ||
1442 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { | |
1443 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; | |
1444 | ||
240eb916 | 1445 | mempool_destroy(p->pool); |
45c463ae | 1446 | p->pool = NULL; |
240eb916 | 1447 | kmem_cache_destroy(p->cache); |
45c463ae DW |
1448 | p->cache = NULL; |
1449 | } | |
7405f74b | 1450 | } |
7405f74b | 1451 | |
45c463ae | 1452 | static int __init dmaengine_init_unmap_pool(void) |
7405f74b | 1453 | { |
45c463ae | 1454 | int i; |
7405f74b | 1455 | |
45c463ae DW |
1456 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { |
1457 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; | |
1458 | size_t size; | |
0036731c | 1459 | |
45c463ae DW |
1460 | size = sizeof(struct dmaengine_unmap_data) + |
1461 | sizeof(dma_addr_t) * p->size; | |
1462 | ||
1463 | p->cache = kmem_cache_create(p->name, size, 0, | |
1464 | SLAB_HWCACHE_ALIGN, NULL); | |
1465 | if (!p->cache) | |
1466 | break; | |
1467 | p->pool = mempool_create_slab_pool(1, p->cache); | |
1468 | if (!p->pool) | |
1469 | break; | |
0036731c | 1470 | } |
7405f74b | 1471 | |
45c463ae DW |
1472 | if (i == ARRAY_SIZE(unmap_pool)) |
1473 | return 0; | |
7405f74b | 1474 | |
45c463ae DW |
1475 | dmaengine_destroy_unmap_pool(); |
1476 | return -ENOMEM; | |
1477 | } | |
7405f74b | 1478 | |
89716462 | 1479 | struct dmaengine_unmap_data * |
45c463ae DW |
1480 | dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) |
1481 | { | |
1482 | struct dmaengine_unmap_data *unmap; | |
1483 | ||
1484 | unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); | |
1485 | if (!unmap) | |
1486 | return NULL; | |
1487 | ||
1488 | memset(unmap, 0, sizeof(*unmap)); | |
1489 | kref_init(&unmap->kref); | |
1490 | unmap->dev = dev; | |
c1f43dd9 | 1491 | unmap->map_cnt = nr; |
45c463ae DW |
1492 | |
1493 | return unmap; | |
7405f74b | 1494 | } |
89716462 | 1495 | EXPORT_SYMBOL(dmaengine_get_unmap_data); |
7405f74b | 1496 | |
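
Example (illustrative): the intended unmap-data lifecycle on the driver side. The helper name is an assumption and `dma_mapping_error()` checks are elided for brevity.

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Illustrative: one source and one destination mapping, torn down
 * together when the last dmaengine_unmap_put() drops the kref. */
static struct dmaengine_unmap_data *
example_map(struct device *dev, struct page *s, struct page *d, size_t len)
{
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return NULL;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev, s, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, d, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	return unmap;	/* release with dmaengine_unmap_put(unmap) */
}
```
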
7405f74b DW |
1497 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, |
1498 | struct dma_chan *chan) | |
1499 | { | |
1500 | tx->chan = chan; | |
5fc6d897 | 1501 | #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
7405f74b | 1502 | spin_lock_init(&tx->lock); |
caa20d97 | 1503 | #endif |
7405f74b DW |
1504 | } |
1505 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); | |
1506 | ||
4db8fd32 PU |
1507 | static inline int desc_check_and_set_metadata_mode( |
1508 | struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode) | |
1509 | { | |
1510 | /* Make sure that the metadata mode is not mixed */ | |
1511 | if (!desc->desc_metadata_mode) { | |
1512 | if (dmaengine_is_metadata_mode_supported(desc->chan, mode)) | |
1513 | desc->desc_metadata_mode = mode; | |
1514 | else | |
1515 | return -ENOTSUPP; | |
1516 | } else if (desc->desc_metadata_mode != mode) { | |
1517 | return -EINVAL; | |
1518 | } | |
1519 | ||
1520 | return 0; | |
1521 | } | |
1522 | ||
1523 | int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, | |
1524 | void *data, size_t len) | |
1525 | { | |
1526 | int ret; | |
1527 | ||
1528 | if (!desc) | |
1529 | return -EINVAL; | |
1530 | ||
1531 | ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT); | |
1532 | if (ret) | |
1533 | return ret; | |
1534 | ||
1535 | if (!desc->metadata_ops || !desc->metadata_ops->attach) | |
1536 | return -ENOTSUPP; | |
1537 | ||
1538 | return desc->metadata_ops->attach(desc, data, len); | |
1539 | } | |
1540 | EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata); | |
1541 | ||
1542 | void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, | |
1543 | size_t *payload_len, size_t *max_len) | |
1544 | { | |
1545 | int ret; | |
1546 | ||
1547 | if (!desc) | |
1548 | return ERR_PTR(-EINVAL); | |
1549 | ||
1550 | ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); | |
1551 | if (ret) | |
1552 | return ERR_PTR(ret); | |
1553 | ||
1554 | if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) | |
1555 | return ERR_PTR(-ENOTSUPP); | |
1556 | ||
1557 | return desc->metadata_ops->get_ptr(desc, payload_len, max_len); | |
1558 | } | |
1559 | EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr); | |
1560 | ||
1561 | int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, | |
1562 | size_t payload_len) | |
1563 | { | |
1564 | int ret; | |
1565 | ||
1566 | if (!desc) | |
1567 | return -EINVAL; | |
1568 | ||
1569 | ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); | |
1570 | if (ret) | |
1571 | return ret; | |
1572 | ||
1573 | if (!desc->metadata_ops || !desc->metadata_ops->set_len) | |
1574 | return -ENOTSUPP; | |
1575 | ||
1576 | return desc->metadata_ops->set_len(desc, payload_len); | |
1577 | } | |
1578 | EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len); | |
1579 | ||
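
Example (illustrative): a DESC_METADATA_ENGINE flow using the three helpers above; the helper name and the 16-byte payload length are assumptions.

```c
#include <linux/dmaengine.h>

/* Illustrative engine-mode flow: get the buffer, fill it, then record
 * how much payload was written before submitting the descriptor. */
static int example_engine_metadata(struct dma_async_tx_descriptor *desc)
{
	size_t payload_len, max_len;
	void *ptr;

	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* ... write up to max_len bytes of metadata through ptr ... */

	return dmaengine_desc_set_metadata_len(desc, 16);
}
```
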
9872e23d AS |
1580 | /** |
1581 | * dma_wait_for_async_tx - spin wait for a transaction to complete | |
1582 | * @tx: in-flight transaction to wait on | |
07f2211e DW |
1583 | */ |
1584 | enum dma_status | |
1585 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | |
1586 | { | |
95475e57 | 1587 | unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); |
07f2211e DW |
1588 | |
1589 | if (!tx) | |
adfedd9a | 1590 | return DMA_COMPLETE; |
07f2211e | 1591 | |
95475e57 DW |
1592 | while (tx->cookie == -EBUSY) { |
1593 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | |
ef859312 JN |
1594 | dev_err(tx->chan->device->dev, |
1595 | "%s timeout waiting for descriptor submission\n", | |
1596 | __func__); | |
95475e57 DW |
1597 | return DMA_ERROR; |
1598 | } | |
1599 | cpu_relax(); | |
1600 | } | |
1601 | return dma_sync_wait(tx->chan, tx->cookie); | |
07f2211e DW |
1602 | } |
1603 | EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); | |
1604 | ||
9872e23d AS |
1605 | /** |
1606 | * dma_run_dependencies - process dependent operations on the target channel | |
1607 | * @tx: transaction with dependencies | |
1608 | * | |
1609 | * Helper routine for DMA drivers to process (start) dependent operations | |
1610 | * on their target channel. | |
07f2211e DW |
1611 | */ |
1612 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx) | |
1613 | { | |
caa20d97 | 1614 | struct dma_async_tx_descriptor *dep = txd_next(tx); |
07f2211e DW |
1615 | struct dma_async_tx_descriptor *dep_next; |
1616 | struct dma_chan *chan; | |
1617 | ||
1618 | if (!dep) | |
1619 | return; | |
1620 | ||
dd59b853 | 1621 | /* we'll submit tx->next now, so clear the link */ |
caa20d97 | 1622 | txd_clear_next(tx); |
07f2211e DW |
1623 | chan = dep->chan; |
1624 | ||
1625 | /* keep submitting up until a channel switch is detected; |
1626 | * in that case we will be called again as a result of | |
1627 | * processing the interrupt from async_tx_channel_switch | |
1628 | */ | |
1629 | for (; dep; dep = dep_next) { | |
caa20d97 DW |
1630 | txd_lock(dep); |
1631 | txd_clear_parent(dep); | |
1632 | dep_next = txd_next(dep); | |
07f2211e | 1633 | if (dep_next && dep_next->chan == chan) |
caa20d97 | 1634 | txd_clear_next(dep); /* ->next will be submitted */ |
07f2211e DW |
1635 | else |
1636 | dep_next = NULL; /* submit current dep and terminate */ | |
caa20d97 | 1637 | txd_unlock(dep); |
07f2211e DW |
1638 | |
1639 | dep->tx_submit(dep); | |
1640 | } | |
1641 | ||
1642 | chan->device->device_issue_pending(chan); | |
1643 | } | |
1644 | EXPORT_SYMBOL_GPL(dma_run_dependencies); | |
1645 | ||
c13c8260 CL |
1646 | static int __init dma_bus_init(void) |
1647 | { | |
45c463ae DW |
1648 | int err = dmaengine_init_unmap_pool(); |
1649 | ||
1650 | if (err) | |
1651 | return err; | |
e937cc1d PU |
1652 | |
1653 | err = class_register(&dma_devclass); | |
1654 | if (!err) | |
1655 | dmaengine_debugfs_init(); | |
1656 | ||
1657 | return err; | |
c13c8260 | 1658 | } |
652afc27 | 1659 | arch_initcall(dma_bus_init); |