// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Due to the fact we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
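/*
 * Illustrative sketch (not part of this file): a typical opportunistic
 * mem-to-mem offload client of the public channel pool. dst_dma, src_dma and
 * len are caller-provided DMA addresses and length; error handling is
 * abbreviated.
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		struct dma_async_tx_descriptor *tx;
 *		dma_cookie_t cookie;
 *
 *		tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					       DMA_PREP_INTERRUPT);
 *		if (tx) {
 *			cookie = dmaengine_submit(tx);
 *			dma_async_issue_pending(chan);
 *			dma_sync_wait(chan, cookie);
 *		}
 *	}
 *	dmaengine_put();
 */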
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}
static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;
	int i;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;
	int i;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		ida_free(&dma_ida, chan_dev->dev_id);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};
/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}
/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
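/*
 * Illustrative sketch (not part of this file): a client checking whether a
 * slave channel supports pause and the directions it can handle before
 * configuring it. "chan" is assumed to have been obtained via
 * dma_request_chan(); "dev" is the client's struct device.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps)) {
 *		if (caps.cmd_pause)
 *			dev_dbg(dev, "channel supports pause/resume\n");
 *		if (!(caps.directions & BIT(DMA_MEM_TO_DEV)))
 *			dev_warn(dev, "MEM_TO_DEV not supported\n");
 *	}
 */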
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
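/*
 * Illustrative sketch (not part of this file): requesting an exclusive
 * memory-to-memory channel through the dma_request_channel() wrapper with a
 * custom filter. The filter callback and the "my_dev" cookie are hypothetical;
 * only channels for which the filter returns true are considered.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev->parent == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		... use the channel ...
 *		dma_release_channel(chan);
 *	}
 */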
static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
		/* Valid channel found or requester needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(dma_request_chan);
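/*
 * Illustrative sketch (not part of this file): a typical slave DMA client.
 * The channel name "tx", the dma_slave_config values, and the buffer/callback
 * names are hypothetical and depend on the peripheral.
 *
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	chan = dma_request_chan(dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = my_dma_done;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 *	...
 *	dma_release_channel(chan);
 */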
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);
	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan* chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMAdevice must have dev\n");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}

	if (!chancnt) {
		dev_err(device->dev, "%s: device has no channels!\n", __func__);
		rc = -ENODEV;
		goto err_out;
	}

	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		ida_free(&dma_ida, device->dev_id);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
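/*
 * Illustrative sketch (not part of this file): the provider-side sequence a
 * DMA controller driver might follow in its probe routine. The "fd" structure
 * and the foo_*() callbacks are hypothetical.
 *
 *	dma_cap_zero(fd->ddev.cap_mask);
 *	dma_cap_set(DMA_SLAVE, fd->ddev.cap_mask);
 *	fd->ddev.device_prep_slave_sg = foo_prep_slave_sg;
 *	fd->ddev.device_issue_pending = foo_issue_pending;
 *	fd->ddev.device_tx_status = foo_tx_status;
 *	fd->ddev.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&fd->ddev.channels);
 *	... add each channel to fd->ddev.channels ...
 *	ret = dma_async_device_register(&fd->ddev);
 *	if (ret)
 *		return ret;
 */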
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void **p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
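/*
 * Illustrative usage note (not part of this file): with the managed variant a
 * controller driver's probe does not need a matching unregister call in its
 * remove path, e.g.:
 *
 *	ret = dmaenginem_async_device_register(&fd->ddev);
 *	if (ret)
 *		return ret;
 *	(no explicit dma_async_device_unregister() needed on detach)
 */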
struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
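/*
 * Illustrative sketch (not part of this file): how an offload user such as
 * async_tx might carry mapping state with a descriptor. The page/offset/len
 * variables are hypothetical; the reference is dropped with
 * dmaengine_unmap_put() once the transfer has completed.
 *
 *	struct dmaengine_unmap_data *unmap;
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (unmap) {
 *		unmap->to_cnt = 1;
 *		unmap->from_cnt = 1;
 *		unmap->len = len;
 *		unmap->addr[0] = dma_map_page(dev, src_page, src_off, len,
 *					      DMA_TO_DEVICE);
 *		unmap->addr[1] = dma_map_page(dev, dst_page, dst_off, len,
 *					      DMA_FROM_DEVICE);
 *		...
 *		dmaengine_unmap_put(unmap);
 *	}
 */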
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);