// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations
 * while the DMAengine framework is used to perform the DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals which are
 * connected to a DMA controller which has a DMAengine driver implementation.
 */

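/*
 * struct dmaengine_buffer - DMAengine-backed IIO buffer state
 * @queue: Generic IIO DMA buffer queue
 * @chan: DMAengine channel used for the transfers
 * @active: List of blocks currently submitted to the DMA controller
 * @align: Transfer length alignment requirement, in bytes
 * @max_size: Maximum segment size supported by the DMA controller
 */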
struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

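/*
 * DMAengine completion callback. Removes the finished block from the active
 * list, trims the byte count by the residue the DMA driver reported for
 * partial transfers and hands the block back to the core DMA buffer code.
 */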
static void iio_dmaengine_buffer_block_done(void *data,
		const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}

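/*
 * Submit one block to the DMAengine channel. Blocks backed by an sg_table
 * (the DMABUF path) are converted to a dma_vec array and submitted with
 * dmaengine_prep_peripheral_dma_vec(); all other blocks are submitted as a
 * single contiguous transfer with dmaengine_prep_slave_single(), clamped to
 * the channel's maximum segment size and alignment.
 */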
static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dma_dir;
	struct scatterlist *sgl;
	struct dma_vec *vecs;
	size_t max_size;
	dma_cookie_t cookie;
	size_t len_total;
	unsigned int i;
	int nents;

	max_size = min(block->size, dmaengine_buffer->max_size);
	max_size = round_down(max_size, dmaengine_buffer->align);

	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
		dma_dir = DMA_DEV_TO_MEM;
	else
		dma_dir = DMA_MEM_TO_DEV;

	if (block->sg_table) {
		sgl = block->sg_table->sgl;
		nents = sg_nents_for_len(sgl, block->bytes_used);
		if (nents < 0)
			return nents;

		vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
		if (!vecs)
			return -ENOMEM;

		len_total = block->bytes_used;

		for (i = 0; i < nents; i++) {
			vecs[i].addr = sg_dma_address(sgl);
			vecs[i].len = min(sg_dma_len(sgl), len_total);
			len_total -= vecs[i].len;

			sgl = sg_next(sgl);
		}

		desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
							 vecs, nents, dma_dir,
							 DMA_PREP_INTERRUPT);
		kfree(vecs);
	} else {
		max_size = min(block->size, dmaengine_buffer->max_size);
		max_size = round_down(max_size, dmaengine_buffer->align);

		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
			block->bytes_used = max_size;

		if (!block->bytes_used || block->bytes_used > max_size)
			return -EINVAL;

		desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
						   block->phys_addr,
						   block->bytes_used,
						   dma_dir,
						   DMA_PREP_INTERRUPT);
	}
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

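/*
 * Abort all pending transfers: terminate the channel synchronously, then
 * return every block still on the active list to the core as aborted.
 */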
static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

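/*
 * Release callback, invoked once the last reference to the buffer is
 * dropped: tear down the core DMA buffer state and free the wrapper.
 */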
static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

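/*
 * Everything except .release is handled by the generic IIO DMA buffer
 * infrastructure; the DMAengine specifics live in iio_dmaengine_default_ops
 * below.
 */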
static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.write = iio_dma_buffer_write,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_usage,
	.space_available = iio_dma_buffer_usage,
	.release = iio_dmaengine_buffer_release,

	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,

	.lock_queue = iio_dma_buffer_lock_queue,
	.unlock_queue = iio_dma_buffer_unlock_queue,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

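/*
 * length_align_bytes tells userspace the alignment that buffer lengths must
 * satisfy for this DMA channel (see the align computation in
 * iio_dmaengine_buffer_alloc()).
 */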
static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes,
	NULL,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine
 * framework to perform its transfers. The parent device will be used to
 * request the DMA channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/* Needs to be aligned to the maximum of the minimums */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);

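/**
 * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc() and
 * attaches it to the IIO device with iio_device_attach_buffer(). It also
 * appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 */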
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
						  struct iio_dev *indio_dev,
						  const char *channel,
						  enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_dmaengine_buffer_alloc(dev, channel);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	buffer->direction = dir;

	ret = iio_device_attach_buffer(indio_dev, buffer);
	if (ret) {
		iio_dmaengine_buffer_free(buffer);
		return ERR_PTR(ret);
	}

	return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);

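/* devm action callback: free the buffer when the parent device goes away. */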
static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

/**
 * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This is the device-managed version of iio_dmaengine_buffer_setup_ext(): the
 * buffer is allocated, attached to the IIO device with
 * iio_device_attach_buffer() and automatically freed when @dev is detached.
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 */
int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
					struct iio_dev *indio_dev,
					const char *channel,
					enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;

	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
					buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);

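/*
 * Example (a sketch, not part of this file): a hypothetical capture driver
 * would typically wire up the buffer from its probe function:
 *
 *	ret = devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
 *						  IIO_BUFFER_DIRECTION_IN);
 *	if (ret)
 *		return ret;
 *
 * where "rx" is the DMA channel name from the device's firmware description.
 */
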
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(IIO_DMA_BUFFER);