// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected
 * to a dedicated bus which is directly connected to a data port on a
 * peripheral. Given that those are configuration options of the core that are
 * selected when it is instantiated, they can not be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */
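
/*
 * Consumer-side usage (a minimal sketch, not part of this driver): a client
 * driver sitting on the memory side of a DMA_DEV_TO_MEM channel goes through
 * the generic dmaengine API. The channel name "rx" and the done_cb callback
 * below are hypothetical.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_addr, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = done_cb;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Note that no device-side address is programmed anywhere; only the memory
 * side of the transfer is described.
 */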

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE	BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

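/*
 * Submit the next scatter/gather segment of the active descriptor to the
 * hardware transfer queue. Called with the channel's vchan lock held, from
 * issue_pending and from the interrupt handler once a queue slot frees up.
 */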
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
	    desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

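/*
 * Cores from version 4.2.a onwards can report transfers that the device side
 * ended early, i.e. with fewer bytes than programmed. The actual length of
 * such a segment is read back from the PARTIAL_XFER registers and matched to
 * the submitted segment via its hardware transfer ID.
 */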
static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}

static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				"Partial segment id=%u, len=%u not found\n",
				id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}

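/*
 * Retire every segment whose hardware ID is set in the EOT completion mask.
 * Completed non-cyclic descriptors are removed from the active list and their
 * cookies completed. Returns true if a segment was waiting for a free queue
 * slot and the caller should start the next transfer.
 */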
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}

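/*
 * A single interrupt line signals both start-of-transfer (a slot in the
 * hardware queue became free) and end-of-transfer (one or more segments
 * completed, reported as a bitmask of transfer IDs in TRANSFER_DONE).
 */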
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

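/*
 * Terminating clears the global enable bit, which stops the core. All queued
 * and active descriptors are moved to a local list and freed outside of the
 * lock.
 */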
static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}

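/*
 * Split a contiguous block into hardware segments of at most max_length
 * bytes. All segments except a possible remainder are equally sized, with the
 * segment size rounded up to the transfer length alignment of the core.
 */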
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction,
			sg_dma_address(sg), 1, sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

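/*
 * A cyclic buffer is described as num_periods * num_segments linear segments.
 * Marking the descriptor as cyclic makes the submission and completion logic
 * wrap around instead of retiring it, with vchan_cyclic_callback() invoked as
 * segments complete.
 */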
static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

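/*
 * Interleaved transfers map directly onto the core's optional 2D mode:
 * sgl[0].size becomes the X length, numf the Y length and size plus the
 * inter-chunk gap the stride. Without 2D support only templates without gaps,
 * which degenerate to a single linear segment, are accepted.
 */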
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	return 0;
}

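/*
 * Most instantiation-time parameters of the core are not exposed through
 * dedicated capability registers. They are instead probed by writing all-ones
 * to a register and reading back what the hardware actually latched; register
 * bits backing unimplemented features read back as zero.
 */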
static int axi_dmac_detect_caps(struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int version;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	ret = axi_dmac_detect_caps(dmac);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	return 0;

err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");