// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 * Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected
 * to a dedicated bus which is directly connected to a data port on a
 * peripheral. Since these are configuration options of the core that are
 * selected when it is instantiated, they cannot be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE	BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

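/*
 * Push the next queued segment to the hardware. The controller exposes a
 * small transfer queue: START_TRANSFER reads back non-zero while that queue
 * is full, in which case submission is retried from the next
 * start-of-transfer (SOT) interrupt.
 */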
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers, there is no callback to
	 * call, and the transfer consists of a single segment, enable hw
	 * cyclic mode to avoid unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
		desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}

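/*
 * Drain the hardware's partial transfer reporting queue. Each entry gives
 * the ID and the actual byte count of a segment that completed short (e.g.
 * because the device ended the transfer early). The matching segment in the
 * active descriptors is marked so the residue can be reported on completion.
 */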
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				"Partial segment id=%u, len=%u not found\n",
				id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

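/*
 * Fold the shortfall of the partially completed segment, plus all segments
 * that were never transferred after it, into the reported residue.
 */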
static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}

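/*
 * Retire every segment whose ID is flagged in the TRANSFER_DONE bitmap.
 * Returns true if a segment was waiting for its hardware queue slot to free
 * up, in which case the caller should schedule the next transfer.
 */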
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

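/* Stop the controller and reclaim all queued and active descriptors. */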
static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}

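/*
 * Split one contiguous span into hardware segments. A worked example,
 * assuming max_length = 4096 and length_align_mask = 0x3: a 12000 byte
 * period needs DIV_ROUND_UP(12000, 4096) = 3 segments, and the rounded-up,
 * alignment-padded segment size is 4000 bytes, so the period is emitted as
 * three 4000 byte segments.
 */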
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}

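/*
 * Prepare a scatter-gather transfer. Each scatterlist entry may expand into
 * several hardware segments when it exceeds the controller's maximum
 * segment length.
 */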
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

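/*
 * Prepare a cyclic transfer. The buffer must be an exact multiple of the
 * period length; each period is split into segments just like a
 * scatterlist entry.
 */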
static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

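/*
 * Prepare an interleaved transfer from a single-frame template. With 2D
 * hardware the frame maps directly to the X/Y length and stride registers;
 * without it the template is only accepted when there are no inter-chunk
 * gaps, so the frames collapse into one linear segment.
 */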
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

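/*
 * Every register of the core is both readable and writeable, so a single
 * accessibility check serves as both the readable_reg and writeable_reg
 * callback of the regmap.
 */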
static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	return 0;
}

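/*
 * A sketch of a channel node that this parser accepts. The property values
 * below are illustrative assumptions, not requirements of the core:
 *
 *	adi,channels {
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		dma-channel@0 {
 *			reg = <0>;
 *			adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *			adi,source-bus-width = <64>;
 *			adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *			adi,destination-bus-width = <64>;
 *		};
 *	};
 */

/*
 * Detect the instantiation-time capabilities of the core by writing to
 * optional registers and reading back the result: features that were not
 * synthesized read back as zero.
 */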
static int axi_dmac_detect_caps(struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int version;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.\n");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.\n");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	ret = axi_dmac_detect_caps(dmac);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);

	return 0;

err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");