// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected
 * to a dedicated bus which is directly connected to a data port on a
 * peripheral. Since those are configuration options of the core that are
 * selected when it is instantiated, they can not be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID	0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int align_mask;

	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;

	struct device_dma_parameters dma_parms;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

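/*
 * Program the next scatter-gather segment of the active descriptor into the
 * hardware. If the hardware transfer queue is full, or the segment is still
 * in flight (cyclic mode), submission is deferred until the next
 * start-of-transfer interrupt respectively until the segment completes.
 */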
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
	    desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

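/*
 * Retire the segments of the active descriptor whose IDs the hardware
 * reports as completed, issue cyclic callbacks, and complete the descriptor
 * once all of its segments are done. Returns true if a segment that was
 * waiting for its hardware slot to become free should now be resubmitted.
 */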
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}

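/*
 * Interrupt handler: acknowledge the pending interrupts, retire completed
 * segments on end-of-transfer, and queue the next segment when either the
 * hardware signals free queue space (start-of-transfer) or a deferred
 * cyclic segment has become free again.
 */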
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}

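/*
 * Fill a linear run of scatter-gather entries for a contiguous buffer of
 * num_periods periods of period_len bytes each. Periods larger than the
 * maximum hardware transfer length are split into equally sized segments,
 * rounded up to the channel alignment. Returns a pointer to the entry
 * following the last one that was filled.
 */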
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
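/*
 * For reference, a channel node as parsed below might look roughly like the
 * following (an illustrative sketch only; node name and values depend on the
 * synthesized core configuration and the actual binding):
 *
 *	dma-channel@0 {
 *		reg = <0>;
 *		adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *		adi,source-bus-width = <64>;
 *		adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_FIFO>;
 *		adi,destination-bus-width = <64>;
 *	};
 */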
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;

	return 0;
}

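/*
 * Detect the optional features of the instantiated core by writing to the
 * corresponding registers and reading the values back: cores without cyclic
 * or 2D support ignore the respective writes, and the X_LENGTH register
 * saturates at the maximum supported transfer length.
 */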
static void axi_dmac_detect_caps(struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;
}

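/*
 * Probe: map the register space, parse the channel description from the
 * devicetree, detect the hardware capabilities, and register the DMA device
 * with the dmaengine framework and the OF DMA helpers.
 */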
static int axi_dmac_probe(struct platform_device *pdev)
{
	struct device_node *of_channels, *of_chan;
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	pdev->dev.dma_parms = &dmac->dma_parms;
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	axi_dmac_detect_caps(dmac);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	return 0;

err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");