dmaengine: axi-dmac: Discover length alignment requirement
drivers/dma/dma-axi-dmac.c
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected to
 * a dedicated bus which is directly connected to a data port on a peripheral.
 * Given that those are configuration options of the core that are selected when
 * it is instantiated, they can not be changed by software at runtime. By
 * extension this means that each channel is uni-directional. It can either be
 * device to memory or memory to device, but not both. Also, since the device
 * side is a dedicated data bus only connected to a single peripheral, there is
 * no address that can or needs to be configured for the device side.
 */

#define AXI_DMAC_REG_IRQ_MASK           0x80
#define AXI_DMAC_REG_IRQ_PENDING        0x84
#define AXI_DMAC_REG_IRQ_SOURCE         0x88

#define AXI_DMAC_REG_CTRL               0x400
#define AXI_DMAC_REG_TRANSFER_ID        0x404
#define AXI_DMAC_REG_START_TRANSFER     0x408
#define AXI_DMAC_REG_FLAGS              0x40c
#define AXI_DMAC_REG_DEST_ADDRESS       0x410
#define AXI_DMAC_REG_SRC_ADDRESS        0x414
#define AXI_DMAC_REG_X_LENGTH           0x418
#define AXI_DMAC_REG_Y_LENGTH           0x41c
#define AXI_DMAC_REG_DEST_STRIDE        0x420
#define AXI_DMAC_REG_SRC_STRIDE         0x424
#define AXI_DMAC_REG_TRANSFER_DONE      0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS             0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR   0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR  0x438

#define AXI_DMAC_CTRL_ENABLE            BIT(0)
#define AXI_DMAC_CTRL_PAUSE             BIT(1)

#define AXI_DMAC_IRQ_SOT                BIT(0)
#define AXI_DMAC_IRQ_EOT                BIT(1)

#define AXI_DMAC_FLAG_CYCLIC            BIT(0)
#define AXI_DMAC_FLAG_LAST              BIT(1)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
        dma_addr_t src_addr;
        dma_addr_t dest_addr;
        unsigned int x_len;
        unsigned int y_len;
        unsigned int dest_stride;
        unsigned int src_stride;
        unsigned int id;
        bool schedule_when_free;
};

struct axi_dmac_desc {
        struct virt_dma_desc vdesc;
        bool cyclic;

        unsigned int num_submitted;
        unsigned int num_completed;
        unsigned int num_sgs;
        struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
        struct virt_dma_chan vchan;

        struct axi_dmac_desc *next_desc;
        struct list_head active_descs;
        enum dma_transfer_direction direction;

        unsigned int src_width;
        unsigned int dest_width;
        unsigned int src_type;
        unsigned int dest_type;

        unsigned int max_length;
        unsigned int address_align_mask;
        unsigned int length_align_mask;

        bool hw_cyclic;
        bool hw_2d;
};

struct axi_dmac {
        void __iomem *base;
        int irq;

        struct clk *clk;

        struct dma_device dma_dev;
        struct axi_dmac_chan chan;

        struct device_dma_parameters dma_parms;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
        return container_of(chan->vchan.chan.device, struct axi_dmac,
                dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
        return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
        return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
        unsigned int val)
{
        writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
        return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
        return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
        return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
        if (len == 0)
                return false;
        if ((len & chan->length_align_mask) != 0) /* Not aligned */
                return false;
        return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
        if ((addr & chan->address_align_mask) != 0) /* Not aligned */
                return false;
        return true;
}

static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        struct virt_dma_desc *vdesc;
        struct axi_dmac_desc *desc;
        struct axi_dmac_sg *sg;
        unsigned int flags = 0;
        unsigned int val;

        val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
        if (val) /* Queue is full, wait for the next SOT IRQ */
                return;

        desc = chan->next_desc;

        if (!desc) {
                vdesc = vchan_next_desc(&chan->vchan);
                if (!vdesc)
                        return;
                list_move_tail(&vdesc->node, &chan->active_descs);
                desc = to_axi_dmac_desc(vdesc);
        }
        sg = &desc->sg[desc->num_submitted];

        /* Already queued in cyclic mode. Wait for it to finish */
        if (sg->id != AXI_DMAC_SG_UNUSED) {
                sg->schedule_when_free = true;
                return;
        }

        desc->num_submitted++;
        if (desc->num_submitted == desc->num_sgs) {
                if (desc->cyclic)
                        desc->num_submitted = 0; /* Start again */
                else
                        chan->next_desc = NULL;
                flags |= AXI_DMAC_FLAG_LAST;
        } else {
                chan->next_desc = desc;
        }

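        /*
         * Reading TRANSFER_ID returns the ID the hardware assigns to the
         * transfer queued next; remember it so this segment can later be
         * matched against the TRANSFER_DONE bitmask.
         */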
        sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

        if (axi_dmac_dest_is_mem(chan)) {
                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
        }

        if (axi_dmac_src_is_mem(chan)) {
                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
        }

        /*
         * If the hardware supports cyclic transfers and there is no callback to
         * call and only a single segment, enable hw cyclic mode to avoid
         * unnecessary interrupts.
         */
        if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
                desc->num_sgs == 1)
                flags |= AXI_DMAC_FLAG_CYCLIC;
        axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
        axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
        axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
        axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
        return list_first_entry_or_null(&chan->active_descs,
                struct axi_dmac_desc, vdesc.node);
}

static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
        unsigned int completed_transfers)
{
        struct axi_dmac_desc *active;
        struct axi_dmac_sg *sg;
        bool start_next = false;

        active = axi_dmac_active_desc(chan);
        if (!active)
                return false;

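        /*
         * completed_transfers is the TRANSFER_DONE bitmask; retire the
         * submitted segments, in order, whose transfer ID bit is set.
         */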
        do {
                sg = &active->sg[active->num_completed];
                if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
                        break;
                if (!(BIT(sg->id) & completed_transfers))
                        break;
                active->num_completed++;
                sg->id = AXI_DMAC_SG_UNUSED;
                if (sg->schedule_when_free) {
                        sg->schedule_when_free = false;
                        start_next = true;
                }

                if (active->cyclic)
                        vchan_cyclic_callback(&active->vdesc);

                if (active->num_completed == active->num_sgs) {
                        if (active->cyclic) {
                                active->num_completed = 0; /* wrap around */
                        } else {
                                list_del(&active->vdesc.node);
                                vchan_cookie_complete(&active->vdesc);
                                active = axi_dmac_active_desc(chan);
                        }
                }
        } while (active);

        return start_next;
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
        struct axi_dmac *dmac = devid;
        unsigned int pending;
        bool start_next = false;

        pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
        if (!pending)
                return IRQ_NONE;

        axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

        spin_lock(&dmac->chan.vchan.lock);
        /* One or more transfers have finished */
        if (pending & AXI_DMAC_IRQ_EOT) {
                unsigned int completed;

                completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
                start_next = axi_dmac_transfer_done(&dmac->chan, completed);
        }
        /* Space has become available in the descriptor queue */
        if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
                axi_dmac_start_transfer(&dmac->chan);
        spin_unlock(&dmac->chan.vchan.lock);

        return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
        chan->next_desc = NULL;
        vchan_get_all_descriptors(&chan->vchan, &head);
        list_splice_tail_init(&chan->active_descs, &head);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&chan->vchan, &head);

        return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

        vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        unsigned long flags;

        axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        if (vchan_issue_pending(&chan->vchan))
                axi_dmac_start_transfer(chan);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
        struct axi_dmac_desc *desc;
        unsigned int i;

        desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
        if (!desc)
                return NULL;

        for (i = 0; i < num_sgs; i++)
                desc->sg[i].id = AXI_DMAC_SG_UNUSED;

        desc->num_sgs = num_sgs;

        return desc;
}

static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
        enum dma_transfer_direction direction, dma_addr_t addr,
        unsigned int num_periods, unsigned int period_len,
        struct axi_dmac_sg *sg)
{
        unsigned int num_segments, i;
        unsigned int segment_size;
        unsigned int len;

        /* Split into multiple equally sized segments if necessary */
        num_segments = DIV_ROUND_UP(period_len, chan->max_length);
        segment_size = DIV_ROUND_UP(period_len, num_segments);
        /*
         * Take care of alignment: round the segment size up to the next
         * multiple of the length alignment requirement (e.g. with a 4-byte
         * requirement, mask 0x3, a 1022 byte segment becomes 1024 bytes).
         */
        segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

        for (i = 0; i < num_periods; i++) {
                len = period_len;

                while (len > segment_size) {
                        if (direction == DMA_DEV_TO_MEM)
                                sg->dest_addr = addr;
                        else
                                sg->src_addr = addr;
                        sg->x_len = segment_size;
                        sg->y_len = 1;
                        sg++;
                        addr += segment_size;
                        len -= segment_size;
                }

                if (direction == DMA_DEV_TO_MEM)
                        sg->dest_addr = addr;
                else
                        sg->src_addr = addr;
                sg->x_len = len;
                sg->y_len = 1;
                sg++;
                addr += len;
        }

        return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
        struct dma_chan *c, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
        unsigned long flags, void *context)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac_desc *desc;
        struct axi_dmac_sg *dsg;
        struct scatterlist *sg;
        unsigned int num_sgs;
        unsigned int i;

        if (direction != chan->direction)
                return NULL;

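        /*
         * A scatterlist entry longer than the maximum transfer length has to
         * be split across multiple hardware segments.
         */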
        num_sgs = 0;
        for_each_sg(sgl, sg, sg_len, i)
                num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

        desc = axi_dmac_alloc_desc(num_sgs);
        if (!desc)
                return NULL;

        dsg = desc->sg;

        for_each_sg(sgl, sg, sg_len, i) {
                if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
                    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
                        kfree(desc);
                        return NULL;
                }

                dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
                        sg_dma_len(sg), dsg);
        }

        desc->cyclic = false;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
        struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac_desc *desc;
        unsigned int num_periods, num_segments;

        if (direction != chan->direction)
                return NULL;

        if (!axi_dmac_check_len(chan, buf_len) ||
            !axi_dmac_check_addr(chan, buf_addr))
                return NULL;

        if (period_len == 0 || buf_len % period_len)
                return NULL;

        num_periods = buf_len / period_len;
        num_segments = DIV_ROUND_UP(period_len, chan->max_length);

        desc = axi_dmac_alloc_desc(num_periods * num_segments);
        if (!desc)
                return NULL;

        axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
                period_len, desc->sg);

        desc->cyclic = true;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
        struct dma_chan *c, struct dma_interleaved_template *xt,
        unsigned long flags)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac_desc *desc;
        size_t dst_icg, src_icg;

        if (xt->frame_size != 1)
                return NULL;

        if (xt->dir != chan->direction)
                return NULL;

        if (axi_dmac_src_is_mem(chan)) {
                if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
                        return NULL;
        }

        if (axi_dmac_dest_is_mem(chan)) {
                if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
                        return NULL;
        }

        dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
        src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

        if (chan->hw_2d) {
                if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
                    xt->numf == 0)
                        return NULL;
                if (xt->sgl[0].size + dst_icg > chan->max_length ||
                    xt->sgl[0].size + src_icg > chan->max_length)
                        return NULL;
        } else {
                if (dst_icg != 0 || src_icg != 0)
                        return NULL;
                if (chan->max_length / xt->sgl[0].size < xt->numf)
                        return NULL;
                if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
                        return NULL;
        }

        desc = axi_dmac_alloc_desc(1);
        if (!desc)
                return NULL;

        if (axi_dmac_src_is_mem(chan)) {
                desc->sg[0].src_addr = xt->src_start;
                desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
        }

        if (axi_dmac_dest_is_mem(chan)) {
                desc->sg[0].dest_addr = xt->dst_start;
                desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
        }

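        /*
         * With 2D support each frame maps to one row (x_len) and numf rows
         * (y_len); without it the frames must be contiguous (zero inter-chunk
         * gap) and are flattened into a single 1D transfer.
         */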
        if (chan->hw_2d) {
                desc->sg[0].x_len = xt->sgl[0].size;
                desc->sg[0].y_len = xt->numf;
        } else {
                desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
                desc->sg[0].y_len = 1;
        }

        if (flags & DMA_CYCLIC)
                desc->cyclic = true;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
        vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
        kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
        struct axi_dmac_chan *chan)
{
        u32 val;
        int ret;

        ret = of_property_read_u32(of_chan, "reg", &val);
        if (ret)
                return ret;

        /* We only support 1 channel for now */
        if (val != 0)
                return -EINVAL;

        ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
        if (ret)
                return ret;
        if (val > AXI_DMAC_BUS_TYPE_FIFO)
                return -EINVAL;
        chan->src_type = val;

        ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
        if (ret)
                return ret;
        if (val > AXI_DMAC_BUS_TYPE_FIFO)
                return -EINVAL;
        chan->dest_type = val;

        ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
        if (ret)
                return ret;
        chan->src_width = val / 8;

        ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
        if (ret)
                return ret;
        chan->dest_width = val / 8;

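        /* Addresses must be aligned to the wider of the two bus widths (in bytes). */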
        chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

        if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
                chan->direction = DMA_MEM_TO_MEM;
        else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
                chan->direction = DMA_MEM_TO_DEV;
        else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
                chan->direction = DMA_DEV_TO_MEM;
        else
                chan->direction = DMA_DEV_TO_DEV;

        return 0;
}

static int axi_dmac_detect_caps(struct axi_dmac *dmac)
{
        struct axi_dmac_chan *chan = &dmac->chan;
        unsigned int version;

        version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

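        /*
         * Optional features are detected by writing to the corresponding
         * register and checking whether the value sticks; bits belonging to
         * features the core was not synthesized with do not.
         */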
        axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
        if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
                chan->hw_cyclic = true;

        axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
        if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
                chan->hw_2d = true;

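        /*
         * X_LENGTH holds the transfer length minus one, so writing all-ones
         * and reading back yields the maximum supported length minus one.
         * Add one unless that would overflow.
         */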
        axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
        chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
        if (chan->max_length != UINT_MAX)
                chan->max_length++;

        axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
        if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
            chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
                dev_err(dmac->dma_dev.dev,
                        "Destination memory-mapped interface not supported.");
                return -ENODEV;
        }

        axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
        if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
            chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
                dev_err(dmac->dma_dev.dev,
                        "Source memory-mapped interface not supported.");
                return -ENODEV;
        }

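        /*
         * Starting with core version 4.1.a the length alignment requirement
         * can be discovered at runtime: writing zero to X_LENGTH and reading
         * it back returns the length alignment mask. Older cores require the
         * length to be aligned to the widest bus width.
         */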
        if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
                axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
                chan->length_align_mask =
                        axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
        } else {
                chan->length_align_mask = chan->address_align_mask;
        }

        return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
        struct device_node *of_channels, *of_chan;
        struct dma_device *dma_dev;
        struct axi_dmac *dmac;
        struct resource *res;
        int ret;

        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
        if (!dmac)
                return -ENOMEM;

        dmac->irq = platform_get_irq(pdev, 0);
        if (dmac->irq < 0)
                return dmac->irq;
        if (dmac->irq == 0)
                return -EINVAL;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dmac->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(dmac->base))
                return PTR_ERR(dmac->base);

        dmac->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(dmac->clk))
                return PTR_ERR(dmac->clk);

        INIT_LIST_HEAD(&dmac->chan.active_descs);

        of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
        if (of_channels == NULL)
                return -ENODEV;

        for_each_child_of_node(of_channels, of_chan) {
                ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
                if (ret) {
                        of_node_put(of_chan);
                        of_node_put(of_channels);
                        return -EINVAL;
                }
        }
        of_node_put(of_channels);

        pdev->dev.dma_parms = &dmac->dma_parms;
        dma_set_max_seg_size(&pdev->dev, UINT_MAX);

        dma_dev = &dmac->dma_dev;
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
        dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
        dma_dev->device_tx_status = dma_cookie_status;
        dma_dev->device_issue_pending = axi_dmac_issue_pending;
        dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
        dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
        dma_dev->device_terminate_all = axi_dmac_terminate_all;
        dma_dev->device_synchronize = axi_dmac_synchronize;
        dma_dev->dev = &pdev->dev;
        dma_dev->chancnt = 1;
        dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
        dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
        dma_dev->directions = BIT(dmac->chan.direction);
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        INIT_LIST_HEAD(&dma_dev->channels);

        dmac->chan.vchan.desc_free = axi_dmac_desc_free;
        vchan_init(&dmac->chan.vchan, dma_dev);

        ret = clk_prepare_enable(dmac->clk);
        if (ret < 0)
                return ret;

        ret = axi_dmac_detect_caps(dmac);
        if (ret)
                goto err_clk_disable;

        axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_clk_disable;

        ret = of_dma_controller_register(pdev->dev.of_node,
                of_dma_xlate_by_chan_id, dma_dev);
        if (ret)
                goto err_unregister_device;

        ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
                dev_name(&pdev->dev), dmac);
        if (ret)
                goto err_unregister_of;

        platform_set_drvdata(pdev, dmac);

        return 0;

err_unregister_of:
        of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
        dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
        clk_disable_unprepare(dmac->clk);

        return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
        struct axi_dmac *dmac = platform_get_drvdata(pdev);

        of_dma_controller_free(pdev->dev.of_node);
        free_irq(dmac->irq, dmac);
        tasklet_kill(&dmac->chan.vchan.task);
        dma_async_device_unregister(&dmac->dma_dev);
        clk_disable_unprepare(dmac->clk);

        return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
        { .compatible = "adi,axi-dmac-1.00.a" },
        { },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
        .driver = {
                .name = "dma-axi-dmac",
                .of_match_table = axi_dmac_of_match_table,
        },
        .probe = axi_dmac_probe,
        .remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");