Merge tag 'vfio-v4.20-rc1.v2' of git://github.com/awilliam/linux-vfio
[linux-2.6-block.git] / drivers / staging / mt7621-dma / mtk-hsdma.c
1 /*
2  *  Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
3  *  MTK HSDMA support
4  *
5  *  This program is free software; you can redistribute it and/or modify it
6  *  under  the terms of the GNU General  Public License as published by the
7  *  Free Software Foundation;  either version 2 of the License, or (at your
8  *  option) any later version.
9  *
10  */
11
12 #include <linux/dmaengine.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/err.h>
15 #include <linux/init.h>
16 #include <linux/list.h>
17 #include <linux/module.h>
18 #include <linux/platform_device.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/irq.h>
22 #include <linux/of_dma.h>
23 #include <linux/reset.h>
24 #include <linux/of_device.h>
25
26 #include "virt-dma.h"
27
28 #define HSDMA_BASE_OFFSET               0x800
29
30 #define HSDMA_REG_TX_BASE               0x00
31 #define HSDMA_REG_TX_CNT                0x04
32 #define HSDMA_REG_TX_CTX                0x08
33 #define HSDMA_REG_TX_DTX                0x0c
34 #define HSDMA_REG_RX_BASE               0x100
35 #define HSDMA_REG_RX_CNT                0x104
36 #define HSDMA_REG_RX_CRX                0x108
37 #define HSDMA_REG_RX_DRX                0x10c
38 #define HSDMA_REG_INFO                  0x200
39 #define HSDMA_REG_GLO_CFG               0x204
40 #define HSDMA_REG_RST_CFG               0x208
41 #define HSDMA_REG_DELAY_INT             0x20c
42 #define HSDMA_REG_FREEQ_THRES           0x210
43 #define HSDMA_REG_INT_STATUS            0x220
44 #define HSDMA_REG_INT_MASK              0x228
45 #define HSDMA_REG_SCH_Q01               0x280
46 #define HSDMA_REG_SCH_Q23               0x284
47
48 #define HSDMA_DESCS_MAX                 0xfff
49 #define HSDMA_DESCS_NUM                 8
50 #define HSDMA_DESCS_MASK                (HSDMA_DESCS_NUM - 1)
51 #define HSDMA_NEXT_DESC(x)              (((x) + 1) & HSDMA_DESCS_MASK)
52
53 /* HSDMA_REG_INFO */
54 #define HSDMA_INFO_INDEX_MASK           0xf
55 #define HSDMA_INFO_INDEX_SHIFT          24
56 #define HSDMA_INFO_BASE_MASK            0xff
57 #define HSDMA_INFO_BASE_SHIFT           16
58 #define HSDMA_INFO_RX_MASK              0xff
59 #define HSDMA_INFO_RX_SHIFT             8
60 #define HSDMA_INFO_TX_MASK              0xff
61 #define HSDMA_INFO_TX_SHIFT             0
62
63 /* HSDMA_REG_GLO_CFG */
64 #define HSDMA_GLO_TX_2B_OFFSET          BIT(31)
65 #define HSDMA_GLO_CLK_GATE              BIT(30)
66 #define HSDMA_GLO_BYTE_SWAP             BIT(29)
67 #define HSDMA_GLO_MULTI_DMA             BIT(10)
68 #define HSDMA_GLO_TWO_BUF               BIT(9)
69 #define HSDMA_GLO_32B_DESC              BIT(8)
70 #define HSDMA_GLO_BIG_ENDIAN            BIT(7)
71 #define HSDMA_GLO_TX_DONE               BIT(6)
72 #define HSDMA_GLO_BT_MASK               0x3
73 #define HSDMA_GLO_BT_SHIFT              4
74 #define HSDMA_GLO_RX_BUSY               BIT(3)
75 #define HSDMA_GLO_RX_DMA                BIT(2)
76 #define HSDMA_GLO_TX_BUSY               BIT(1)
77 #define HSDMA_GLO_TX_DMA                BIT(0)
78
79 #define HSDMA_BT_SIZE_16BYTES           (0 << HSDMA_GLO_BT_SHIFT)
80 #define HSDMA_BT_SIZE_32BYTES           (1 << HSDMA_GLO_BT_SHIFT)
81 #define HSDMA_BT_SIZE_64BYTES           (2 << HSDMA_GLO_BT_SHIFT)
82 #define HSDMA_BT_SIZE_128BYTES          (3 << HSDMA_GLO_BT_SHIFT)
83
84 #define HSDMA_GLO_DEFAULT               (HSDMA_GLO_MULTI_DMA | \
85                 HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
86
87 /* HSDMA_REG_RST_CFG */
88 #define HSDMA_RST_RX_SHIFT              16
89 #define HSDMA_RST_TX_SHIFT              0
90
91 /* HSDMA_REG_DELAY_INT */
92 #define HSDMA_DELAY_INT_EN              BIT(15)
93 #define HSDMA_DELAY_PEND_OFFSET         8
94 #define HSDMA_DELAY_TIME_OFFSET         0
95 #define HSDMA_DELAY_TX_OFFSET           16
96 #define HSDMA_DELAY_RX_OFFSET           0
97
98 #define HSDMA_DELAY_INIT(x)             (HSDMA_DELAY_INT_EN | \
99                 ((x) << HSDMA_DELAY_PEND_OFFSET))
100 #define HSDMA_DELAY(x)                  ((HSDMA_DELAY_INIT(x) << \
101                 HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
102
103 /* HSDMA_REG_INT_STATUS */
104 #define HSDMA_INT_DELAY_RX_COH          BIT(31)
105 #define HSDMA_INT_DELAY_RX_INT          BIT(30)
106 #define HSDMA_INT_DELAY_TX_COH          BIT(29)
107 #define HSDMA_INT_DELAY_TX_INT          BIT(28)
108 #define HSDMA_INT_RX_MASK               0x3
109 #define HSDMA_INT_RX_SHIFT              16
110 #define HSDMA_INT_RX_Q0                 BIT(16)
111 #define HSDMA_INT_TX_MASK               0xf
112 #define HSDMA_INT_TX_SHIFT              0
113 #define HSDMA_INT_TX_Q0                 BIT(0)
114
115 /* tx/rx dma desc flags */
116 #define HSDMA_PLEN_MASK                 0x3fff
117 #define HSDMA_DESC_DONE                 BIT(31)
118 #define HSDMA_DESC_LS0                  BIT(30)
119 #define HSDMA_DESC_PLEN0(_x)            (((_x) & HSDMA_PLEN_MASK) << 16)
120 #define HSDMA_DESC_TAG                  BIT(15)
121 #define HSDMA_DESC_LS1                  BIT(14)
122 #define HSDMA_DESC_PLEN1(_x)            ((_x) & HSDMA_PLEN_MASK)
123
124 /* align 4 bytes */
125 #define HSDMA_ALIGN_SIZE                3
126 /* align size 128bytes */
127 #define HSDMA_MAX_PLEN                  0x3f80
128
/*
 * Hardware descriptor layout (16 bytes), shared with the DMA engine via
 * the coherent ring allocated in mtk_hsdam_alloc_desc().
 */
struct hsdma_desc {
        u32 addr0;      /* bus address of the first buffer half */
        u32 flags;      /* HSDMA_DESC_* bits plus PLEN0/PLEN1 lengths */
        u32 addr1;      /* bus address of the second buffer half */
        u32 unused;
};
135
/* One memcpy request: a single contiguous source/destination span. */
struct mtk_hsdma_sg {
        dma_addr_t src_addr;
        dma_addr_t dst_addr;
        u32 len;        /* transfer length in bytes */
};
141
/* Software descriptor wrapping a virt-dma descriptor. */
struct mtk_hsdma_desc {
        struct virt_dma_desc vdesc;
        unsigned int num_sgs;           /* chunk count, HSDMA_MAX_PLEN each */
        struct mtk_hsdma_sg sg[1];      /* only sg[0] is ever used */
};
147
struct mtk_hsdma_chan {
        struct virt_dma_chan vchan;
        unsigned int id;                /* channel index (only 0 exists) */
        dma_addr_t desc_addr;           /* bus address of tx+rx ring block */
        int tx_idx;                     /* next tx ring slot to fill */
        int rx_idx;                     /* last rx slot handed to hardware */
        struct hsdma_desc *tx_ring;     /* coherent ring: tx slots first... */
        struct hsdma_desc *rx_ring;     /* ...rx slots follow in same block */
        struct mtk_hsdma_desc *desc;    /* request currently in flight */
        unsigned int next_sg;           /* completed chunk count for @desc */
};
159
/* Driver/engine state. NOTE: "hsdam" (sic) spelling is used throughout. */
struct mtk_hsdam_engine {
        struct dma_device ddev;
        struct device_dma_parameters dma_parms;
        void __iomem *base;
        struct tasklet_struct task;             /* bottom half for rx/tx */
        volatile unsigned long chan_issued;     /* bitmask: channels w/ work */

        struct mtk_hsdma_chan chan[1];
};
169
170 static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
171                 struct mtk_hsdma_chan *chan)
172 {
173         return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
174                         ddev);
175 }
176
177 static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
178 {
179         return container_of(c, struct mtk_hsdma_chan, vchan.chan);
180 }
181
/* Convert a virt-dma descriptor into our software descriptor. */
static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
                struct virt_dma_desc *vdesc)
{
        return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
}
187
/* 32-bit MMIO read relative to the HSDMA register window. */
static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
{
        return readl(hsdma->base + reg);
}
192
193 static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
194                                    unsigned reg, u32 val)
195 {
196         writel(val, hsdma->base + reg);
197 }
198
/*
 * Reset one channel's ring indices, in software and in hardware.
 * rx_idx starts one slot behind so HSDMA_NEXT_DESC(rx_idx) is slot 0.
 */
static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
                                 struct mtk_hsdma_chan *chan)
{
        chan->tx_idx = 0;
        chan->rx_idx = HSDMA_DESCS_NUM - 1;

        mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
        mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);

        /* write the per-channel tx reset bit, then the rx reset bit */
        mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
                        0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
        mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
                        0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
}
213
214 static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
215 {
216         dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
217                         "tctx %08x, tdtx: %08x, rbase %08x, " \
218                         "rcnt %08x, rctx %08x, rdtx %08x\n",
219                         mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
220                         mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
221                         mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
222                         mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
223                         mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
224                         mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
225                         mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
226                         mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
227
228         dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
229                         "intr_stat %08x, intr_mask %08x\n",
230                         mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
231                         mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
232                         mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
233                         mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
234                         mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
235 }
236
237 static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
238                             struct mtk_hsdma_chan *chan)
239 {
240         struct hsdma_desc *tx_desc;
241         struct hsdma_desc *rx_desc;
242         int i;
243
244         dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
245                         chan->tx_idx, chan->rx_idx);
246
247         for (i = 0; i < HSDMA_DESCS_NUM; i++) {
248                 tx_desc = &chan->tx_ring[i];
249                 rx_desc = &chan->rx_ring[i];
250
251                 dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
252                                 "tx addr1: %08x, rx addr0 %08x, flags %08x\n",
253                                 i, tx_desc->addr0, tx_desc->flags, \
254                                 tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
255         }
256 }
257
258 static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
259                             struct mtk_hsdma_chan *chan)
260 {
261         int i;
262
263         /* disable dma */
264         mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
265
266         /* disable intr */
267         mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
268
269         /* init desc value */
270         for (i = 0; i < HSDMA_DESCS_NUM; i++) {
271                 chan->tx_ring[i].addr0 = 0;
272                 chan->tx_ring[i].flags = HSDMA_DESC_LS0 |
273                         HSDMA_DESC_DONE;
274         }
275         for (i = 0; i < HSDMA_DESCS_NUM; i++) {
276                 chan->rx_ring[i].addr0 = 0;
277                 chan->rx_ring[i].flags = 0;
278         }
279
280         /* reset */
281         mtk_hsdma_reset_chan(hsdma, chan);
282
283         /* enable intr */
284         mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
285
286         /* enable dma */
287         mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
288 }
289
/*
 * dmaengine device_terminate_all callback: drop the in-flight request
 * and all queued descriptors, then wait (up to 2s) for the hardware to
 * go idle; on timeout, dump state and force-reset the channel.
 */
static int mtk_hsdma_terminate_all(struct dma_chan *c)
{
        struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
        struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
        unsigned long timeout;
        LIST_HEAD(head);

        spin_lock_bh(&chan->vchan.lock);
        chan->desc = NULL;
        clear_bit(chan->id, &hsdma->chan_issued);
        vchan_get_all_descriptors(&chan->vchan, &head);
        spin_unlock_bh(&chan->vchan.lock);

        /* free collected descriptors outside the channel lock */
        vchan_dma_desc_free_list(&chan->vchan, &head);

        /* wait dma transfer complete */
        timeout = jiffies + msecs_to_jiffies(2000);
        while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
                        (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
                if (time_after_eq(jiffies, timeout)) {
                        hsdma_dump_desc(hsdma, chan);
                        mtk_hsdma_reset(hsdma, chan);
                        dev_err(hsdma->ddev.dev, "timeout, reset it\n");
                        break;
                }
                cpu_relax();
        }

        return 0;
}
320
/*
 * Program the tx and rx rings for the channel's current request.
 * The copy is split into chunks of at most HSDMA_MAX_PLEN bytes; each
 * hardware tx descriptor carries up to two chunks (addr0/PLEN0 on even
 * iterations, addr1/PLEN1 on odd ones), while each rx slot receives
 * exactly one destination chunk.
 */
static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
                                    struct mtk_hsdma_chan *chan)
{
        dma_addr_t src, dst;
        size_t len, tlen;
        struct hsdma_desc *tx_desc, *rx_desc;
        struct mtk_hsdma_sg *sg;
        unsigned int i;
        int rx_idx;

        sg = &chan->desc->sg[0];
        len = sg->len;
        chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);

        /* tx desc */
        src = sg->src_addr;
        for (i = 0; i < chan->desc->num_sgs; i++) {
                if (len > HSDMA_MAX_PLEN)
                        tlen = HSDMA_MAX_PLEN;
                else
                        tlen = len;

                if (i & 0x1) {
                        /* odd chunk: fills the second half of tx_desc */
                        tx_desc->addr1 = src;
                        tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
                } else {
                        /* even chunk: claim the next ring slot
                         * (always taken on i == 0, so tx_desc is set
                         * before any use — len > 0 implies num_sgs >= 1)
                         */
                        tx_desc = &chan->tx_ring[chan->tx_idx];
                        tx_desc->addr0 = src;
                        tx_desc->flags = HSDMA_DESC_PLEN0(tlen);

                        /* update index */
                        chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
                }

                src += tlen;
                len -= tlen;
        }
        /* mark whichever half was filled last as the final segment */
        if (i & 0x1)
                tx_desc->flags |= HSDMA_DESC_LS0;
        else
                tx_desc->flags |= HSDMA_DESC_LS1;

        /* rx desc: one destination chunk per slot, starting after rx_idx */
        rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
        len = sg->len;
        dst = sg->dst_addr;
        for (i = 0; i < chan->desc->num_sgs; i++) {
                rx_desc = &chan->rx_ring[rx_idx];
                if (len > HSDMA_MAX_PLEN)
                        tlen = HSDMA_MAX_PLEN;
                else
                        tlen = len;

                rx_desc->addr0 = dst;
                rx_desc->flags = HSDMA_DESC_PLEN0(tlen);

                dst += tlen;
                len -= tlen;

                /* update index */
                rx_idx = HSDMA_NEXT_DESC(rx_idx);
        }

        /* make sure desc and index all up to date */
        wmb();
        mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);

        return 0;
}
390
391 static int gdma_next_desc(struct mtk_hsdma_chan *chan)
392 {
393         struct virt_dma_desc *vdesc;
394
395         vdesc = vchan_next_desc(&chan->vchan);
396         if (!vdesc) {
397                 chan->desc = NULL;
398                 return 0;
399         }
400         chan->desc = to_mtk_hsdma_desc(vdesc);
401         chan->next_sg = 0;
402
403         return 1;
404 }
405
406 static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
407                                 struct mtk_hsdma_chan *chan)
408 {
409         struct mtk_hsdma_desc *desc;
410         int chan_issued;
411
412         chan_issued = 0;
413         spin_lock_bh(&chan->vchan.lock);
414         desc = chan->desc;
415         if (likely(desc)) {
416                 if (chan->next_sg == desc->num_sgs) {
417                         list_del(&desc->vdesc.node);
418                         vchan_cookie_complete(&desc->vdesc);
419                         chan_issued = gdma_next_desc(chan);
420                 }
421         } else
422                 dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
423
424         if (chan_issued)
425                 set_bit(chan->id, &hsdma->chan_issued);
426         spin_unlock_bh(&chan->vchan.lock);
427 }
428
429 static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
430 {
431         struct mtk_hsdam_engine *hsdma = devid;
432         u32 status;
433
434         status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
435         if (unlikely(!status))
436                 return IRQ_NONE;
437
438         if (likely(status & HSDMA_INT_RX_Q0))
439                 tasklet_schedule(&hsdma->task);
440         else
441                 dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n",
442                         status);
443         /* clean intr bits */
444         mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
445
446         return IRQ_HANDLED;
447 }
448
449 static void mtk_hsdma_issue_pending(struct dma_chan *c)
450 {
451         struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
452         struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
453
454         spin_lock_bh(&chan->vchan.lock);
455         if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
456                 if (gdma_next_desc(chan)) {
457                         set_bit(chan->id, &hsdma->chan_issued);
458                         tasklet_schedule(&hsdma->task);
459                 } else
460                         dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
461         }
462         spin_unlock_bh(&chan->vchan.lock);
463 }
464
465 static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
466                 struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
467                 size_t len, unsigned long flags)
468 {
469         struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
470         struct mtk_hsdma_desc *desc;
471
472         if (len <= 0)
473                 return NULL;
474
475         desc = kzalloc(sizeof(struct mtk_hsdma_desc), GFP_ATOMIC);
476         if (!desc) {
477                 dev_err(c->device->dev, "alloc memcpy decs error\n");
478                 return NULL;
479         }
480
481         desc->sg[0].src_addr = src;
482         desc->sg[0].dst_addr = dest;
483         desc->sg[0].len = len;
484
485         return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
486 }
487
/*
 * dmaengine device_tx_status callback: cookie bookkeeping only, no
 * residue reporting.
 */
static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
                                           dma_cookie_t cookie,
                                           struct dma_tx_state *state)
{
        return dma_cookie_status(c, cookie, state);
}
494
/* dmaengine callback: release all descriptors held by the virt channel. */
static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
{
        vchan_free_chan_resources(to_virt_chan(c));
}
499
/* virt-dma desc_free hook: release a completed software descriptor. */
static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
{
        struct mtk_hsdma_desc *desc = to_mtk_hsdma_desc(vdesc);

        kfree(desc);
}
504
505 static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
506 {
507         struct mtk_hsdma_chan *chan;
508
509         if (test_and_clear_bit(0, &hsdma->chan_issued)) {
510                 chan = &hsdma->chan[0];
511                 if (chan->desc)
512                         mtk_hsdma_start_transfer(hsdma, chan);
513                 else
514                         dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
515         }
516 }
517
/*
 * Tasklet rx pass: compute how many rx slots the hardware has consumed
 * since we last looked (DRX holds the engine's current rx position),
 * credit those chunks to the in-flight request, return the slots to the
 * hardware via CRX, and run the completion check.
 */
static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
{
        struct mtk_hsdma_chan *chan;
        int next_idx, drx_idx, cnt;

        chan = &hsdma->chan[0];
        next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
        drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);

        /* ring distance, modulo the ring size */
        cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
        if (!cnt)
                return;

        chan->next_sg += cnt;
        chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;

        /* update rx crx */
        wmb();
        mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);

        mtk_hsdma_chan_done(hsdma, chan);
}
540
/* Bottom half: reap completed rx work, then issue any pending tx work. */
static void mtk_hsdma_tasklet(unsigned long arg)
{
        struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;

        mtk_hsdma_rx(hsdma);
        mtk_hsdma_tx(hsdma);
}
548
549 static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
550                                 struct mtk_hsdma_chan *chan)
551 {
552         int i;
553
554         chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
555                         2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
556                         &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
557         if (!chan->tx_ring)
558                 goto no_mem;
559
560         chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
561
562         /* init tx ring value */
563         for (i = 0; i < HSDMA_DESCS_NUM; i++)
564                 chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
565
566         return 0;
567 no_mem:
568         return -ENOMEM;
569 }
570
571 static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
572                                 struct mtk_hsdma_chan *chan)
573 {
574         if (chan->tx_ring) {
575                 dma_free_coherent(hsdma->ddev.dev,
576                                 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
577                                 chan->tx_ring, chan->desc_addr);
578                 chan->tx_ring = NULL;
579                 chan->rx_ring = NULL;
580         }
581 }
582
/*
 * One-time hardware bring-up: allocate the rings, point the engine at
 * them, reset the channel, unmask the rx interrupt and start the engine.
 * Returns 0 or a negative errno from ring allocation.
 */
static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
{
        struct mtk_hsdma_chan *chan;
        int ret;
        u32 reg;

        /* init desc */
        chan = &hsdma->chan[0];
        ret = mtk_hsdam_alloc_desc(hsdma, chan);
        if (ret)
                return ret;

        /* tx ring lives at the start of the coherent block */
        mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
        mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
        /* rx ring follows immediately after the tx slots */
        mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
                        (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
        mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
        /* reset */
        mtk_hsdma_reset_chan(hsdma, chan);

        /* enable rx intr */
        mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);

        /* enable dma */
        mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);

        /* hardware info */
        reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
        dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
                 (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
                 (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);

        hsdma_dump_reg(hsdma);

        return ret;
}
621
/*
 * Teardown counterpart of mtk_hsdma_init(): stop the engine, mask
 * interrupts, free the rings and clear the ring registers.
 */
static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
{
        struct mtk_hsdma_chan *chan;

        /* disable dma */
        mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);

        /* disable intr */
        mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);

        /* free desc */
        chan = &hsdma->chan[0];
        mtk_hsdam_free_desc(hsdma, chan);

        /* tx */
        mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
        mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
        /* rx */
        mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
        mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
        /* reset */
        mtk_hsdma_reset_chan(hsdma, chan);
}
645
/* Devicetree match table for the MT7621 HSDMA block. */
static const struct of_device_id mtk_hsdma_of_match[] = {
        { .compatible = "mediatek,mt7621-hsdma" },
        { },
};
650
651 static int mtk_hsdma_probe(struct platform_device *pdev)
652 {
653         const struct of_device_id *match;
654         struct mtk_hsdma_chan *chan;
655         struct mtk_hsdam_engine *hsdma;
656         struct dma_device *dd;
657         struct resource *res;
658         int ret;
659         int irq;
660         void __iomem *base;
661
662         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
663         if (ret)
664                 return ret;
665
666         match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
667         if (!match)
668                 return -EINVAL;
669
670         hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
671         if (!hsdma) {
672                 dev_err(&pdev->dev, "alloc dma device failed\n");
673                 return -EINVAL;
674         }
675
676         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
677         base = devm_ioremap_resource(&pdev->dev, res);
678         if (IS_ERR(base))
679                 return PTR_ERR(base);
680         hsdma->base = base + HSDMA_BASE_OFFSET;
681         tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
682
683         irq = platform_get_irq(pdev, 0);
684         if (irq < 0) {
685                 dev_err(&pdev->dev, "failed to get irq\n");
686                 return -EINVAL;
687         }
688         ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
689                                0, dev_name(&pdev->dev), hsdma);
690         if (ret) {
691                 dev_err(&pdev->dev, "failed to request irq\n");
692                 return ret;
693         }
694
695         device_reset(&pdev->dev);
696
697         dd = &hsdma->ddev;
698         dma_cap_set(DMA_MEMCPY, dd->cap_mask);
699         dd->copy_align = HSDMA_ALIGN_SIZE;
700         dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
701         dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
702         dd->device_terminate_all = mtk_hsdma_terminate_all;
703         dd->device_tx_status = mtk_hsdma_tx_status;
704         dd->device_issue_pending = mtk_hsdma_issue_pending;
705         dd->dev = &pdev->dev;
706         dd->dev->dma_parms = &hsdma->dma_parms;
707         dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
708         INIT_LIST_HEAD(&dd->channels);
709
710         chan = &hsdma->chan[0];
711         chan->id = 0;
712         chan->vchan.desc_free = mtk_hsdma_desc_free;
713         vchan_init(&chan->vchan, dd);
714
715         /* init hardware */
716         ret = mtk_hsdma_init(hsdma);
717         if (ret) {
718                 dev_err(&pdev->dev, "failed to alloc ring descs\n");
719                 return ret;
720         }
721
722         ret = dma_async_device_register(dd);
723         if (ret) {
724                 dev_err(&pdev->dev, "failed to register dma device\n");
725                 return ret;
726         }
727
728         ret = of_dma_controller_register(pdev->dev.of_node,
729                                          of_dma_xlate_by_chan_id, hsdma);
730         if (ret) {
731                 dev_err(&pdev->dev, "failed to register of dma controller\n");
732                 goto err_unregister;
733         }
734
735         platform_set_drvdata(pdev, hsdma);
736
737         return 0;
738
739 err_unregister:
740         dma_async_device_unregister(dd);
741         return ret;
742 }
743
/* Remove: stop the hardware, free the rings, unregister both interfaces. */
static int mtk_hsdma_remove(struct platform_device *pdev)
{
        struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);

        mtk_hsdma_uninit(hsdma);

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&hsdma->ddev);

        return 0;
}
755
/* Platform driver glue; matched via the devicetree table above. */
static struct platform_driver mtk_hsdma_driver = {
        .probe = mtk_hsdma_probe,
        .remove = mtk_hsdma_remove,
        .driver = {
                .name = "hsdma-mt7621",
                .of_match_table = mtk_hsdma_of_match,
        },
};
module_platform_driver(mtk_hsdma_driver);
765
766 MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
767 MODULE_DESCRIPTION("MTK HSDMA driver");
768 MODULE_LICENSE("GPL v2");