/*
 * Copyright (c) 2013 - 2015 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"
#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc
#define DMA_CYCLIC_MAX_PERIOD	0x1000
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)
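/*
 * Each software descriptor's hardware LLI chain is carved out of one
 * LLI_BLOCK_SIZE block from the device's dma_pool, which caps the number
 * of segments a single transfer may use (see k3_dma_alloc_desc_resource()).
 */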
#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_TC2			0x08
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_TC2_MASK		0x1c
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_TC2_RAW		0x608
#define INT_ERR1_RAW		0x610
#define INT_ERR2_RAW		0x618
#define CH_PRI			0x688
#define CH_STAT			0x690
#define CX_CUR_CNT		0x704
#define CX_LLI			0x800
#define CX_CNT1			0x80c
#define CX_CNT0			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_NODEIRQ		BIT(1)
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)
struct k3_desc_hw {
	u32 lli;
	u32 reserved[3];
	u32 count;
	u32 saddr;
	u32 daddr;
	u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct k3_desc_hw	*desc_hw;
};
struct k3_dma_phy;

struct k3_dma_chan {
	u32			ccfg;
	struct virt_dma_chan	vc;
	struct k3_dma_phy	*phy;
	struct list_head	node;
	dma_addr_t		dev_addr;
	enum dma_status		status;
	bool			cyclic;
	struct dma_slave_config	slave_config;
};
struct k3_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct k3_dma_chan	*vchan;
	struct k3_dma_desc_sw	*ds_run;
	struct k3_dma_desc_sw	*ds_done;
};
struct k3_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct tasklet_struct	task;
	spinlock_t		lock;
	struct list_head	chan_pending;
	struct k3_dma_phy	*phy;
	struct k3_dma_chan	*chans;
	struct clk		*clk;
	struct dma_pool		*pool;
	u32			dma_channels;
	u32			dma_requests;
	u32			dma_channel_mask;
	unsigned int		irq;
};
#define K3_FLAG_NOCLK	BIT(1)

struct k3dma_soc_data {
	unsigned long flags;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg);

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
	return container_of(chan, struct k3_dma_chan, vc.chan);
}
static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
	u32 val = 0;

	if (on) {
		val = readl_relaxed(phy->base + CX_CFG);
		val |= CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	} else {
		val = readl_relaxed(phy->base + CX_CFG);
		val &= ~CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	}
}
static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
	u32 val = 0;

	k3_dma_pause_dma(phy, false);

	/* clear any pending raw interrupt status for this channel */
	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + INT_TC1_RAW);
	writel_relaxed(val, d->base + INT_TC2_RAW);
	writel_relaxed(val, d->base + INT_ERR1_RAW);
	writel_relaxed(val, d->base + INT_ERR2_RAW);
}
static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT0);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(hw->config, phy->base + CX_CFG);
}
static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
	u32 cnt = 0;

	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
	cnt &= 0xffff;
	return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
	return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
	return readl_relaxed(d->base + CH_STAT);
}
static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* set same priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask irq */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask irq */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_TC2_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}
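/*
 * One shared IRQ reports all physical channels: TC1 marks completion of a
 * whole LLI chain and retires the in-flight descriptor, while TC2 fires on
 * LLIs flagged with CX_CFG_NODEIRQ and drives the cyclic period callback.
 * Raw status bits are acked before returning.
 */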
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c;
	u32 stat = readl_relaxed(d->base + INT_STAT);
	u32 tc1  = readl_relaxed(d->base + INT_TC1);
	u32 tc2  = readl_relaxed(d->base + INT_TC2);
	u32 err1 = readl_relaxed(d->base + INT_ERR1);
	u32 err2 = readl_relaxed(d->base + INT_ERR2);
	u32 i, irq_chan = 0;

	while (stat) {
		i = __ffs(stat);
		stat &= ~BIT(i);
		if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
			unsigned long flags;

			p = &d->phy[i];
			c = p->vchan;
			if (c && (tc1 & BIT(i))) {
				spin_lock_irqsave(&c->vc.lock, flags);
				vchan_cookie_complete(&p->ds_run->vd);
				p->ds_done = p->ds_run;
				p->ds_run = NULL;
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			if (c && (tc2 & BIT(i))) {
				spin_lock_irqsave(&c->vc.lock, flags);
				if (p->ds_run != NULL)
					vchan_cyclic_callback(&p->ds_run->vd);
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			irq_chan |= BIT(i);
		}
		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
	}

	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
	writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
	writel_relaxed(err1, d->base + INT_ERR1_RAW);
	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan)
		tasklet_schedule(&d->task);

	if (irq_chan || err1 || err2)
		return IRQ_HANDLED;

	return IRQ_NONE;
}
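/*
 * Push the next issued descriptor onto the channel's pchan. A nonzero
 * return means nothing was started (no pchan, pchan still busy per
 * CH_STAT, or an empty issue list); the tasklet below uses that as its
 * cue to release the pchan.
 */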
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
		return -EAGAIN;

	if (vd) {
		struct k3_dma_desc_sw *ds =
			container_of(vd, struct k3_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);

		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
		return 0;
	}
	c->phy->ds_run = NULL;
	c->phy->ds_done = NULL;
	return -EAGAIN;
}
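/*
 * Deferred scheduler. First retire pchans whose descriptor has completed,
 * freeing them when the vchan has nothing more to run; then hand idle
 * pchans (honouring dma_channel_mask) to vchans waiting on chan_pending;
 * finally kick the freshly paired channels.
 */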
static void k3_dma_tasklet(unsigned long arg)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && p->ds_done) {
			if (k3_dma_start_txd(c)) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct k3_dma_chan, node);
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << pch;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irq(&c->vc.lock);
				k3_dma_start_txd(c);
				spin_unlock_irq(&c->vc.lock);
			}
		}
	}
}
static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}
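/*
 * Residue: a cookie still sitting on the issue queue owes its full size.
 * For the running descriptor, read the pchan's current transfer count and
 * add the counts of the LLI entries after the one the current link-list
 * pointer designates.
 */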
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd && !c->cyclic) {
		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct k3_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = k3_dma_get_curr_cnt(d, p);
		clli = k3_dma_get_curr_lli(p);
		index = ((clli - ds->desc_hw_lli) /
				sizeof(struct k3_desc_hw)) + 1;
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].count;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}
static void k3_dma_issue_pending(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy) {
			if (list_empty(&c->node)) {
				/* if new channel, add chan_pending */
				list_add_tail(&c->node, &d->chan_pending);
				/* check in tasklet */
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
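/*
 * Fill one hardware LLI entry. Every entry but the last points at its
 * successor inside the pool block with CX_LLI_CHAIN_EN set; the prep
 * routines then either zero the final link (memcpy, slave_sg) or bend it
 * back to the first entry (cyclic).
 */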
static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
			dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if (num != ds->desc_num - 1)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct k3_desc_hw);

	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	ds->desc_hw[num].count = len;
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].config = ccfg;
}
static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
							struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceeds max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	ds->desc_num = num;
	return ds;
}
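/*
 * Memcpy preparation: the copy is split into LLI entries of at most
 * DMA_MAX_SIZE bytes. If no slave config was ever applied, a default
 * mem-to-mem ccfg is used (both addresses incrementing, burst of 16,
 * 64-bit width).
 */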
static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	ds->size = len;
	num = 0;

	if (!c->ccfg) {
		/* default is memtomem, without calling device_config */
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
	}

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}
static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (sgl == NULL)
		return NULL;

	c->cyclic = 0;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}
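/*
 * Cyclic preparation: the buffer is chopped into LLIs of at most
 * DMA_CYCLIC_MAX_PERIOD (or period_len, if smaller) bytes. The LLI that
 * completes each period carries CX_CFG_NODEIRQ to raise TC2, and the last
 * LLI links back to the first so the ring runs until terminated.
 */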
static struct dma_async_tx_descriptor *
k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
		       size_t buf_len, size_t period_len,
		       enum dma_transfer_direction dir,
		       unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	dma_addr_t addr, src = 0, dst = 0;
	int num = 1, since = 0;
	size_t modulo = DMA_CYCLIC_MAX_PERIOD;
	u32 en_tc2 = 0;

	dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
	       __func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
	       buf_len, period_len, (int)dir);

	avail = buf_len;
	if (avail > modulo)
		num += DIV_ROUND_UP(avail, modulo) - 1;

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 1;
	addr = buf_addr;
	avail = buf_len;
	total = avail;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	if (period_len < modulo)
		modulo = period_len;

	do {
		len = min_t(size_t, avail, modulo);

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = addr;
		}
		since += len;
		if (since >= period_len) {
			/* descriptor asks for TC2 interrupt on completion */
			en_tc2 = CX_CFG_NODEIRQ;
			since -= period_len;
		} else
			en_tc2 = 0;

		k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);

		addr += len;
		avail -= len;
	} while (avail);

	/* "Cyclic" == end of link points back to start of link */
	ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;

	ds->size = total;

	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}
static int k3_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	memcpy(&c->slave_config, cfg, sizeof(*cfg));

	return 0;
}
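/*
 * Translate the cached dma_slave_config into the channel config word:
 * the encoded bus width lands at bits 12 and 16, the burst length at
 * bits 20 and 24, and the peripheral request line (the channel id) from
 * bit 4 upward.
 */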
static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	u32 maxburst = 0, val = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (dir == DMA_DEV_TO_MEM) {
		c->ccfg = CX_CFG_DSTINCR;
		c->dev_addr = cfg->src_addr;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
	} else if (dir == DMA_MEM_TO_DEV) {
		c->ccfg = CX_CFG_SRCINCR;
		c->dev_addr = cfg->dst_addr;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
	}
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val = __ffs(width);
		break;
	default:
		val = 3;
		break;
	}
	c->ccfg |= (val << 12) | (val << 16);

	if ((maxburst == 0) || (maxburst > 16))
		val = 15;
	else
		val = maxburst - 1;
	c->ccfg |= (val << 20) | (val << 24);
	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

	/* specific request line */
	c->ccfg |= c->vc.chan.chan_id << 4;

	return 0;
}
static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
	struct k3_dma_desc_sw *ds =
		container_of(vd, struct k3_dma_desc_sw, vd);
	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}
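/*
 * Terminate: unhook the vchan from the pending list, stop and release
 * any pchan it holds (dropping the in-flight descriptor), then free all
 * queued descriptors.
 */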
static int k3_dma_terminate_all(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		k3_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		if (p->ds_run) {
			vchan_terminate_vdesc(&p->ds_run->vd);
			p->ds_run = NULL;
		}
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}
static void k3_dma_synchronize(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	vchan_synchronize(&c->vc);
}
static int k3_dma_transfer_pause(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;
		if (p) {
			k3_dma_pause_dma(p, false);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}

	return 0;
}
static int k3_dma_transfer_resume(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;
		if (p) {
			k3_dma_pause_dma(p, true);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}
static const struct k3dma_soc_data k3_v1_dma_data = {
	.flags = 0,
};

static const struct k3dma_soc_data asp_v1_dma_data = {
	.flags = K3_FLAG_NOCLK,
};

static const struct of_device_id k3_pdma_dt_ids[] = {
	{ .compatible = "hisilicon,k3-dma-1.0",
	  .data = &k3_v1_dma_data
	},
	{ .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
	  .data = &asp_v1_dma_data
	},
	{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
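/*
 * Illustrative only; the request line numbers below are invented, not
 * taken from a real board file. A client node selects a channel with a
 * single cell holding its request line, which k3_of_dma_simple_xlate()
 * maps to d->chans[request]:
 *
 *	uart@0 {
 *		dmas = <&dma0 18>, <&dma0 19>;
 *		dma-names = "rx", "tx";
 *	};
 */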
static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct k3_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request >= d->dma_requests)
		return NULL;

	return dma_get_slave_channel(&(d->chans[request].vc.chan));
}
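/*
 * Probe order: claim registers, clock and IRQ first, then build the LLI
 * pool and the physical/virtual channel arrays, and register with the
 * dmaengine core only once the clock is running and interrupts are
 * unmasked.
 */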
static int k3_dma_probe(struct platform_device *op)
{
	const struct k3dma_soc_data *soc_data;
	struct k3_dma_dev *d;
	const struct of_device_id *of_id;
	struct resource *iores;
	int i, ret, irq = 0;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	soc_data = device_get_match_data(&op->dev);
	if (!soc_data)
		return -EINVAL;

	d->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32((&op->dev)->of_node,
				"dma-channels", &d->dma_channels);
		of_property_read_u32((&op->dev)->of_node,
				"dma-requests", &d->dma_requests);
		ret = of_property_read_u32((&op->dev)->of_node,
				"dma-channel-mask", &d->dma_channel_mask);
		if (ret) {
			dev_warn(&op->dev,
				 "dma-channel-mask doesn't exist, considering all as available.\n");
			d->dma_channel_mask = (u32)~0UL;
		}
	}

	if (!(soc_data->flags & K3_FLAG_NOCLK)) {
		d->clk = devm_clk_get(&op->dev, NULL);
		if (IS_ERR(d->clk)) {
			dev_err(&op->dev, "no dma clk\n");
			return PTR_ERR(d->clk);
		}
	}

	irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, irq,
			k3_dma_int_handler, 0, DRIVER_NAME, d);
	if (ret)
		return ret;

	d->irq = irq;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
					LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kcalloc(&op->dev,
		d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
	if (d->phy == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct k3_dma_phy *p;

		if (!(d->dma_channel_mask & BIT(i)))
			continue;

		p = &d->phy[i];
		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
	d->slave.device_tx_status = k3_dma_tx_status;
	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = k3_dma_issue_pending;
	d->slave.device_config = k3_dma_config;
	d->slave.device_pause = k3_dma_transfer_pause;
	d->slave.device_resume = k3_dma_transfer_resume;
	d->slave.device_terminate_all = k3_dma_terminate_all;
	d->slave.device_synchronize = k3_dma_synchronize;
	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

	/* init virtual channel */
	d->chans = devm_kcalloc(&op->dev,
		d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
	if (d->chans == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct k3_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = k3_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	k3_dma_enable_dma(d, true);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto dma_async_register_fail;

	ret = of_dma_controller_register((&op->dev)->of_node,
					k3_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
	platform_set_drvdata(op, d);
	dev_info(&op->dev, "initialized\n");

	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
dma_async_register_fail:
	clk_disable_unprepare(d->clk);
	return ret;
}
static int k3_dma_remove(struct platform_device *op)
{
	struct k3_dma_chan *c, *cn;
	struct k3_dma_dev *d = platform_get_drvdata(op);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free((&op->dev)->of_node);

	devm_free_irq(&op->dev, d->irq, d);

	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
	tasklet_kill(&d->task);
	clk_disable_unprepare(d->clk);
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = k3_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			 "chan %d is running, fail to suspend\n", stat);
		return -1;
	}
	k3_dma_enable_dma(d, false);
	clk_disable_unprepare(d->clk);
	return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	k3_dma_enable_dma(d, true);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &k3_dma_pmops,
		.of_match_table = k3_pdma_dt_ids,
	},
	.probe		= k3_dma_probe,
	.remove		= k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");