1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/delay.h>
10 #include <linux/dmaengine.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmapool.h>
13 #include <linux/err.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/list.h>
17 #include <linux/platform_device.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/sys_soc.h>
22 #include <linux/of_dma.h>
23 #include <linux/of_device.h>
24 #include <linux/of_irq.h>
25 #include <linux/workqueue.h>
26 #include <linux/completion.h>
27 #include <linux/soc/ti/k3-ringacc.h>
28 #include <linux/soc/ti/ti_sci_protocol.h>
29 #include <linux/soc/ti/ti_sci_inta_msi.h>
30 #include <linux/dma/k3-event-router.h>
31 #include <linux/dma/ti-cppi5.h>
33 #include "../virt-dma.h"
35 #include "k3-psil-priv.h"
37 struct udma_static_tr {
38 u8 elsize; /* RPSTR0 */
39 u16 elcnt; /* RPSTR0 */
40 u16 bstcnt; /* RPSTR1 */
43 #define K3_UDMA_MAX_RFLOWS 1024
44 #define K3_UDMA_DEFAULT_RING_SIZE 16
46 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
47 #define UDMA_RFLOW_SRCTAG_NONE 0
48 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1
49 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2
50 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4
52 #define UDMA_RFLOW_DSTTAG_NONE 0
53 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1
54 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2
55 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
56 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
74 static const char * const mmr_names[] = {
76 [MMR_BCHANRT] = "bchanrt",
77 [MMR_RCHANRT] = "rchanrt",
78 [MMR_TCHANRT] = "tchanrt",
85 struct k3_ring *t_ring; /* Transmit ring */
86 struct k3_ring *tc_ring; /* Transmit Completion ring */
87 int tflow_id; /* applicable only for PKTDMA */
91 #define udma_bchan udma_tchan
95 struct k3_ring *fd_ring; /* Free Descriptor ring */
96 struct k3_ring *r_ring; /* Receive ring */
100 void __iomem *reg_rt;
105 struct udma_oes_offsets {
106 /* K3 UDMA Output Event Offset */
109 /* BCDMA Output Event Offsets */
110 u32 bcdma_bchan_data;
111 u32 bcdma_bchan_ring;
112 u32 bcdma_tchan_data;
113 u32 bcdma_tchan_ring;
114 u32 bcdma_rchan_data;
115 u32 bcdma_rchan_ring;
117 /* PKTDMA Output Event Offsets */
118 u32 pktdma_tchan_flow;
119 u32 pktdma_rchan_flow;
122 #define UDMA_FLAG_PDMA_ACC32 BIT(0)
123 #define UDMA_FLAG_PDMA_BURST BIT(1)
124 #define UDMA_FLAG_TDTYPE BIT(2)
125 #define UDMA_FLAG_BURST_SIZE BIT(3)
126 #define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \
127 UDMA_FLAG_PDMA_BURST | \
129 UDMA_FLAG_BURST_SIZE)
131 struct udma_match_data {
132 enum k3_dma_type type;
134 bool enable_memcpy_support;
138 struct udma_soc_data *soc_data;
141 struct udma_soc_data {
142 struct udma_oes_offsets oes;
143 u32 bcdma_trigger_event_offset;
147 size_t cppi5_desc_size;
148 void *cppi5_desc_vaddr;
149 dma_addr_t cppi5_desc_paddr;
151 /* TR descriptor internal pointers */
153 struct cppi5_tr_resp_t *tr_resp_base;
156 struct udma_rx_flush {
157 struct udma_hwdesc hwdescs[2];
161 dma_addr_t buffer_paddr;
170 struct dma_device ddev;
172 void __iomem *mmrs[MMR_LAST];
173 const struct udma_match_data *match_data;
174 const struct udma_soc_data *soc_data;
176 struct udma_tpl bchan_tpl;
177 struct udma_tpl tchan_tpl;
178 struct udma_tpl rchan_tpl;
180 size_t desc_align; /* alignment to use for descriptors */
182 struct udma_tisci_rm tisci_rm;
184 struct k3_ringacc *ringacc;
186 struct work_struct purge_work;
187 struct list_head desc_to_purge;
190 struct udma_rx_flush rx_flush;
198 unsigned long *bchan_map;
199 unsigned long *tchan_map;
200 unsigned long *rchan_map;
201 unsigned long *rflow_gp_map;
202 unsigned long *rflow_gp_map_allocated;
203 unsigned long *rflow_in_use;
204 unsigned long *tflow_map;
206 struct udma_bchan *bchans;
207 struct udma_tchan *tchans;
208 struct udma_rchan *rchans;
209 struct udma_rflow *rflows;
211 struct udma_chan *channels;
218 struct virt_dma_desc vd;
222 enum dma_transfer_direction dir;
224 struct udma_static_tr static_tr;
228 unsigned int desc_idx; /* Only used for cyclic in packet mode */
232 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
234 unsigned int hwdesc_count;
235 struct udma_hwdesc hwdesc[];
238 enum udma_chan_state {
239 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
240 UDMA_CHAN_IS_ACTIVE, /* Normal operation */
241 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
244 struct udma_tx_drain {
245 struct delayed_work work;
250 struct udma_chan_config {
251 bool pkt_mode; /* TR or packet */
252 bool needs_epib; /* EPIB is needed for the communication or not */
253 u32 psd_size; /* size of Protocol Specific Data */
254 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
255 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
256 bool notdpkt; /* Suppress sending TDC packet */
257 int remote_thread_id;
262 enum psil_endpoint_type ep_type;
265 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
268 unsigned long tx_flags;
270 /* PKTDMA mapped channel */
271 int mapped_channel_id;
272 /* PKTDMA default tflow or rflow for mapped channel */
275 enum dma_transfer_direction dir;
279 struct virt_dma_chan vc;
280 struct dma_slave_config cfg;
282 struct device *dma_dev;
283 struct udma_desc *desc;
284 struct udma_desc *terminated_desc;
285 struct udma_static_tr static_tr;
288 struct udma_bchan *bchan;
289 struct udma_tchan *tchan;
290 struct udma_rchan *rchan;
291 struct udma_rflow *rflow;
301 enum udma_chan_state state;
302 struct completion teardown_completed;
304 struct udma_tx_drain tx_drain;
306 /* Channel configuration parameters */
307 struct udma_chan_config config;
309 /* dmapool for packet mode descriptors */
311 struct dma_pool *hdesc_pool;
316 static inline struct udma_dev *to_udma_dev(struct dma_device *d)
318 return container_of(d, struct udma_dev, ddev);
321 static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
323 return container_of(c, struct udma_chan, vc.chan);
326 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
328 return container_of(t, struct udma_desc, vd.tx);
331 /* Generic register access functions */
332 static inline u32 udma_read(void __iomem *base, int reg)
334 return readl(base + reg);
337 static inline void udma_write(void __iomem *base, int reg, u32 val)
339 writel(val, base + reg);
342 static inline void udma_update_bits(void __iomem *base, int reg,
347 orig = readl(base + reg);
352 writel(tmp, base + reg);
356 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
360 return udma_read(uc->tchan->reg_rt, reg);
363 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
367 udma_write(uc->tchan->reg_rt, reg, val);
370 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
375 udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
379 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
383 return udma_read(uc->rchan->reg_rt, reg);
386 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
390 udma_write(uc->rchan->reg_rt, reg, val);
393 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
398 udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
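/*
 * PSI-L thread pairing/unpairing is delegated to System Firmware through the
 * TI-SCI resource manager; the destination thread id gets the DST offset bit
 * set before the request is sent.
 */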
401 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
403 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
405 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
406 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
407 tisci_rm->tisci_navss_dev_id,
408 src_thread, dst_thread);
411 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
414 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
416 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
417 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
418 tisci_rm->tisci_navss_dev_id,
419 src_thread, dst_thread);
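/*
 * Configure per-channel device coherency from the ASEL value routed with the
 * channel: 0 means no special handling, 14 and 15 select a coherent path with
 * 48-bit addressing, anything else is rejected with a warning.
 */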
422 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
424 struct device *chan_dev = &chan->dev->device;
427 /* No special handling for the channel */
428 chan->dev->chan_dma_dev = false;
430 chan_dev->dma_coherent = false;
431 chan_dev->dma_parms = NULL;
432 } else if (asel == 14 || asel == 15) {
433 chan->dev->chan_dma_dev = true;
435 chan_dev->dma_coherent = true;
436 dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
437 chan_dev->dma_parms = chan_dev->parent->dma_parms;
439 dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
441 chan_dev->dma_coherent = false;
442 chan_dev->dma_parms = NULL;
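/* Map a channel id to its throughput level using the per-TPL start indices */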
446 static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
450 for (i = 0; i < tpl_map->levels; i++) {
451 if (chan_id >= tpl_map->start_idx[i])
458 static void udma_reset_uchan(struct udma_chan *uc)
460 memset(&uc->config, 0, sizeof(uc->config));
461 uc->config.remote_thread_id = -1;
462 uc->config.mapped_channel_id = -1;
463 uc->config.default_flow_id = -1;
464 uc->state = UDMA_CHAN_IS_IDLE;
467 static void udma_dump_chan_stdata(struct udma_chan *uc)
469 struct device *dev = uc->ud->dev;
473 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
474 dev_dbg(dev, "TCHAN State data:\n");
475 for (i = 0; i < 32; i++) {
476 offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
477 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
478 udma_tchanrt_read(uc, offset));
482 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
483 dev_dbg(dev, "RCHAN State data:\n");
484 for (i = 0; i < 32; i++) {
485 offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
486 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
487 udma_rchanrt_read(uc, offset));
492 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
495 return d->hwdesc[idx].cppi5_desc_paddr;
498 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
500 return d->hwdesc[idx].cppi5_desc_vaddr;
503 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
506 struct udma_desc *d = uc->terminated_desc;
509 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
512 if (desc_paddr != paddr)
519 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
522 if (desc_paddr != paddr)
530 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
532 if (uc->use_dma_pool) {
535 for (i = 0; i < d->hwdesc_count; i++) {
536 if (!d->hwdesc[i].cppi5_desc_vaddr)
539 dma_pool_free(uc->hdesc_pool,
540 d->hwdesc[i].cppi5_desc_vaddr,
541 d->hwdesc[i].cppi5_desc_paddr);
543 d->hwdesc[i].cppi5_desc_vaddr = NULL;
545 } else if (d->hwdesc[0].cppi5_desc_vaddr) {
546 dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
547 d->hwdesc[0].cppi5_desc_vaddr,
548 d->hwdesc[0].cppi5_desc_paddr);
550 d->hwdesc[0].cppi5_desc_vaddr = NULL;
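/*
 * dma_free_coherent() must not be called from atomic context, so descriptors
 * that are not pool allocated are queued on desc_to_purge and released from
 * this work handler instead of the completion path.
 */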
554 static void udma_purge_desc_work(struct work_struct *work)
556 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
557 struct virt_dma_desc *vd, *_vd;
561 spin_lock_irqsave(&ud->lock, flags);
562 list_splice_tail_init(&ud->desc_to_purge, &head);
563 spin_unlock_irqrestore(&ud->lock, flags);
565 list_for_each_entry_safe(vd, _vd, &head, node) {
566 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
567 struct udma_desc *d = to_udma_desc(&vd->tx);
569 udma_free_hwdesc(uc, d);
574 /* If more to purge, schedule the work again */
575 if (!list_empty(&ud->desc_to_purge))
576 schedule_work(&ud->purge_work);
579 static void udma_desc_free(struct virt_dma_desc *vd)
581 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
582 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
583 struct udma_desc *d = to_udma_desc(&vd->tx);
586 if (uc->terminated_desc == d)
587 uc->terminated_desc = NULL;
589 if (uc->use_dma_pool) {
590 udma_free_hwdesc(uc, d);
595 spin_lock_irqsave(&ud->lock, flags);
596 list_add_tail(&vd->node, &ud->desc_to_purge);
597 spin_unlock_irqrestore(&ud->lock, flags);
599 schedule_work(&ud->purge_work);
602 static bool udma_is_chan_running(struct udma_chan *uc)
608 trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
610 rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
612 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
618 static bool udma_is_chan_paused(struct udma_chan *uc)
622 switch (uc->config.dir) {
624 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
625 pause_mask = UDMA_PEER_RT_EN_PAUSE;
628 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
629 pause_mask = UDMA_PEER_RT_EN_PAUSE;
632 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
633 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
639 if (val & pause_mask)
645 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
647 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
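/*
 * Queue one hardware descriptor to the channel's submission ring: the rflow
 * free descriptor ring for DEV_TO_MEM, the tchan transmit ring otherwise.
 * idx == -1 selects the RX flush descriptor used during teardown.
 */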
650 static int udma_push_to_ring(struct udma_chan *uc, int idx)
652 struct udma_desc *d = uc->desc;
653 struct k3_ring *ring = NULL;
656 switch (uc->config.dir) {
658 ring = uc->rflow->fd_ring;
662 ring = uc->tchan->t_ring;
668 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
670 paddr = udma_get_rx_flush_hwdesc_paddr(uc);
672 paddr = udma_curr_cppi5_desc_paddr(d, idx);
674 wmb(); /* Ensure that writes are not moved over this point */
677 return k3_ringacc_ring_push(ring, &paddr);
680 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
682 if (uc->config.dir != DMA_DEV_TO_MEM)
685 if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
691 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
693 struct k3_ring *ring = NULL;
696 switch (uc->config.dir) {
698 ring = uc->rflow->r_ring;
702 ring = uc->tchan->tc_ring;
708 ret = k3_ringacc_ring_pop(ring, addr);
712 rmb(); /* Ensure that reads are not moved before this point */
714 /* Teardown completion */
715 if (cppi5_desc_is_tdcm(*addr))
718 /* Check for flush descriptor */
719 if (udma_desc_is_rx_flush(uc, *addr))
725 static void udma_reset_rings(struct udma_chan *uc)
727 struct k3_ring *ring1 = NULL;
728 struct k3_ring *ring2 = NULL;
730 switch (uc->config.dir) {
733 ring1 = uc->rflow->fd_ring;
734 ring2 = uc->rflow->r_ring;
740 ring1 = uc->tchan->t_ring;
741 ring2 = uc->tchan->tc_ring;
749 k3_ringacc_ring_reset_dma(ring1,
750 k3_ringacc_ring_get_occ(ring1));
752 k3_ringacc_ring_reset(ring2);
754 /* make sure we are not leaking memory by a stalled descriptor */
755 if (uc->terminated_desc) {
756 udma_desc_free(&uc->terminated_desc->vd);
757 uc->terminated_desc = NULL;
761 static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
763 if (uc->desc->dir == DMA_DEV_TO_MEM) {
764 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
765 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
766 if (uc->config.ep_type != PSIL_EP_NATIVE)
767 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
769 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
770 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
771 if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
772 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
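/*
 * The real-time byte/packet counters decrement by the value written to them,
 * so reading each counter and writing the same value back clears it.
 */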
776 static void udma_reset_counters(struct udma_chan *uc)
781 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
782 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
784 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
785 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
787 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
788 udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
791 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
792 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
797 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
798 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
800 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
801 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
803 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
804 udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
806 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
807 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
811 static int udma_reset_chan(struct udma_chan *uc, bool hard)
813 switch (uc->config.dir) {
815 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
816 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
819 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
820 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
823 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
824 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
830 /* Reset all counters */
831 udma_reset_counters(uc);
833 /* Hard reset: re-initialize the channel to reset */
835 struct udma_chan_config ucc_backup;
838 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
839 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
841 /* restore the channel configuration */
842 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
843 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
848 * Setting forced teardown after forced reset helps recovering
851 if (uc->config.dir == DMA_DEV_TO_MEM)
852 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
853 UDMA_CHAN_RT_CTL_EN |
854 UDMA_CHAN_RT_CTL_TDOWN |
855 UDMA_CHAN_RT_CTL_FTDOWN);
857 uc->state = UDMA_CHAN_IS_IDLE;
862 static void udma_start_desc(struct udma_chan *uc)
864 struct udma_chan_config *ucc = &uc->config;
866 if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
867 (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
871 * UDMA only: Push all descriptors to ring for packet mode
873 * PKTDMA supports pre-linked descriptor and cyclic is not
876 for (i = 0; i < uc->desc->sglen; i++)
877 udma_push_to_ring(uc, i);
879 udma_push_to_ring(uc, 0);
883 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
885 /* Only PDMAs have staticTR */
886 if (uc->config.ep_type == PSIL_EP_NATIVE)
889 /* Check if the staticTR configuration has changed for TX */
890 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
896 static int udma_start(struct udma_chan *uc)
898 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
907 uc->desc = to_udma_desc(&vd->tx);
909 /* Channel is already running and does not need reconfiguration */
910 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
915 /* Make sure that we clear the teardown bit, if it is set */
916 udma_reset_chan(uc, false);
918 /* Push descriptors before we start the channel */
921 switch (uc->desc->dir) {
923 /* Config remote TR */
924 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
925 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
926 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
927 const struct udma_match_data *match_data =
930 if (uc->config.enable_acc32)
931 val |= PDMA_STATIC_TR_XY_ACC32;
932 if (uc->config.enable_burst)
933 val |= PDMA_STATIC_TR_XY_BURST;
935 udma_rchanrt_write(uc,
936 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
939 udma_rchanrt_write(uc,
940 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
941 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
942 match_data->statictr_z_mask));
944 /* save the current staticTR configuration */
945 memcpy(&uc->static_tr, &uc->desc->static_tr,
946 sizeof(uc->static_tr));
949 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
950 UDMA_CHAN_RT_CTL_EN);
953 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
954 UDMA_PEER_RT_EN_ENABLE);
958 /* Config remote TR */
959 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
960 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
961 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
963 if (uc->config.enable_acc32)
964 val |= PDMA_STATIC_TR_XY_ACC32;
965 if (uc->config.enable_burst)
966 val |= PDMA_STATIC_TR_XY_BURST;
968 udma_tchanrt_write(uc,
969 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
972 /* save the current staticTR configuration */
973 memcpy(&uc->static_tr, &uc->desc->static_tr,
974 sizeof(uc->static_tr));
978 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
979 UDMA_PEER_RT_EN_ENABLE);
981 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
982 UDMA_CHAN_RT_CTL_EN);
986 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
987 UDMA_CHAN_RT_CTL_EN);
988 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
989 UDMA_CHAN_RT_CTL_EN);
996 uc->state = UDMA_CHAN_IS_ACTIVE;
1002 static int udma_stop(struct udma_chan *uc)
1004 enum udma_chan_state old_state = uc->state;
1006 uc->state = UDMA_CHAN_IS_TERMINATING;
1007 reinit_completion(&uc->teardown_completed);
1009 switch (uc->config.dir) {
1010 case DMA_DEV_TO_MEM:
1011 if (!uc->cyclic && !uc->desc)
1012 udma_push_to_ring(uc, -1);
1014 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1015 UDMA_PEER_RT_EN_ENABLE |
1016 UDMA_PEER_RT_EN_TEARDOWN);
1018 case DMA_MEM_TO_DEV:
1019 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1020 UDMA_PEER_RT_EN_ENABLE |
1021 UDMA_PEER_RT_EN_FLUSH);
1022 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1023 UDMA_CHAN_RT_CTL_EN |
1024 UDMA_CHAN_RT_CTL_TDOWN);
1026 case DMA_MEM_TO_MEM:
1027 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1028 UDMA_CHAN_RT_CTL_EN |
1029 UDMA_CHAN_RT_CTL_TDOWN);
1032 uc->state = old_state;
1033 complete_all(&uc->teardown_completed);
1040 static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
1042 struct udma_desc *d = uc->desc;
1043 struct cppi5_host_desc_t *h_desc;
1045 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
1046 cppi5_hdesc_reset_to_original(h_desc);
1047 udma_push_to_ring(uc, d->desc_idx);
1048 d->desc_idx = (d->desc_idx + 1) % d->sglen;
1051 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
1053 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
1055 memcpy(d->metadata, h_desc->epib, d->metadata_size);
1058 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
1060 u32 peer_bcnt, bcnt;
1063 * Only TX towards PDMA is affected.
1064 * If DMA_PREP_INTERRUPT is not set by the consumer then skip the transfer
1065 * completion calculation; the consumer must ensure that there is no stale
1066 * data in the DMA fabric in this case.
1068 if (uc->config.ep_type == PSIL_EP_NATIVE ||
1069 uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
1072 peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
1073 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
1075 /* Transfer is incomplete, store current residue and time stamp */
1076 if (peer_bcnt < bcnt) {
1077 uc->tx_drain.residue = bcnt - peer_bcnt;
1078 uc->tx_drain.tstamp = ktime_get();
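/*
 * Delayed work monitoring a MEM_TO_DEV transfer that has completed on the
 * UDMA side but is still draining towards the PDMA peer: the drain rate is
 * estimated from the residue change and the check is rescheduled until the
 * descriptor can be completed.
 */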
1085 static void udma_check_tx_completion(struct work_struct *work)
1087 struct udma_chan *uc = container_of(work, typeof(*uc),
1088 tx_drain.work.work);
1089 bool desc_done = true;
1092 unsigned long delay;
1096 /* Get previous residue and time stamp */
1097 residue_diff = uc->tx_drain.residue;
1098 time_diff = uc->tx_drain.tstamp;
1100 * Get current residue and time stamp or see if
1101 * transfer is complete
1103 desc_done = udma_is_desc_really_done(uc, uc->desc);
1108 * Find the time delta and residue delta w.r.t
1111 time_diff = ktime_sub(uc->tx_drain.tstamp,
1113 residue_diff -= uc->tx_drain.residue;
1116 * Try to guess when we should check
1117 * next time by calculating rate at
1118 * which data is being drained at the
1121 delay = (time_diff / residue_diff) *
1122 uc->tx_drain.residue;
1124 /* No progress, check again in 1 second */
1125 schedule_delayed_work(&uc->tx_drain.work, HZ);
1129 usleep_range(ktime_to_us(delay),
1130 ktime_to_us(delay) + 10);
1135 struct udma_desc *d = uc->desc;
1137 udma_decrement_byte_counters(uc, d->residue);
1139 vchan_cookie_complete(&d->vd);
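/*
 * Completion ring interrupt: pop the descriptor address from the ring, handle
 * teardown completion markers (TDCM) and the RX flush descriptor, then
 * complete or recycle the matching virt-dma descriptor.
 */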
1147 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1149 struct udma_chan *uc = data;
1150 struct udma_desc *d;
1151 dma_addr_t paddr = 0;
1153 if (udma_pop_from_ring(uc, &paddr) || !paddr)
1156 spin_lock(&uc->vc.lock);
1158 /* Teardown completion message */
1159 if (cppi5_desc_is_tdcm(paddr)) {
1160 complete_all(&uc->teardown_completed);
1162 if (uc->terminated_desc) {
1163 udma_desc_free(&uc->terminated_desc->vd);
1164 uc->terminated_desc = NULL;
1173 d = udma_udma_desc_from_paddr(uc, paddr);
1176 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1178 if (desc_paddr != paddr) {
1179 dev_err(uc->ud->dev, "not matching descriptors!\n");
1183 if (d == uc->desc) {
1184 /* active descriptor */
1186 udma_cyclic_packet_elapsed(uc);
1187 vchan_cyclic_callback(&d->vd);
1189 if (udma_is_desc_really_done(uc, d)) {
1190 udma_decrement_byte_counters(uc, d->residue);
1192 vchan_cookie_complete(&d->vd);
1194 schedule_delayed_work(&uc->tx_drain.work,
1200 * terminated descriptor, mark the descriptor as
1201 * completed to update the channel's cookie marker
1203 dma_cookie_complete(&d->vd.tx);
1207 spin_unlock(&uc->vc.lock);
1212 static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1214 struct udma_chan *uc = data;
1215 struct udma_desc *d;
1217 spin_lock(&uc->vc.lock);
1220 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1223 vchan_cyclic_callback(&d->vd);
1225 /* TODO: figure out the real amount of data */
1226 udma_decrement_byte_counters(uc, d->residue);
1228 vchan_cookie_complete(&d->vd);
1232 spin_unlock(&uc->vc.lock);
1238 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1240 * @from: Start the search from this flow id number
1241 * @cnt: Number of consecutive flow ids to allocate
1243 * Allocate a range of RX flow ids for future use. Those flows can be requested
1244 * only using an explicit flow id number. If @from is set to -1 it will try to find
1245 * the first free range. If @from is a positive value it will force allocation only
1246 * of the specified range of flows.
1248 * Returns -ENOMEM if it can't find a free range,
1249 * -EEXIST if the requested range is busy,
1250 * -EINVAL if wrong input values are passed.
1251 * Returns the flow id on success.
1253 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1255 int start, tmp_from;
1256 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1260 tmp_from = ud->rchan_cnt;
1261 /* default flows can't be allocated and are accessible only by id */
1262 if (tmp_from < ud->rchan_cnt)
1265 if (tmp_from + cnt > ud->rflow_cnt)
1268 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1271 start = bitmap_find_next_zero_area(tmp,
1274 if (start >= ud->rflow_cnt)
1277 if (from >= 0 && start != from)
1280 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1284 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1286 if (from < ud->rchan_cnt)
1288 if (from + cnt > ud->rflow_cnt)
1291 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
1295 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1298 * An attempt to request an rflow by ID can be made for any rflow
1299 * that is not in use, with the assumption that the caller knows what it is doing.
1300 * TI-SCI FW will perform an additional permission check anyway, it's
1304 if (id < 0 || id >= ud->rflow_cnt)
1305 return ERR_PTR(-ENOENT);
1307 if (test_bit(id, ud->rflow_in_use))
1308 return ERR_PTR(-ENOENT);
1310 if (ud->rflow_gp_map) {
1311 /* GP rflow has to be allocated first */
1312 if (!test_bit(id, ud->rflow_gp_map) &&
1313 !test_bit(id, ud->rflow_gp_map_allocated))
1314 return ERR_PTR(-EINVAL);
1317 dev_dbg(ud->dev, "get rflow%d\n", id);
1318 set_bit(id, ud->rflow_in_use);
1319 return &ud->rflows[id];
1322 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1324 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1325 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1329 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1330 clear_bit(rflow->id, ud->rflow_in_use);
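/*
 * Generate __udma_reserve_bchan/tchan/rchan(): reserve a specific channel if
 * a valid id is passed, otherwise search for a free channel starting from the
 * requested throughput level (clamped to the highest level available).
 */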
1333 #define UDMA_RESERVE_RESOURCE(res) \
1334 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1335 enum udma_tp_level tpl, \
1339 if (test_bit(id, ud->res##_map)) { \
1340 dev_err(ud->dev, "%s%d is in use\n", #res, id); \
1341 return ERR_PTR(-ENOENT); \
1346 if (tpl >= ud->res##_tpl.levels) \
1347 tpl = ud->res##_tpl.levels - 1; \
1349 start = ud->res##_tpl.start_idx[tpl]; \
1351 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1353 if (id == ud->res##_cnt) { \
1354 return ERR_PTR(-ENOENT); \
1358 set_bit(id, ud->res##_map); \
1359 return &ud->res##s[id]; \
1362 UDMA_RESERVE_RESOURCE(bchan);
1363 UDMA_RESERVE_RESOURCE(tchan);
1364 UDMA_RESERVE_RESOURCE(rchan);
1366 static int bcdma_get_bchan(struct udma_chan *uc)
1368 struct udma_dev *ud = uc->ud;
1369 enum udma_tp_level tpl;
1373 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
1374 uc->id, uc->bchan->id);
1379 * Use normal channels for peripherals, and highest TPL channel for
1382 if (uc->config.tr_trigger_type)
1385 tpl = ud->bchan_tpl.levels - 1;
1387 uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
1388 if (IS_ERR(uc->bchan)) {
1389 ret = PTR_ERR(uc->bchan);
1394 uc->tchan = uc->bchan;
1399 static int udma_get_tchan(struct udma_chan *uc)
1401 struct udma_dev *ud = uc->ud;
1405 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1406 uc->id, uc->tchan->id);
1411 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1412 * For PKTDMA mapped channels it is configured to a channel which must
1413 * be used to service the peripheral.
1415 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
1416 uc->config.mapped_channel_id);
1417 if (IS_ERR(uc->tchan)) {
1418 ret = PTR_ERR(uc->tchan);
1423 if (ud->tflow_cnt) {
1426 /* Only PKTDMA have support for tx flows */
1427 if (uc->config.default_flow_id >= 0)
1428 tflow_id = uc->config.default_flow_id;
1430 tflow_id = uc->tchan->id;
1432 if (test_bit(tflow_id, ud->tflow_map)) {
1433 dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1434 clear_bit(uc->tchan->id, ud->tchan_map);
1439 uc->tchan->tflow_id = tflow_id;
1440 set_bit(tflow_id, ud->tflow_map);
1442 uc->tchan->tflow_id = -1;
1448 static int udma_get_rchan(struct udma_chan *uc)
1450 struct udma_dev *ud = uc->ud;
1454 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1455 uc->id, uc->rchan->id);
1460 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1461 * For PKTDMA mapped channels it is configured to a channel which must
1462 * be used to service the peripheral.
1464 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1465 uc->config.mapped_channel_id);
1466 if (IS_ERR(uc->rchan)) {
1467 ret = PTR_ERR(uc->rchan);
1475 static int udma_get_chan_pair(struct udma_chan *uc)
1477 struct udma_dev *ud = uc->ud;
1480 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1481 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1482 uc->id, uc->tchan->id);
1487 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1488 uc->id, uc->tchan->id);
1490 } else if (uc->rchan) {
1491 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1492 uc->id, uc->rchan->id);
1496 /* Can be optimized, but let's have it like this for now */
1497 end = min(ud->tchan_cnt, ud->rchan_cnt);
1499 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
1500 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
1502 chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
1503 for (; chan_id < end; chan_id++) {
1504 if (!test_bit(chan_id, ud->tchan_map) &&
1505 !test_bit(chan_id, ud->rchan_map))
1512 set_bit(chan_id, ud->tchan_map);
1513 set_bit(chan_id, ud->rchan_map);
1514 uc->tchan = &ud->tchans[chan_id];
1515 uc->rchan = &ud->rchans[chan_id];
1517 /* UDMA does not use tx flows */
1518 uc->tchan->tflow_id = -1;
1523 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1525 struct udma_dev *ud = uc->ud;
1529 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1534 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1535 uc->id, uc->rflow->id);
1539 uc->rflow = __udma_get_rflow(ud, flow_id);
1540 if (IS_ERR(uc->rflow)) {
1541 ret = PTR_ERR(uc->rflow);
1549 static void bcdma_put_bchan(struct udma_chan *uc)
1551 struct udma_dev *ud = uc->ud;
1554 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1556 clear_bit(uc->bchan->id, ud->bchan_map);
1562 static void udma_put_rchan(struct udma_chan *uc)
1564 struct udma_dev *ud = uc->ud;
1567 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1569 clear_bit(uc->rchan->id, ud->rchan_map);
1574 static void udma_put_tchan(struct udma_chan *uc)
1576 struct udma_dev *ud = uc->ud;
1579 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1581 clear_bit(uc->tchan->id, ud->tchan_map);
1583 if (uc->tchan->tflow_id >= 0)
1584 clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1590 static void udma_put_rflow(struct udma_chan *uc)
1592 struct udma_dev *ud = uc->ud;
1595 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1597 __udma_put_rflow(ud, uc->rflow);
1602 static void bcdma_free_bchan_resources(struct udma_chan *uc)
1607 k3_ringacc_ring_free(uc->bchan->tc_ring);
1608 k3_ringacc_ring_free(uc->bchan->t_ring);
1609 uc->bchan->tc_ring = NULL;
1610 uc->bchan->t_ring = NULL;
1611 k3_configure_chan_coherency(&uc->vc.chan, 0);
1613 bcdma_put_bchan(uc);
1616 static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
1618 struct k3_ring_cfg ring_cfg;
1619 struct udma_dev *ud = uc->ud;
1622 ret = bcdma_get_bchan(uc);
1626 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1628 &uc->bchan->tc_ring);
1634 memset(&ring_cfg, 0, sizeof(ring_cfg));
1635 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1636 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1637 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1639 k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
1640 ring_cfg.asel = ud->asel;
1641 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1643 ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1650 k3_ringacc_ring_free(uc->bchan->tc_ring);
1651 uc->bchan->tc_ring = NULL;
1652 k3_ringacc_ring_free(uc->bchan->t_ring);
1653 uc->bchan->t_ring = NULL;
1654 k3_configure_chan_coherency(&uc->vc.chan, 0);
1656 bcdma_put_bchan(uc);
1661 static void udma_free_tx_resources(struct udma_chan *uc)
1666 k3_ringacc_ring_free(uc->tchan->t_ring);
1667 k3_ringacc_ring_free(uc->tchan->tc_ring);
1668 uc->tchan->t_ring = NULL;
1669 uc->tchan->tc_ring = NULL;
1674 static int udma_alloc_tx_resources(struct udma_chan *uc)
1676 struct k3_ring_cfg ring_cfg;
1677 struct udma_dev *ud = uc->ud;
1678 struct udma_tchan *tchan;
1681 ret = udma_get_tchan(uc);
1686 if (tchan->tflow_id >= 0)
1687 ring_idx = tchan->tflow_id;
1689 ring_idx = ud->bchan_cnt + tchan->id;
1691 ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
1699 memset(&ring_cfg, 0, sizeof(ring_cfg));
1700 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1701 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1702 if (ud->match_data->type == DMA_TYPE_UDMA) {
1703 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1705 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1707 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1708 ring_cfg.asel = uc->config.asel;
1709 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1712 ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
1713 ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
1721 k3_ringacc_ring_free(uc->tchan->tc_ring);
1722 uc->tchan->tc_ring = NULL;
1723 k3_ringacc_ring_free(uc->tchan->t_ring);
1724 uc->tchan->t_ring = NULL;
1731 static void udma_free_rx_resources(struct udma_chan *uc)
1737 struct udma_rflow *rflow = uc->rflow;
1739 k3_ringacc_ring_free(rflow->fd_ring);
1740 k3_ringacc_ring_free(rflow->r_ring);
1741 rflow->fd_ring = NULL;
1742 rflow->r_ring = NULL;
1750 static int udma_alloc_rx_resources(struct udma_chan *uc)
1752 struct udma_dev *ud = uc->ud;
1753 struct k3_ring_cfg ring_cfg;
1754 struct udma_rflow *rflow;
1758 ret = udma_get_rchan(uc);
1762 /* For MEM_TO_MEM we don't need rflow or rings */
1763 if (uc->config.dir == DMA_MEM_TO_MEM)
1766 if (uc->config.default_flow_id >= 0)
1767 ret = udma_get_rflow(uc, uc->config.default_flow_id);
1769 ret = udma_get_rflow(uc, uc->rchan->id);
1778 fd_ring_id = ud->tflow_cnt + rflow->id;
1780 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
1783 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1784 &rflow->fd_ring, &rflow->r_ring);
1790 memset(&ring_cfg, 0, sizeof(ring_cfg));
1792 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1793 if (ud->match_data->type == DMA_TYPE_UDMA) {
1794 if (uc->config.pkt_mode)
1795 ring_cfg.size = SG_MAX_SEGMENTS;
1797 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1799 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1801 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1802 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1804 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1805 ring_cfg.asel = uc->config.asel;
1806 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1809 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1811 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1812 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1820 k3_ringacc_ring_free(rflow->r_ring);
1821 rflow->r_ring = NULL;
1822 k3_ringacc_ring_free(rflow->fd_ring);
1823 rflow->fd_ring = NULL;
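/*
 * Bitmasks telling System Firmware which fields of the TI-SCI channel
 * configuration requests below carry valid data for the different DMA
 * variants (BCDMA vs. UDMA, block copy vs. TX vs. RX channels).
 */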
1832 #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
1833 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1834 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1836 #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
1837 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1838 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1840 #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
1841 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1843 #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
1844 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1845 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1846 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1847 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1848 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1849 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1850 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1851 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1853 #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
1854 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1855 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1856 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1857 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1858 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1859 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1860 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1861 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1862 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1864 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1866 struct udma_dev *ud = uc->ud;
1867 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1868 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1869 struct udma_tchan *tchan = uc->tchan;
1870 struct udma_rchan *rchan = uc->rchan;
1875 /* Non synchronized - mem to mem type of transfer */
1876 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1877 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1878 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1880 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1881 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
1883 burst_size = ud->match_data->burst_size[tpl];
1886 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1887 req_tx.nav_id = tisci_rm->tisci_dev_id;
1888 req_tx.index = tchan->id;
1889 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1890 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1891 req_tx.txcq_qnum = tc_ring;
1892 req_tx.tx_atype = ud->atype;
1894 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1895 req_tx.tx_burst_size = burst_size;
1898 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1900 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1904 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
1905 req_rx.nav_id = tisci_rm->tisci_dev_id;
1906 req_rx.index = rchan->id;
1907 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1908 req_rx.rxcq_qnum = tc_ring;
1909 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1910 req_rx.rx_atype = ud->atype;
1912 req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1913 req_rx.rx_burst_size = burst_size;
1916 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1918 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1923 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1925 struct udma_dev *ud = uc->ud;
1926 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1927 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1928 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1929 struct udma_bchan *bchan = uc->bchan;
1934 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1935 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
1937 burst_size = ud->match_data->burst_size[tpl];
1940 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1941 req_tx.nav_id = tisci_rm->tisci_dev_id;
1942 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1943 req_tx.index = bchan->id;
1945 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1946 req_tx.tx_burst_size = burst_size;
1949 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1951 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1956 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1958 struct udma_dev *ud = uc->ud;
1959 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1960 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1961 struct udma_tchan *tchan = uc->tchan;
1962 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1963 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1964 u32 mode, fetch_size;
1967 if (uc->config.pkt_mode) {
1968 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1969 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1970 uc->config.psd_size, 0);
1972 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1973 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1976 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1977 req_tx.nav_id = tisci_rm->tisci_dev_id;
1978 req_tx.index = tchan->id;
1979 req_tx.tx_chan_type = mode;
1980 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1981 req_tx.tx_fetch_size = fetch_size >> 2;
1982 req_tx.txcq_qnum = tc_ring;
1983 req_tx.tx_atype = uc->config.atype;
1984 if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
1985 ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1986 /* wait for peer to complete the teardown for PDMAs */
1987 req_tx.valid_params |=
1988 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1989 req_tx.tx_tdtype = 1;
1992 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1994 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1999 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
2001 struct udma_dev *ud = uc->ud;
2002 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2003 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2004 struct udma_tchan *tchan = uc->tchan;
2005 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2008 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2009 req_tx.nav_id = tisci_rm->tisci_dev_id;
2010 req_tx.index = tchan->id;
2011 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2012 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2013 /* wait for peer to complete the teardown for PDMAs */
2014 req_tx.valid_params |=
2015 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2016 req_tx.tx_tdtype = 1;
2019 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2021 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2026 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2028 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
2030 struct udma_dev *ud = uc->ud;
2031 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2032 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2033 struct udma_rchan *rchan = uc->rchan;
2034 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
2035 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2036 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2037 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2038 u32 mode, fetch_size;
2041 if (uc->config.pkt_mode) {
2042 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
2043 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
2044 uc->config.psd_size, 0);
2046 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
2047 fetch_size = sizeof(struct cppi5_desc_hdr_t);
2050 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
2051 req_rx.nav_id = tisci_rm->tisci_dev_id;
2052 req_rx.index = rchan->id;
2053 req_rx.rx_fetch_size = fetch_size >> 2;
2054 req_rx.rxcq_qnum = rx_ring;
2055 req_rx.rx_chan_type = mode;
2056 req_rx.rx_atype = uc->config.atype;
2058 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2060 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2064 flow_req.valid_params =
2065 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2066 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2067 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
2068 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
2069 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
2070 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
2071 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
2072 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
2073 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
2074 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
2075 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
2076 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
2077 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
2079 flow_req.nav_id = tisci_rm->tisci_dev_id;
2080 flow_req.flow_index = rchan->id;
2082 if (uc->config.needs_epib)
2083 flow_req.rx_einfo_present = 1;
2085 flow_req.rx_einfo_present = 0;
2086 if (uc->config.psd_size)
2087 flow_req.rx_psinfo_present = 1;
2089 flow_req.rx_psinfo_present = 0;
2090 flow_req.rx_error_handling = 1;
2091 flow_req.rx_dest_qnum = rx_ring;
2092 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
2093 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
2094 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
2095 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
2096 flow_req.rx_fdq0_sz0_qnum = fd_ring;
2097 flow_req.rx_fdq1_qnum = fd_ring;
2098 flow_req.rx_fdq2_qnum = fd_ring;
2099 flow_req.rx_fdq3_qnum = fd_ring;
2101 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2104 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2109 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
2111 struct udma_dev *ud = uc->ud;
2112 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2113 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2114 struct udma_rchan *rchan = uc->rchan;
2115 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2118 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2119 req_rx.nav_id = tisci_rm->tisci_dev_id;
2120 req_rx.index = rchan->id;
2122 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2124 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2129 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2131 struct udma_dev *ud = uc->ud;
2132 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2133 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2134 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2135 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2138 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2139 req_rx.nav_id = tisci_rm->tisci_dev_id;
2140 req_rx.index = uc->rchan->id;
2142 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2144 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2148 flow_req.valid_params =
2149 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2150 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2151 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2153 flow_req.nav_id = tisci_rm->tisci_dev_id;
2154 flow_req.flow_index = uc->rflow->id;
2156 if (uc->config.needs_epib)
2157 flow_req.rx_einfo_present = 1;
2159 flow_req.rx_einfo_present = 0;
2160 if (uc->config.psd_size)
2161 flow_req.rx_psinfo_present = 1;
2163 flow_req.rx_psinfo_present = 0;
2164 flow_req.rx_error_handling = 1;
2166 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2169 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
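/*
 * dmaengine alloc_chan_resources callback for UDMA: reserves the channels and
 * rings needed for the configured direction, configures them through TI-SCI,
 * pairs the PSI-L threads and requests the ring interrupt (plus the UDMA TR
 * event interrupt for TR mode slave channels).
 */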
2175 static int udma_alloc_chan_resources(struct dma_chan *chan)
2177 struct udma_chan *uc = to_udma_chan(chan);
2178 struct udma_dev *ud = to_udma_dev(chan->device);
2179 const struct udma_soc_data *soc_data = ud->soc_data;
2180 struct k3_ring *irq_ring;
2184 uc->dma_dev = ud->dev;
2186 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
2187 uc->use_dma_pool = true;
2188 /* in case of MEM_TO_MEM we have a maximum of two TRs */
2189 if (uc->config.dir == DMA_MEM_TO_MEM) {
2190 uc->config.hdesc_size = cppi5_trdesc_calc_size(
2191 sizeof(struct cppi5_tr_type15_t), 2);
2192 uc->config.pkt_mode = false;
2196 if (uc->use_dma_pool) {
2197 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2198 uc->config.hdesc_size,
2201 if (!uc->hdesc_pool) {
2202 dev_err(ud->ddev.dev,
2203 "Descriptor pool allocation failed\n");
2204 uc->use_dma_pool = false;
2211 * Make sure that the completion is in a known state:
2212 * No teardown, the channel is idle
2214 reinit_completion(&uc->teardown_completed);
2215 complete_all(&uc->teardown_completed);
2216 uc->state = UDMA_CHAN_IS_IDLE;
2218 switch (uc->config.dir) {
2219 case DMA_MEM_TO_MEM:
2220 /* Non synchronized - mem to mem type of transfer */
2221 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2224 ret = udma_get_chan_pair(uc);
2228 ret = udma_alloc_tx_resources(uc);
2234 ret = udma_alloc_rx_resources(uc);
2236 udma_free_tx_resources(uc);
2240 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2241 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2242 K3_PSIL_DST_THREAD_ID_OFFSET;
2244 irq_ring = uc->tchan->tc_ring;
2245 irq_udma_idx = uc->tchan->id;
2247 ret = udma_tisci_m2m_channel_config(uc);
2249 case DMA_MEM_TO_DEV:
2250 /* Slave transfer synchronized - mem to dev (TX) transfer */
2251 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2254 ret = udma_alloc_tx_resources(uc);
2258 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2259 uc->config.dst_thread = uc->config.remote_thread_id;
2260 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2262 irq_ring = uc->tchan->tc_ring;
2263 irq_udma_idx = uc->tchan->id;
2265 ret = udma_tisci_tx_channel_config(uc);
2267 case DMA_DEV_TO_MEM:
2268 /* Slave transfer synchronized - dev to mem (RX) transfer */
2269 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2272 ret = udma_alloc_rx_resources(uc);
2276 uc->config.src_thread = uc->config.remote_thread_id;
2277 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2278 K3_PSIL_DST_THREAD_ID_OFFSET;
2280 irq_ring = uc->rflow->r_ring;
2281 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
2283 ret = udma_tisci_rx_channel_config(uc);
2286 /* Cannot happen */
2287 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2288 __func__, uc->id, uc->config.dir);
2294 /* check if the channel configuration was successful */
2298 if (udma_is_chan_running(uc)) {
2299 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2300 udma_reset_chan(uc, false);
2301 if (udma_is_chan_running(uc)) {
2302 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2309 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2311 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2312 uc->config.src_thread, uc->config.dst_thread);
2316 uc->psil_paired = true;
2318 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
2319 if (uc->irq_num_ring <= 0) {
2320 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2321 k3_ringacc_get_ring_id(irq_ring));
2326 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2327 IRQF_TRIGGER_HIGH, uc->name, uc);
2329 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2333 /* Event from UDMA (TR events) only needed for slave TR mode channels */
2334 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
2335 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2336 if (uc->irq_num_udma <= 0) {
2337 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2339 free_irq(uc->irq_num_ring, uc);
2344 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2347 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2349 free_irq(uc->irq_num_ring, uc);
2353 uc->irq_num_udma = 0;
2356 udma_reset_rings(uc);
2361 uc->irq_num_ring = 0;
2362 uc->irq_num_udma = 0;
2364 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2365 uc->psil_paired = false;
2367 udma_free_tx_resources(uc);
2368 udma_free_rx_resources(uc);
2370 udma_reset_uchan(uc);
2372 if (uc->use_dma_pool) {
2373 dma_pool_destroy(uc->hdesc_pool);
2374 uc->use_dma_pool = false;
2380 static int bcdma_alloc_chan_resources(struct dma_chan *chan)
2382 struct udma_chan *uc = to_udma_chan(chan);
2383 struct udma_dev *ud = to_udma_dev(chan->device);
2384 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2385 u32 irq_udma_idx, irq_ring_idx;
2388 /* Only TR mode is supported */
2389 uc->config.pkt_mode = false;
2392 * Make sure that the completion is in a known state:
2393 * No teardown, the channel is idle
2395 reinit_completion(&uc->teardown_completed);
2396 complete_all(&uc->teardown_completed);
2397 uc->state = UDMA_CHAN_IS_IDLE;
2399 switch (uc->config.dir) {
2400 case DMA_MEM_TO_MEM:
2401 /* Non synchronized - mem to mem type of transfer */
2402 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2405 ret = bcdma_alloc_bchan_resources(uc);
2409 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
2410 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
2412 ret = bcdma_tisci_m2m_channel_config(uc);
2414 case DMA_MEM_TO_DEV:
2415 /* Slave transfer synchronized - mem to dev (TX) transfer */
2416 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2419 ret = udma_alloc_tx_resources(uc);
2421 uc->config.remote_thread_id = -1;
2425 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2426 uc->config.dst_thread = uc->config.remote_thread_id;
2427 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2429 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
2430 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
2432 ret = bcdma_tisci_tx_channel_config(uc);
2434 case DMA_DEV_TO_MEM:
2435 /* Slave transfer synchronized - dev to mem (RX) transfer */
2436 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2439 ret = udma_alloc_rx_resources(uc);
2441 uc->config.remote_thread_id = -1;
2445 uc->config.src_thread = uc->config.remote_thread_id;
2446 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2447 K3_PSIL_DST_THREAD_ID_OFFSET;
2449 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
2450 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
2452 ret = bcdma_tisci_rx_channel_config(uc);
2455 /* Cannot happen */
2456 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2457 __func__, uc->id, uc->config.dir);
2461 /* check if the channel configuration was successful */
2465 if (udma_is_chan_running(uc)) {
2466 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2467 udma_reset_chan(uc, false);
2468 if (udma_is_chan_running(uc)) {
2469 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2475 uc->dma_dev = dmaengine_get_dma_device(chan);
2476 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
2477 uc->config.hdesc_size = cppi5_trdesc_calc_size(
2478 sizeof(struct cppi5_tr_type15_t), 2);
2480 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2481 uc->config.hdesc_size,
2484 if (!uc->hdesc_pool) {
2485 dev_err(ud->ddev.dev,
2486 "Descriptor pool allocation failed\n");
2487 uc->use_dma_pool = false;
2492 uc->use_dma_pool = true;
2493 } else if (uc->config.dir != DMA_MEM_TO_MEM) {
2495 ret = navss_psil_pair(ud, uc->config.src_thread,
2496 uc->config.dst_thread);
2499 "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2500 uc->config.src_thread, uc->config.dst_thread);
2504 uc->psil_paired = true;
2507 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2508 if (uc->irq_num_ring <= 0) {
2509 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2515 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2516 IRQF_TRIGGER_HIGH, uc->name, uc);
2518 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2522 /* Event from BCDMA (TR events) only needed for slave channels */
2523 if (is_slave_direction(uc->config.dir)) {
2524 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2525 if (uc->irq_num_udma <= 0) {
2526 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2528 free_irq(uc->irq_num_ring, uc);
2533 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2536 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2538 free_irq(uc->irq_num_ring, uc);
2542 uc->irq_num_udma = 0;
2545 udma_reset_rings(uc);
2547 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2548 udma_check_tx_completion);
2552 uc->irq_num_ring = 0;
2553 uc->irq_num_udma = 0;
2555 if (uc->psil_paired)
2556 navss_psil_unpair(ud, uc->config.src_thread,
2557 uc->config.dst_thread);
2558 uc->psil_paired = false;
2560 bcdma_free_bchan_resources(uc);
2561 udma_free_tx_resources(uc);
2562 udma_free_rx_resources(uc);
2564 udma_reset_uchan(uc);
2566 if (uc->use_dma_pool) {
2567 dma_pool_destroy(uc->hdesc_pool);
2568 uc->use_dma_pool = false;
2574 static int bcdma_router_config(struct dma_chan *chan)
2576 struct k3_event_route_data *router_data = chan->route_data;
2577 struct udma_chan *uc = to_udma_chan(chan);
2583 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2586 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2587 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
2589 return router_data->set_event(router_data->priv, trigger_event);
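	/*
	 * Illustrative note (descriptive comment, not in the original source):
	 * the global trigger event is the SoC trigger event base plus two
	 * events per bchan. Assuming the am64_soc_data offset of 0xc400
	 * defined later in this file, bchan 1 with tr_trigger_type 2 resolves
	 * to 0xc400 + (1 * 2) + 2 - 1 = 0xc403.
	 */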
2592 static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2594 struct udma_chan *uc = to_udma_chan(chan);
2595 struct udma_dev *ud = to_udma_dev(chan->device);
2596 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2601 * Make sure that the completion is in a known state:
2602 * No teardown, the channel is idle
2604 reinit_completion(&uc->teardown_completed);
2605 complete_all(&uc->teardown_completed);
2606 uc->state = UDMA_CHAN_IS_IDLE;
2608 switch (uc->config.dir) {
2609 case DMA_MEM_TO_DEV:
2610 /* Slave transfer synchronized - mem to dev (TX) transfer */
2611 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2614 ret = udma_alloc_tx_resources(uc);
2616 uc->config.remote_thread_id = -1;
2620 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2621 uc->config.dst_thread = uc->config.remote_thread_id;
2622 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2624 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2626 ret = pktdma_tisci_tx_channel_config(uc);
2628 case DMA_DEV_TO_MEM:
2629 /* Slave transfer synchronized - dev to mem (RX) transfer */
2630 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2633 ret = udma_alloc_rx_resources(uc);
2635 uc->config.remote_thread_id = -1;
2639 uc->config.src_thread = uc->config.remote_thread_id;
2640 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2641 K3_PSIL_DST_THREAD_ID_OFFSET;
2643 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2645 ret = pktdma_tisci_rx_channel_config(uc);
2648 /* Cannot happen */
2649 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2650 __func__, uc->id, uc->config.dir);
2654 /* check if the channel configuration was successful */
2658 if (udma_is_chan_running(uc)) {
2659 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2660 udma_reset_chan(uc, false);
2661 if (udma_is_chan_running(uc)) {
2662 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2668 uc->dma_dev = dmaengine_get_dma_device(chan);
2669 uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2670 uc->config.hdesc_size, ud->desc_align,
2672 if (!uc->hdesc_pool) {
2673 dev_err(ud->ddev.dev,
2674 "Descriptor pool allocation failed\n");
2675 uc->use_dma_pool = false;
2680 uc->use_dma_pool = true;
2683 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2685 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2686 uc->config.src_thread, uc->config.dst_thread);
2690 uc->psil_paired = true;
2692 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2693 if (uc->irq_num_ring <= 0) {
2694 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2700 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2701 IRQF_TRIGGER_HIGH, uc->name, uc);
2703 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2707 uc->irq_num_udma = 0;
2709 udma_reset_rings(uc);
2711 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2712 udma_check_tx_completion);
2716 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2717 uc->id, uc->tchan->id, uc->tchan->tflow_id,
2718 uc->config.remote_thread_id);
2721 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2722 uc->id, uc->rchan->id, uc->rflow->id,
2723 uc->config.remote_thread_id);
2727 uc->irq_num_ring = 0;
2729 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2730 uc->psil_paired = false;
2732 udma_free_tx_resources(uc);
2733 udma_free_rx_resources(uc);
2735 udma_reset_uchan(uc);
2737 dma_pool_destroy(uc->hdesc_pool);
2738 uc->use_dma_pool = false;
2743 static int udma_slave_config(struct dma_chan *chan,
2744 struct dma_slave_config *cfg)
2746 struct udma_chan *uc = to_udma_chan(chan);
2748 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
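	/*
	 * Client-side usage sketch (illustrative only, not part of this
	 * driver): the fields copied above are the ones a peripheral driver
	 * sets through the generic dmaengine API, e.g.
	 *
	 *	struct dma_slave_config cfg = {
	 *		.direction = DMA_MEM_TO_DEV,
	 *		.dst_addr = fifo_phys_addr,	// hypothetical FIFO bus address
	 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	 *		.dst_maxburst = 8,
	 *	};
	 *	dmaengine_slave_config(chan, &cfg);
	 *
	 * dst_addr_width and dst_maxburst are later consumed by
	 * udma_configure_statictr() and the prep callbacks below.
	 */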
2753 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2754 size_t tr_size, int tr_count,
2755 enum dma_transfer_direction dir)
2757 struct udma_hwdesc *hwdesc;
2758 struct cppi5_desc_hdr_t *tr_desc;
2759 struct udma_desc *d;
2760 u32 reload_count = 0;
2770 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2774 /* We have only one descriptor containing multiple TRs */
2775 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2779 d->sglen = tr_count;
2781 d->hwdesc_count = 1;
2782 hwdesc = &d->hwdesc[0];
2784 /* Allocate memory for DMA ring descriptor */
2785 if (uc->use_dma_pool) {
2786 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2787 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2789 &hwdesc->cppi5_desc_paddr);
2791 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2793 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2794 uc->ud->desc_align);
2795 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2796 hwdesc->cppi5_desc_size,
2797 &hwdesc->cppi5_desc_paddr,
2801 if (!hwdesc->cppi5_desc_vaddr) {
2806 /* Start of the TR req records */
2807 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2808 /* Start address of the TR response array */
2809 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
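	/*
	 * Layout sketch (descriptive comment, not in the original source):
	 * the buffer allocated above is split as
	 *
	 *   [ TR descriptor header, occupying one tr_size slot ]
	 *   [ TR record 0 ... TR record (tr_count - 1) ]   <- tr_req_base
	 *   [ TR response 0 ... TR response (tr_count - 1)] <- tr_resp_base
	 *
	 * which matches the cppi5_trdesc_calc_size() sizing used on the
	 * non-pool allocation path.
	 */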
2811 tr_desc = hwdesc->cppi5_desc_vaddr;
2814 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2816 if (dir == DMA_DEV_TO_MEM)
2817 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2819 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2821 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2822 cppi5_desc_set_pktids(tr_desc, uc->id,
2823 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2824 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2830 * udma_get_tr_counters - calculate TR counters for a given length
2831 * @len: Length of the transfer
2832 * @align_to: Preferred alignment
2833 * @tr0_cnt0: First TR icnt0
2834 * @tr0_cnt1: First TR icnt1
2835 * @tr1_cnt0: Second (if used) TR icnt0
2837 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2838 * For len >= SZ_64K two TRs are used in a simple way:
2839 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2840 * Second TR: the remaining length (tr1_cnt0)
2842 * Returns the number of TRs the length needs (1 or 2)
2843 * -EINVAL if the length cannot be supported
2845 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2846 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2859 *tr0_cnt0 = SZ_64K - BIT(align_to);
2860 if (len / *tr0_cnt0 >= SZ_64K) {
2868 *tr0_cnt1 = len / *tr0_cnt0;
2869 *tr1_cnt0 = len % *tr0_cnt0;
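	/*
	 * Worked example for this helper (illustrative comment, not in the
	 * original source): len = 200000 bytes with align_to = 2 gives
	 * tr0_cnt0 = SZ_64K - BIT(2) = 65532, tr0_cnt1 = 200000 / 65532 = 3
	 * and tr1_cnt0 = 200000 % 65532 = 3404, i.e. one TR moving
	 * 3 x 65532 bytes and a second TR moving the 3404 byte remainder.
	 */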
2874 static struct udma_desc *
2875 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2876 unsigned int sglen, enum dma_transfer_direction dir,
2877 unsigned long tx_flags, void *context)
2879 struct scatterlist *sgent;
2880 struct udma_desc *d;
2881 struct cppi5_tr_type1_t *tr_req = NULL;
2882 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2889 /* estimate the number of TRs we will need */
2890 for_each_sg(sgl, sgent, sglen, i) {
2891 if (sg_dma_len(sgent) < SZ_64K)
2897 /* Now allocate and set up the descriptor. */
2898 tr_size = sizeof(struct cppi5_tr_type1_t);
2899 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2905 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2908 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2910 tr_req = d->hwdesc[0].tr_req_base;
2911 for_each_sg(sgl, sgent, sglen, i) {
2912 dma_addr_t sg_addr = sg_dma_address(sgent);
2914 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2915 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2917 dev_err(uc->ud->dev, "size %u is not supported\n",
2919 udma_free_hwdesc(uc, d);
2924 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2925 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2926 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2929 tr_req[tr_idx].addr = sg_addr;
2930 tr_req[tr_idx].icnt0 = tr0_cnt0;
2931 tr_req[tr_idx].icnt1 = tr0_cnt1;
2932 tr_req[tr_idx].dim1 = tr0_cnt0;
2936 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2938 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2939 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2940 CPPI5_TR_CSF_SUPR_EVT);
2942 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2943 tr_req[tr_idx].icnt0 = tr1_cnt0;
2944 tr_req[tr_idx].icnt1 = 1;
2945 tr_req[tr_idx].dim1 = tr1_cnt0;
2949 d->residue += sg_dma_len(sgent);
2952 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2953 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2958 static struct udma_desc *
2959 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2961 enum dma_transfer_direction dir,
2962 unsigned long tx_flags, void *context)
2964 struct scatterlist *sgent;
2965 struct cppi5_tr_type15_t *tr_req = NULL;
2966 enum dma_slave_buswidth dev_width;
2967 u16 tr_cnt0, tr_cnt1;
2968 dma_addr_t dev_addr;
2969 struct udma_desc *d;
2971 size_t tr_size, sg_len;
2974 u32 burst, trigger_size, port_window;
2977 if (dir == DMA_DEV_TO_MEM) {
2978 dev_addr = uc->cfg.src_addr;
2979 dev_width = uc->cfg.src_addr_width;
2980 burst = uc->cfg.src_maxburst;
2981 port_window = uc->cfg.src_port_window_size;
2982 } else if (dir == DMA_MEM_TO_DEV) {
2983 dev_addr = uc->cfg.dst_addr;
2984 dev_width = uc->cfg.dst_addr_width;
2985 burst = uc->cfg.dst_maxburst;
2986 port_window = uc->cfg.dst_port_window_size;
2988 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2996 if (port_window != burst) {
2997 dev_err(uc->ud->dev,
2998 "The burst must be equal to port_window\n");
3002 tr_cnt0 = dev_width * port_window;
3005 tr_cnt0 = dev_width;
3008 trigger_size = tr_cnt0 * tr_cnt1;
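	/*
	 * Illustrative example (descriptive comment only, assuming tr_cnt1
	 * ends up as the burst count when no port window is configured):
	 * a 4 byte bus width and maxburst of 8 give trigger_size = 32, so
	 * each trigger of the configured type moves 32 bytes and every SG
	 * entry length must be a multiple of 32 (checked below).
	 */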
3010 /* estimate the number of TRs we will need */
3011 for_each_sg(sgl, sgent, sglen, i) {
3012 sg_len = sg_dma_len(sgent);
3014 if (sg_len % trigger_size) {
3015 dev_err(uc->ud->dev,
3016 "Not aligned SG entry (%zu for %u)\n", sg_len,
3021 if (sg_len / trigger_size < SZ_64K)
3027 /* Now allocate and set up the descriptor. */
3028 tr_size = sizeof(struct cppi5_tr_type15_t);
3029 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
3035 if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
3038 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3042 tr_req = d->hwdesc[0].tr_req_base;
3043 for_each_sg(sgl, sgent, sglen, i) {
3044 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
3045 dma_addr_t sg_addr = sg_dma_address(sgent);
3047 sg_len = sg_dma_len(sgent);
3048 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
3049 &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
3051 dev_err(uc->ud->dev, "size %zu is not supported\n",
3053 udma_free_hwdesc(uc, d);
3058 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
3059 true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3060 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
3061 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3062 uc->config.tr_trigger_type,
3063 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
3066 if (dir == DMA_DEV_TO_MEM) {
3067 tr_req[tr_idx].addr = dev_addr;
3068 tr_req[tr_idx].icnt0 = tr_cnt0;
3069 tr_req[tr_idx].icnt1 = tr_cnt1;
3070 tr_req[tr_idx].icnt2 = tr0_cnt2;
3071 tr_req[tr_idx].icnt3 = tr0_cnt3;
3072 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3074 tr_req[tr_idx].daddr = sg_addr;
3075 tr_req[tr_idx].dicnt0 = tr_cnt0;
3076 tr_req[tr_idx].dicnt1 = tr_cnt1;
3077 tr_req[tr_idx].dicnt2 = tr0_cnt2;
3078 tr_req[tr_idx].dicnt3 = tr0_cnt3;
3079 tr_req[tr_idx].ddim1 = tr_cnt0;
3080 tr_req[tr_idx].ddim2 = trigger_size;
3081 tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
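			/*
			 * Descriptive note (not in the original source): in the
			 * DEV_TO_MEM case the source side keeps re-reading the
			 * peripheral window (dim1 is negative), while the
			 * destination indexes advance linearly through memory:
			 * tr_cnt0 bytes per dicnt1 step, trigger_size bytes per
			 * trigger (dicnt2) and trigger_size * tr0_cnt2 bytes
			 * per dicnt3 step.
			 */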
3083 tr_req[tr_idx].addr = sg_addr;
3084 tr_req[tr_idx].icnt0 = tr_cnt0;
3085 tr_req[tr_idx].icnt1 = tr_cnt1;
3086 tr_req[tr_idx].icnt2 = tr0_cnt2;
3087 tr_req[tr_idx].icnt3 = tr0_cnt3;
3088 tr_req[tr_idx].dim1 = tr_cnt0;
3089 tr_req[tr_idx].dim2 = trigger_size;
3090 tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3092 tr_req[tr_idx].daddr = dev_addr;
3093 tr_req[tr_idx].dicnt0 = tr_cnt0;
3094 tr_req[tr_idx].dicnt1 = tr_cnt1;
3095 tr_req[tr_idx].dicnt2 = tr0_cnt2;
3096 tr_req[tr_idx].dicnt3 = tr0_cnt3;
3097 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3103 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3105 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3106 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3107 CPPI5_TR_CSF_SUPR_EVT);
3108 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3109 uc->config.tr_trigger_type,
3110 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3113 sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3114 if (dir == DMA_DEV_TO_MEM) {
3115 tr_req[tr_idx].addr = dev_addr;
3116 tr_req[tr_idx].icnt0 = tr_cnt0;
3117 tr_req[tr_idx].icnt1 = tr_cnt1;
3118 tr_req[tr_idx].icnt2 = tr1_cnt2;
3119 tr_req[tr_idx].icnt3 = 1;
3120 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3122 tr_req[tr_idx].daddr = sg_addr;
3123 tr_req[tr_idx].dicnt0 = tr_cnt0;
3124 tr_req[tr_idx].dicnt1 = tr_cnt1;
3125 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3126 tr_req[tr_idx].dicnt3 = 1;
3127 tr_req[tr_idx].ddim1 = tr_cnt0;
3128 tr_req[tr_idx].ddim2 = trigger_size;
3130 tr_req[tr_idx].addr = sg_addr;
3131 tr_req[tr_idx].icnt0 = tr_cnt0;
3132 tr_req[tr_idx].icnt1 = tr_cnt1;
3133 tr_req[tr_idx].icnt2 = tr1_cnt2;
3134 tr_req[tr_idx].icnt3 = 1;
3135 tr_req[tr_idx].dim1 = tr_cnt0;
3136 tr_req[tr_idx].dim2 = trigger_size;
3138 tr_req[tr_idx].daddr = dev_addr;
3139 tr_req[tr_idx].dicnt0 = tr_cnt0;
3140 tr_req[tr_idx].dicnt1 = tr_cnt1;
3141 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3142 tr_req[tr_idx].dicnt3 = 1;
3143 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3148 d->residue += sg_len;
3151 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
3152 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3157 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3158 enum dma_slave_buswidth dev_width,
3161 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3164 /* Bus width translates to the element size (ES) */
3165 switch (dev_width) {
3166 case DMA_SLAVE_BUSWIDTH_1_BYTE:
3167 d->static_tr.elsize = 0;
3169 case DMA_SLAVE_BUSWIDTH_2_BYTES:
3170 d->static_tr.elsize = 1;
3172 case DMA_SLAVE_BUSWIDTH_3_BYTES:
3173 d->static_tr.elsize = 2;
3175 case DMA_SLAVE_BUSWIDTH_4_BYTES:
3176 d->static_tr.elsize = 3;
3178 case DMA_SLAVE_BUSWIDTH_8_BYTES:
3179 d->static_tr.elsize = 4;
3181 default: /* not reached */
3185 d->static_tr.elcnt = elcnt;
3188 * PDMA must close the packet when the channel is in packet mode.
3189 * For TR mode when the channel is not cyclic we also need PDMA to close
3190 * the packet, otherwise the transfer will stall because PDMA holds on
3191 * to the data it has received from the peripheral.
3193 if (uc->config.pkt_mode || !uc->cyclic) {
3194 unsigned int div = dev_width * elcnt;
3197 d->static_tr.bstcnt = d->residue / d->sglen / div;
3199 d->static_tr.bstcnt = d->residue / div;
3201 if (uc->config.dir == DMA_DEV_TO_MEM &&
3202 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3205 d->static_tr.bstcnt = 0;
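	/*
	 * Worked example (illustrative comment only): for a non-cyclic
	 * 4096 byte transfer with a 4 byte element size and an element count
	 * of 8, div is 32 and bstcnt becomes 128 bursts. The DEV_TO_MEM
	 * check above rejects bstcnt values above statictr_z_mask (4095 on
	 * AM654 class devices, see am654_main_data below), which is what the
	 * callers report as the "StaticTR Z" limit.
	 */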
3211 static struct udma_desc *
3212 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3213 unsigned int sglen, enum dma_transfer_direction dir,
3214 unsigned long tx_flags, void *context)
3216 struct scatterlist *sgent;
3217 struct cppi5_host_desc_t *h_desc = NULL;
3218 struct udma_desc *d;
3223 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3228 d->hwdesc_count = sglen;
3230 if (dir == DMA_DEV_TO_MEM)
3231 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3233 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3235 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3238 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3240 for_each_sg(sgl, sgent, sglen, i) {
3241 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3242 dma_addr_t sg_addr = sg_dma_address(sgent);
3243 struct cppi5_host_desc_t *desc;
3244 size_t sg_len = sg_dma_len(sgent);
3246 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3248 &hwdesc->cppi5_desc_paddr);
3249 if (!hwdesc->cppi5_desc_vaddr) {
3250 dev_err(uc->ud->dev,
3251 "descriptor%d allocation failed\n", i);
3253 udma_free_hwdesc(uc, d);
3258 d->residue += sg_len;
3259 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3260 desc = hwdesc->cppi5_desc_vaddr;
3263 cppi5_hdesc_init(desc, 0, 0);
3264 /* Flow and Packet ID */
3265 cppi5_desc_set_pktids(&desc->hdr, uc->id,
3266 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3267 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3269 cppi5_hdesc_reset_hbdesc(desc);
3270 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3273 /* attach the sg buffer to the descriptor */
3275 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3277 /* Attach link as host buffer descriptor */
3279 cppi5_hdesc_link_hbdesc(h_desc,
3280 hwdesc->cppi5_desc_paddr | asel);
3282 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3283 dir == DMA_MEM_TO_DEV)
3287 if (d->residue >= SZ_4M) {
3288 dev_err(uc->ud->dev,
3289 "%s: Transfer size %u is over the supported 4M range\n",
3290 __func__, d->residue);
3291 udma_free_hwdesc(uc, d);
3296 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3297 cppi5_hdesc_set_pktlen(h_desc, d->residue);
3302 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3303 void *data, size_t len)
3305 struct udma_desc *d = to_udma_desc(desc);
3306 struct udma_chan *uc = to_udma_chan(desc->chan);
3307 struct cppi5_host_desc_t *h_desc;
3311 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3314 if (!data || len > uc->config.metadata_size)
3317 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3320 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3321 if (d->dir == DMA_MEM_TO_DEV)
3322 memcpy(h_desc->epib, data, len);
3324 if (uc->config.needs_epib)
3325 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3328 d->metadata_size = len;
3329 if (uc->config.needs_epib)
3330 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3332 cppi5_hdesc_update_flags(h_desc, flags);
3333 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3338 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3339 size_t *payload_len, size_t *max_len)
3341 struct udma_desc *d = to_udma_desc(desc);
3342 struct udma_chan *uc = to_udma_chan(desc->chan);
3343 struct cppi5_host_desc_t *h_desc;
3345 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3346 return ERR_PTR(-ENOTSUPP);
3348 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3350 *max_len = uc->config.metadata_size;
3352 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3353 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3354 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3356 return h_desc->epib;
3359 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3362 struct udma_desc *d = to_udma_desc(desc);
3363 struct udma_chan *uc = to_udma_chan(desc->chan);
3364 struct cppi5_host_desc_t *h_desc;
3365 u32 psd_size = payload_len;
3368 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3371 if (payload_len > uc->config.metadata_size)
3374 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3377 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3379 if (uc->config.needs_epib) {
3380 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3381 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3384 cppi5_hdesc_update_flags(h_desc, flags);
3385 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3390 static struct dma_descriptor_metadata_ops metadata_ops = {
3391 .attach = udma_attach_metadata,
3392 .get_ptr = udma_get_metadata_ptr,
3393 .set_len = udma_set_metadata_len,
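	/*
	 * Client-side usage sketch (illustrative only, assuming the generic
	 * dmaengine metadata helpers): after preparing a descriptor on a
	 * packet mode channel, a peripheral driver can attach EPIB/psdata
	 * through the ops registered above, e.g.
	 *
	 *	struct dma_async_tx_descriptor *desc;
	 *	u8 metadata[16];	// hypothetical EPIB-sized payload
	 *
	 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents,
	 *				       DMA_MEM_TO_DEV,
	 *				       DMA_PREP_INTERRUPT);
	 *	dmaengine_desc_attach_metadata(desc, metadata, sizeof(metadata));
	 *	dmaengine_submit(desc);
	 *
	 * which ends up in udma_attach_metadata() above.
	 */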
3396 static struct dma_async_tx_descriptor *
3397 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3398 unsigned int sglen, enum dma_transfer_direction dir,
3399 unsigned long tx_flags, void *context)
3401 struct udma_chan *uc = to_udma_chan(chan);
3402 enum dma_slave_buswidth dev_width;
3403 struct udma_desc *d;
3406 if (dir != uc->config.dir &&
3407 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3408 dev_err(chan->device->dev,
3409 "%s: chan%d is for %s, not supporting %s\n",
3411 dmaengine_get_direction_text(uc->config.dir),
3412 dmaengine_get_direction_text(dir));
3416 if (dir == DMA_DEV_TO_MEM) {
3417 dev_width = uc->cfg.src_addr_width;
3418 burst = uc->cfg.src_maxburst;
3419 } else if (dir == DMA_MEM_TO_DEV) {
3420 dev_width = uc->cfg.dst_addr_width;
3421 burst = uc->cfg.dst_maxburst;
3423 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3430 uc->config.tx_flags = tx_flags;
3432 if (uc->config.pkt_mode)
3433 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3435 else if (is_slave_direction(uc->config.dir))
3436 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3439 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3449 /* static TR for remote PDMA */
3450 if (udma_configure_statictr(uc, d, dev_width, burst)) {
3451 dev_err(uc->ud->dev,
3452 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3453 __func__, d->static_tr.bstcnt);
3455 udma_free_hwdesc(uc, d);
3460 if (uc->config.metadata_size)
3461 d->vd.tx.metadata_ops = &metadata_ops;
3463 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3466 static struct udma_desc *
3467 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3468 size_t buf_len, size_t period_len,
3469 enum dma_transfer_direction dir, unsigned long flags)
3471 struct udma_desc *d;
3472 size_t tr_size, period_addr;
3473 struct cppi5_tr_type1_t *tr_req;
3474 unsigned int periods = buf_len / period_len;
3475 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3479 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3480 &tr0_cnt1, &tr1_cnt0);
3482 dev_err(uc->ud->dev, "size %zu is not supported\n",
3487 /* Now allocate and set up the descriptor. */
3488 tr_size = sizeof(struct cppi5_tr_type1_t);
3489 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3493 tr_req = d->hwdesc[0].tr_req_base;
3494 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3495 period_addr = buf_addr;
3497 period_addr = buf_addr |
3498 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3500 for (i = 0; i < periods; i++) {
3501 int tr_idx = i * num_tr;
3503 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3504 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3506 tr_req[tr_idx].addr = period_addr;
3507 tr_req[tr_idx].icnt0 = tr0_cnt0;
3508 tr_req[tr_idx].icnt1 = tr0_cnt1;
3509 tr_req[tr_idx].dim1 = tr0_cnt0;
3512 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3513 CPPI5_TR_CSF_SUPR_EVT);
3516 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3518 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3520 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3521 tr_req[tr_idx].icnt0 = tr1_cnt0;
3522 tr_req[tr_idx].icnt1 = 1;
3523 tr_req[tr_idx].dim1 = tr1_cnt0;
3526 if (!(flags & DMA_PREP_INTERRUPT))
3527 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3528 CPPI5_TR_CSF_SUPR_EVT);
3530 period_addr += period_len;
3536 static struct udma_desc *
3537 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3538 size_t buf_len, size_t period_len,
3539 enum dma_transfer_direction dir, unsigned long flags)
3541 struct udma_desc *d;
3544 int periods = buf_len / period_len;
3546 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3549 if (period_len >= SZ_4M)
3552 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3556 d->hwdesc_count = periods;
3558 /* TODO: re-check this... */
3559 if (dir == DMA_DEV_TO_MEM)
3560 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3562 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3564 if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3565 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3567 for (i = 0; i < periods; i++) {
3568 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3569 dma_addr_t period_addr = buf_addr + (period_len * i);
3570 struct cppi5_host_desc_t *h_desc;
3572 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3574 &hwdesc->cppi5_desc_paddr);
3575 if (!hwdesc->cppi5_desc_vaddr) {
3576 dev_err(uc->ud->dev,
3577 "descriptor%d allocation failed\n", i);
3579 udma_free_hwdesc(uc, d);
3584 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3585 h_desc = hwdesc->cppi5_desc_vaddr;
3587 cppi5_hdesc_init(h_desc, 0, 0);
3588 cppi5_hdesc_set_pktlen(h_desc, period_len);
3590 /* Flow and Packet ID */
3591 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
3592 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3593 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3595 /* attach each period to a new descriptor */
3596 cppi5_hdesc_attach_buf(h_desc,
3597 period_addr, period_len,
3598 period_addr, period_len);
3604 static struct dma_async_tx_descriptor *
3605 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3606 size_t period_len, enum dma_transfer_direction dir,
3607 unsigned long flags)
3609 struct udma_chan *uc = to_udma_chan(chan);
3610 enum dma_slave_buswidth dev_width;
3611 struct udma_desc *d;
3614 if (dir != uc->config.dir) {
3615 dev_err(chan->device->dev,
3616 "%s: chan%d is for %s, not supporting %s\n",
3618 dmaengine_get_direction_text(uc->config.dir),
3619 dmaengine_get_direction_text(dir));
3625 if (dir == DMA_DEV_TO_MEM) {
3626 dev_width = uc->cfg.src_addr_width;
3627 burst = uc->cfg.src_maxburst;
3628 } else if (dir == DMA_MEM_TO_DEV) {
3629 dev_width = uc->cfg.dst_addr_width;
3630 burst = uc->cfg.dst_maxburst;
3632 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3639 if (uc->config.pkt_mode)
3640 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3643 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3649 d->sglen = buf_len / period_len;
3652 d->residue = buf_len;
3654 /* static TR for remote PDMA */
3655 if (udma_configure_statictr(uc, d, dev_width, burst)) {
3656 dev_err(uc->ud->dev,
3657 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3658 __func__, d->static_tr.bstcnt);
3660 udma_free_hwdesc(uc, d);
3665 if (uc->config.metadata_size)
3666 d->vd.tx.metadata_ops = &metadata_ops;
3668 return vchan_tx_prep(&uc->vc, &d->vd, flags);
3671 static struct dma_async_tx_descriptor *
3672 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3673 size_t len, unsigned long tx_flags)
3675 struct udma_chan *uc = to_udma_chan(chan);
3676 struct udma_desc *d;
3677 struct cppi5_tr_type15_t *tr_req;
3679 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3680 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3682 if (uc->config.dir != DMA_MEM_TO_MEM) {
3683 dev_err(chan->device->dev,
3684 "%s: chan%d is for %s, not supporting %s\n",
3686 dmaengine_get_direction_text(uc->config.dir),
3687 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
3691 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3692 &tr0_cnt1, &tr1_cnt0);
3694 dev_err(uc->ud->dev, "size %zu is not supported\n",
3699 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3703 d->dir = DMA_MEM_TO_MEM;
3708 if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3709 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3710 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3713 tr_req = d->hwdesc[0].tr_req_base;
3715 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3716 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3717 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
3719 tr_req[0].addr = src;
3720 tr_req[0].icnt0 = tr0_cnt0;
3721 tr_req[0].icnt1 = tr0_cnt1;
3722 tr_req[0].icnt2 = 1;
3723 tr_req[0].icnt3 = 1;
3724 tr_req[0].dim1 = tr0_cnt0;
3726 tr_req[0].daddr = dest;
3727 tr_req[0].dicnt0 = tr0_cnt0;
3728 tr_req[0].dicnt1 = tr0_cnt1;
3729 tr_req[0].dicnt2 = 1;
3730 tr_req[0].dicnt3 = 1;
3731 tr_req[0].ddim1 = tr0_cnt0;
3734 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3735 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3736 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
3738 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3739 tr_req[1].icnt0 = tr1_cnt0;
3740 tr_req[1].icnt1 = 1;
3741 tr_req[1].icnt2 = 1;
3742 tr_req[1].icnt3 = 1;
3744 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3745 tr_req[1].dicnt0 = tr1_cnt0;
3746 tr_req[1].dicnt1 = 1;
3747 tr_req[1].dicnt2 = 1;
3748 tr_req[1].dicnt3 = 1;
3751 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
3752 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3754 if (uc->config.metadata_size)
3755 d->vd.tx.metadata_ops = &metadata_ops;
3757 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3760 static void udma_issue_pending(struct dma_chan *chan)
3762 struct udma_chan *uc = to_udma_chan(chan);
3763 unsigned long flags;
3765 spin_lock_irqsave(&uc->vc.lock, flags);
3767 /* If we have something pending and no active descriptor, then */
3768 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3770 * start a descriptor if the channel is NOT [marked as
3771 * terminating _and_ it is still running (teardown has not
3774 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3775 udma_is_chan_running(uc)))
3779 spin_unlock_irqrestore(&uc->vc.lock, flags);
3782 static enum dma_status udma_tx_status(struct dma_chan *chan,
3783 dma_cookie_t cookie,
3784 struct dma_tx_state *txstate)
3786 struct udma_chan *uc = to_udma_chan(chan);
3787 enum dma_status ret;
3788 unsigned long flags;
3790 spin_lock_irqsave(&uc->vc.lock, flags);
3792 ret = dma_cookie_status(chan, cookie, txstate);
3794 if (!udma_is_chan_running(uc))
3797 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3800 if (ret == DMA_COMPLETE || !txstate)
3803 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3806 u32 residue = uc->desc->residue;
3809 if (uc->desc->dir == DMA_MEM_TO_DEV) {
3810 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3812 if (uc->config.ep_type != PSIL_EP_NATIVE) {
3813 peer_bcnt = udma_tchanrt_read(uc,
3814 UDMA_CHAN_RT_PEER_BCNT_REG);
3816 if (bcnt > peer_bcnt)
3817 delay = bcnt - peer_bcnt;
3819 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3820 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3822 if (uc->config.ep_type != PSIL_EP_NATIVE) {
3823 peer_bcnt = udma_rchanrt_read(uc,
3824 UDMA_CHAN_RT_PEER_BCNT_REG);
3826 if (peer_bcnt > bcnt)
3827 delay = peer_bcnt - bcnt;
3830 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3833 if (bcnt && !(bcnt % uc->desc->residue))
3836 residue -= bcnt % uc->desc->residue;
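			/*
			 * Worked example (illustrative comment only): with a
			 * 4096 byte descriptor and a hardware byte count of
			 * 5120, bcnt % residue is 1024 and the reported
			 * residue drops from 4096 to 3072; an exact multiple
			 * of the descriptor size is treated as fully done.
			 */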
3838 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3843 dma_set_residue(txstate, residue);
3844 dma_set_in_flight_bytes(txstate, delay);
3851 spin_unlock_irqrestore(&uc->vc.lock, flags);
3855 static int udma_pause(struct dma_chan *chan)
3857 struct udma_chan *uc = to_udma_chan(chan);
3859 /* pause the channel */
3860 switch (uc->config.dir) {
3861 case DMA_DEV_TO_MEM:
3862 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3863 UDMA_PEER_RT_EN_PAUSE,
3864 UDMA_PEER_RT_EN_PAUSE);
3866 case DMA_MEM_TO_DEV:
3867 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3868 UDMA_PEER_RT_EN_PAUSE,
3869 UDMA_PEER_RT_EN_PAUSE);
3871 case DMA_MEM_TO_MEM:
3872 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3873 UDMA_CHAN_RT_CTL_PAUSE,
3874 UDMA_CHAN_RT_CTL_PAUSE);
3883 static int udma_resume(struct dma_chan *chan)
3885 struct udma_chan *uc = to_udma_chan(chan);
3887 /* resume the channel */
3888 switch (uc->config.dir) {
3889 case DMA_DEV_TO_MEM:
3890 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3891 UDMA_PEER_RT_EN_PAUSE, 0);
3894 case DMA_MEM_TO_DEV:
3895 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3896 UDMA_PEER_RT_EN_PAUSE, 0);
3898 case DMA_MEM_TO_MEM:
3899 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3900 UDMA_CHAN_RT_CTL_PAUSE, 0);
3909 static int udma_terminate_all(struct dma_chan *chan)
3911 struct udma_chan *uc = to_udma_chan(chan);
3912 unsigned long flags;
3915 spin_lock_irqsave(&uc->vc.lock, flags);
3917 if (udma_is_chan_running(uc))
3921 uc->terminated_desc = uc->desc;
3923 uc->terminated_desc->terminated = true;
3924 cancel_delayed_work(&uc->tx_drain.work);
3929 vchan_get_all_descriptors(&uc->vc, &head);
3930 spin_unlock_irqrestore(&uc->vc.lock, flags);
3931 vchan_dma_desc_free_list(&uc->vc, &head);
3936 static void udma_synchronize(struct dma_chan *chan)
3938 struct udma_chan *uc = to_udma_chan(chan);
3939 unsigned long timeout = msecs_to_jiffies(1000);
3941 vchan_synchronize(&uc->vc);
3943 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3944 timeout = wait_for_completion_timeout(&uc->teardown_completed,
3947 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3949 udma_dump_chan_stdata(uc);
3950 udma_reset_chan(uc, true);
3954 udma_reset_chan(uc, false);
3955 if (udma_is_chan_running(uc))
3956 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3958 cancel_delayed_work_sync(&uc->tx_drain.work);
3959 udma_reset_rings(uc);
3962 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3963 struct virt_dma_desc *vd,
3964 struct dmaengine_result *result)
3966 struct udma_chan *uc = to_udma_chan(&vc->chan);
3967 struct udma_desc *d;
3972 d = to_udma_desc(&vd->tx);
3974 if (d->metadata_size)
3975 udma_fetch_epib(uc, d);
3977 /* Provide residue information for the client */
3979 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
3981 if (cppi5_desc_get_type(desc_vaddr) ==
3982 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
3983 result->residue = d->residue -
3984 cppi5_hdesc_get_pktlen(desc_vaddr);
3985 if (result->residue)
3986 result->result = DMA_TRANS_ABORTED;
3988 result->result = DMA_TRANS_NOERROR;
3990 result->residue = 0;
3991 result->result = DMA_TRANS_NOERROR;
3997 * This tasklet handles the completion of a DMA descriptor by
3998 * calling its callback and freeing it.
4000 static void udma_vchan_complete(struct tasklet_struct *t)
4002 struct virt_dma_chan *vc = from_tasklet(vc, t, task);
4003 struct virt_dma_desc *vd, *_vd;
4004 struct dmaengine_desc_callback cb;
4007 spin_lock_irq(&vc->lock);
4008 list_splice_tail_init(&vc->desc_completed, &head);
4012 dmaengine_desc_get_callback(&vd->tx, &cb);
4014 memset(&cb, 0, sizeof(cb));
4016 spin_unlock_irq(&vc->lock);
4018 udma_desc_pre_callback(vc, vd, NULL);
4019 dmaengine_desc_callback_invoke(&cb, NULL);
4021 list_for_each_entry_safe(vd, _vd, &head, node) {
4022 struct dmaengine_result result;
4024 dmaengine_desc_get_callback(&vd->tx, &cb);
4026 list_del(&vd->node);
4028 udma_desc_pre_callback(vc, vd, &result);
4029 dmaengine_desc_callback_invoke(&cb, &result);
4031 vchan_vdesc_fini(vd);
4035 static void udma_free_chan_resources(struct dma_chan *chan)
4037 struct udma_chan *uc = to_udma_chan(chan);
4038 struct udma_dev *ud = to_udma_dev(chan->device);
4040 udma_terminate_all(chan);
4041 if (uc->terminated_desc) {
4042 udma_reset_chan(uc, false);
4043 udma_reset_rings(uc);
4046 cancel_delayed_work_sync(&uc->tx_drain.work);
4048 if (uc->irq_num_ring > 0) {
4049 free_irq(uc->irq_num_ring, uc);
4051 uc->irq_num_ring = 0;
4053 if (uc->irq_num_udma > 0) {
4054 free_irq(uc->irq_num_udma, uc);
4056 uc->irq_num_udma = 0;
4059 /* Release PSI-L pairing */
4060 if (uc->psil_paired) {
4061 navss_psil_unpair(ud, uc->config.src_thread,
4062 uc->config.dst_thread);
4063 uc->psil_paired = false;
4066 vchan_free_chan_resources(&uc->vc);
4067 tasklet_kill(&uc->vc.task);
4069 bcdma_free_bchan_resources(uc);
4070 udma_free_tx_resources(uc);
4071 udma_free_rx_resources(uc);
4072 udma_reset_uchan(uc);
4074 if (uc->use_dma_pool) {
4075 dma_pool_destroy(uc->hdesc_pool);
4076 uc->use_dma_pool = false;
4080 static struct platform_driver udma_driver;
4081 static struct platform_driver bcdma_driver;
4082 static struct platform_driver pktdma_driver;
4084 struct udma_filter_param {
4085 int remote_thread_id;
4088 u32 tr_trigger_type;
4091 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
4093 struct udma_chan_config *ucc;
4094 struct psil_endpoint_config *ep_config;
4095 struct udma_filter_param *filter_param;
4096 struct udma_chan *uc;
4097 struct udma_dev *ud;
4099 if (chan->device->dev->driver != &udma_driver.driver &&
4100 chan->device->dev->driver != &bcdma_driver.driver &&
4101 chan->device->dev->driver != &pktdma_driver.driver)
4104 uc = to_udma_chan(chan);
4107 filter_param = param;
4109 if (filter_param->atype > 2) {
4110 dev_err(ud->dev, "Invalid channel atype: %u\n",
4111 filter_param->atype);
4115 if (filter_param->asel > 15) {
4116 dev_err(ud->dev, "Invalid channel asel: %u\n",
4117 filter_param->asel);
4121 ucc->remote_thread_id = filter_param->remote_thread_id;
4122 ucc->atype = filter_param->atype;
4123 ucc->asel = filter_param->asel;
4124 ucc->tr_trigger_type = filter_param->tr_trigger_type;
4126 if (ucc->tr_trigger_type) {
4127 ucc->dir = DMA_MEM_TO_MEM;
4128 goto triggered_bchan;
4129 } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
4130 ucc->dir = DMA_MEM_TO_DEV;
4132 ucc->dir = DMA_DEV_TO_MEM;
4135 ep_config = psil_get_ep_config(ucc->remote_thread_id);
4136 if (IS_ERR(ep_config)) {
4137 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4138 ucc->remote_thread_id);
4139 ucc->dir = DMA_MEM_TO_MEM;
4140 ucc->remote_thread_id = -1;
4146 if (ud->match_data->type == DMA_TYPE_BCDMA &&
4147 ep_config->pkt_mode) {
4149 "Only TR mode is supported (psi-l thread 0x%04x)\n",
4150 ucc->remote_thread_id);
4151 ucc->dir = DMA_MEM_TO_MEM;
4152 ucc->remote_thread_id = -1;
4158 ucc->pkt_mode = ep_config->pkt_mode;
4159 ucc->channel_tpl = ep_config->channel_tpl;
4160 ucc->notdpkt = ep_config->notdpkt;
4161 ucc->ep_type = ep_config->ep_type;
4163 if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4164 ep_config->mapped_channel_id >= 0) {
4165 ucc->mapped_channel_id = ep_config->mapped_channel_id;
4166 ucc->default_flow_id = ep_config->default_flow_id;
4168 ucc->mapped_channel_id = -1;
4169 ucc->default_flow_id = -1;
4172 if (ucc->ep_type != PSIL_EP_NATIVE) {
4173 const struct udma_match_data *match_data = ud->match_data;
4175 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
4176 ucc->enable_acc32 = ep_config->pdma_acc32;
4177 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
4178 ucc->enable_burst = ep_config->pdma_burst;
4181 ucc->needs_epib = ep_config->needs_epib;
4182 ucc->psd_size = ep_config->psd_size;
4183 ucc->metadata_size =
4184 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
4188 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4189 ucc->metadata_size, ud->desc_align);
4191 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4192 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
4197 dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4198 ucc->tr_trigger_type);
4204 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
4205 struct of_dma *ofdma)
4207 struct udma_dev *ud = ofdma->of_dma_data;
4208 dma_cap_mask_t mask = ud->ddev.cap_mask;
4209 struct udma_filter_param filter_param;
4210 struct dma_chan *chan;
4212 if (ud->match_data->type == DMA_TYPE_BCDMA) {
4213 if (dma_spec->args_count != 3)
4216 filter_param.tr_trigger_type = dma_spec->args[0];
4217 filter_param.remote_thread_id = dma_spec->args[1];
4218 filter_param.asel = dma_spec->args[2];
4219 filter_param.atype = 0;
4221 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
4224 filter_param.remote_thread_id = dma_spec->args[0];
4225 filter_param.tr_trigger_type = 0;
4226 if (dma_spec->args_count == 2) {
4227 if (ud->match_data->type == DMA_TYPE_UDMA) {
4228 filter_param.atype = dma_spec->args[1];
4229 filter_param.asel = 0;
4231 filter_param.atype = 0;
4232 filter_param.asel = dma_spec->args[1];
4235 filter_param.atype = 0;
4236 filter_param.asel = 0;
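	/*
	 * Illustrative DT consumer snippets (descriptive comment only, node
	 * names and thread numbers are made up):
	 *
	 *   UDMA/PKTDMA, one or two cells (PSI-L thread ID [+ atype/asel]):
	 *     dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
	 *
	 *   BCDMA, three cells:
	 *     dmas = <&main_bcdma 1 0 0>;
	 *
	 * For BCDMA args[0..2] map to tr_trigger_type, remote_thread_id and
	 * asel as parsed above.
	 */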
4240 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
4243 dev_err(ud->dev, "get channel fail in %s.\n", __func__);
4244 return ERR_PTR(-EINVAL);
4250 static struct udma_match_data am654_main_data = {
4251 .type = DMA_TYPE_UDMA,
4252 .psil_base = 0x1000,
4253 .enable_memcpy_support = true,
4254 .statictr_z_mask = GENMASK(11, 0),
4256 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4257 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4258 0, /* No UH Channels */
4262 static struct udma_match_data am654_mcu_data = {
4263 .type = DMA_TYPE_UDMA,
4264 .psil_base = 0x6000,
4265 .enable_memcpy_support = false,
4266 .statictr_z_mask = GENMASK(11, 0),
4268 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4269 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4270 0, /* No UH Channels */
4274 static struct udma_match_data j721e_main_data = {
4275 .type = DMA_TYPE_UDMA,
4276 .psil_base = 0x1000,
4277 .enable_memcpy_support = true,
4278 .flags = UDMA_FLAGS_J7_CLASS,
4279 .statictr_z_mask = GENMASK(23, 0),
4281 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4282 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
4283 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
4287 static struct udma_match_data j721e_mcu_data = {
4288 .type = DMA_TYPE_UDMA,
4289 .psil_base = 0x6000,
4290 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
4291 .flags = UDMA_FLAGS_J7_CLASS,
4292 .statictr_z_mask = GENMASK(23, 0),
4294 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4295 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
4296 0, /* No UH Channels */
4300 static struct udma_soc_data am62a_dmss_csi_soc_data = {
4302 .bcdma_rchan_data = 0xe00,
4303 .bcdma_rchan_ring = 0x1000,
4307 static struct udma_match_data am62a_bcdma_csirx_data = {
4308 .type = DMA_TYPE_BCDMA,
4309 .psil_base = 0x3100,
4310 .enable_memcpy_support = false,
4312 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4313 0, /* No H Channels */
4314 0, /* No UH Channels */
4316 .soc_data = &am62a_dmss_csi_soc_data,
4319 static struct udma_match_data am64_bcdma_data = {
4320 .type = DMA_TYPE_BCDMA,
4321 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
4322 .enable_memcpy_support = true, /* Supported via bchan */
4323 .flags = UDMA_FLAGS_J7_CLASS,
4324 .statictr_z_mask = GENMASK(23, 0),
4326 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4327 0, /* No H Channels */
4328 0, /* No UH Channels */
4332 static struct udma_match_data am64_pktdma_data = {
4333 .type = DMA_TYPE_PKTDMA,
4334 .psil_base = 0x1000,
4335 .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4336 .flags = UDMA_FLAGS_J7_CLASS,
4337 .statictr_z_mask = GENMASK(23, 0),
4339 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4340 0, /* No H Channels */
4341 0, /* No UH Channels */
4345 static const struct of_device_id udma_of_match[] = {
4347 .compatible = "ti,am654-navss-main-udmap",
4348 .data = &am654_main_data,
4351 .compatible = "ti,am654-navss-mcu-udmap",
4352 .data = &am654_mcu_data,
4354 .compatible = "ti,j721e-navss-main-udmap",
4355 .data = &j721e_main_data,
4357 .compatible = "ti,j721e-navss-mcu-udmap",
4358 .data = &j721e_mcu_data,
4361 .compatible = "ti,am64-dmss-bcdma",
4362 .data = &am64_bcdma_data,
4365 .compatible = "ti,am64-dmss-pktdma",
4366 .data = &am64_pktdma_data,
4369 .compatible = "ti,am62a-dmss-bcdma-csirx",
4370 .data = &am62a_bcdma_csirx_data,
4375 static struct udma_soc_data am654_soc_data = {
4377 .udma_rchan = 0x200,
4381 static struct udma_soc_data j721e_soc_data = {
4383 .udma_rchan = 0x400,
4387 static struct udma_soc_data j7200_soc_data = {
4393 static struct udma_soc_data am64_soc_data = {
4395 .bcdma_bchan_data = 0x2200,
4396 .bcdma_bchan_ring = 0x2400,
4397 .bcdma_tchan_data = 0x2800,
4398 .bcdma_tchan_ring = 0x2a00,
4399 .bcdma_rchan_data = 0x2e00,
4400 .bcdma_rchan_ring = 0x3000,
4401 .pktdma_tchan_flow = 0x1200,
4402 .pktdma_rchan_flow = 0x1600,
4404 .bcdma_trigger_event_offset = 0xc400,
4407 static const struct soc_device_attribute k3_soc_devices[] = {
4408 { .family = "AM65X", .data = &am654_soc_data },
4409 { .family = "J721E", .data = &j721e_soc_data },
4410 { .family = "J7200", .data = &j7200_soc_data },
4411 { .family = "AM64X", .data = &am64_soc_data },
4412 { .family = "J721S2", .data = &j721e_soc_data},
4413 { .family = "AM62X", .data = &am64_soc_data },
4414 { .family = "AM62AX", .data = &am64_soc_data },
4418 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4420 u32 cap2, cap3, cap4;
4423 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4424 if (IS_ERR(ud->mmrs[MMR_GCFG]))
4425 return PTR_ERR(ud->mmrs[MMR_GCFG]);
4427 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4428 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4430 switch (ud->match_data->type) {
4432 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4433 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4434 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4435 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4437 case DMA_TYPE_BCDMA:
4438 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
4439 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4440 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4441 ud->rflow_cnt = ud->rchan_cnt;
4443 case DMA_TYPE_PKTDMA:
4444 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4445 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4446 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4447 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4448 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4454 for (i = 1; i < MMR_LAST; i++) {
4455 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4457 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4459 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4462 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4463 if (IS_ERR(ud->mmrs[i]))
4464 return PTR_ERR(ud->mmrs[i]);
4470 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4471 struct ti_sci_resource_desc *rm_desc,
4474 bitmap_clear(map, rm_desc->start, rm_desc->num);
4475 bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
4476 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4477 rm_desc->start, rm_desc->num, rm_desc->start_sec,
4481 static const char * const range_names[] = {
4482 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4483 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4484 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4485 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4486 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4489 static int udma_setup_resources(struct udma_dev *ud)
4492 struct device *dev = ud->dev;
4493 struct ti_sci_resource *rm_res, irq_res;
4494 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4497 /* Set up the throughput level start indexes */
4498 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4499 if (of_device_is_compatible(dev->of_node,
4500 "ti,am654-navss-main-udmap")) {
4501 ud->tchan_tpl.levels = 2;
4502 ud->tchan_tpl.start_idx[0] = 8;
4503 } else if (of_device_is_compatible(dev->of_node,
4504 "ti,am654-navss-mcu-udmap")) {
4505 ud->tchan_tpl.levels = 2;
4506 ud->tchan_tpl.start_idx[0] = 2;
4507 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4508 ud->tchan_tpl.levels = 3;
4509 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4510 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4511 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4512 ud->tchan_tpl.levels = 2;
4513 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4515 ud->tchan_tpl.levels = 1;
4518 ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4519 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4520 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4522 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4523 sizeof(unsigned long), GFP_KERNEL);
4524 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4526 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4527 sizeof(unsigned long), GFP_KERNEL);
4528 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4530 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4531 sizeof(unsigned long),
4533 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4534 BITS_TO_LONGS(ud->rflow_cnt),
4535 sizeof(unsigned long),
4537 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4538 sizeof(unsigned long),
4540 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4543 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4544 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4545 !ud->rflows || !ud->rflow_in_use)
4549 * RX flows with the same IDs as RX channels are reserved to be used
4550 * as default flows if the remote HW can't generate flow_ids. Those
4551 * RX flows can be requested only explicitly by ID.
4553 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4555 /* by default no GP rflows are assigned to Linux */
4556 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4558 /* Get resource ranges from tisci */
4559 for (i = 0; i < RM_RANGE_LAST; i++) {
4560 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
4563 tisci_rm->rm_ranges[i] =
4564 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4565 tisci_rm->tisci_dev_id,
4566 (char *)range_names[i]);
4570 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4571 if (IS_ERR(rm_res)) {
4572 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4575 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4576 for (i = 0; i < rm_res->sets; i++)
4577 udma_mark_resource_ranges(ud, ud->tchan_map,
4578 &rm_res->desc[i], "tchan");
4579 irq_res.sets = rm_res->sets;
4582 /* rchan and matching default flow ranges */
4583 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4584 if (IS_ERR(rm_res)) {
4585 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4588 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4589 for (i = 0; i < rm_res->sets; i++)
4590 udma_mark_resource_ranges(ud, ud->rchan_map,
4591 &rm_res->desc[i], "rchan");
4592 irq_res.sets += rm_res->sets;
4595 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4598 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4599 if (IS_ERR(rm_res)) {
4600 irq_res.desc[0].start = 0;
4601 irq_res.desc[0].num = ud->tchan_cnt;
4604 for (i = 0; i < rm_res->sets; i++) {
4605 irq_res.desc[i].start = rm_res->desc[i].start;
4606 irq_res.desc[i].num = rm_res->desc[i].num;
4607 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
4608 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
4611 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4612 if (IS_ERR(rm_res)) {
4613 irq_res.desc[i].start = 0;
4614 irq_res.desc[i].num = ud->rchan_cnt;
4616 for (j = 0; j < rm_res->sets; j++, i++) {
4617 if (rm_res->desc[j].num) {
4618 irq_res.desc[i].start = rm_res->desc[j].start +
4619 ud->soc_data->oes.udma_rchan;
4620 irq_res.desc[i].num = rm_res->desc[j].num;
4622 if (rm_res->desc[j].num_sec) {
4623 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
4624 ud->soc_data->oes.udma_rchan;
4625 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
4629 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4630 kfree(irq_res.desc);
4632 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4636 /* GP rflow ranges */
4637 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4638 if (IS_ERR(rm_res)) {
4639 /* all gp flows are assigned exclusively to Linux */
4640 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4641 ud->rflow_cnt - ud->rchan_cnt);
4643 for (i = 0; i < rm_res->sets; i++)
4644 udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4645 &rm_res->desc[i], "gp-rflow");
4651 static int bcdma_setup_resources(struct udma_dev *ud)
4654 struct device *dev = ud->dev;
4655 struct ti_sci_resource *rm_res, irq_res;
4656 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4657 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4660 /* Set up the throughput level start indexes */
4661 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4662 if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
4663 ud->bchan_tpl.levels = 3;
4664 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4665 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4666 } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
4667 ud->bchan_tpl.levels = 2;
4668 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4670 ud->bchan_tpl.levels = 1;
4673 cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4674 if (BCDMA_CAP4_URCHAN_CNT(cap)) {
4675 ud->rchan_tpl.levels = 3;
4676 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4677 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4678 } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
4679 ud->rchan_tpl.levels = 2;
4680 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4682 ud->rchan_tpl.levels = 1;
4685 if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
4686 ud->tchan_tpl.levels = 3;
4687 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4688 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4689 } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
4690 ud->tchan_tpl.levels = 2;
4691 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4693 ud->tchan_tpl.levels = 1;
4696 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4697 sizeof(unsigned long), GFP_KERNEL);
4698 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4700 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4701 sizeof(unsigned long), GFP_KERNEL);
4702 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4704 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4705 sizeof(unsigned long), GFP_KERNEL);
4706 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4708 /* BCDMA does not really have flows, but the driver expects them */
4709 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4710 sizeof(unsigned long),
4712 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4715 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4716 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4720 /* Get resource ranges from tisci */
4721 for (i = 0; i < RM_RANGE_LAST; i++) {
4722 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
4724 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4726 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4728 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4731 tisci_rm->rm_ranges[i] =
4732 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4733 tisci_rm->tisci_dev_id,
4734 (char *)range_names[i]);
4740 if (ud->bchan_cnt) {
4741 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4742 if (IS_ERR(rm_res)) {
4743 bitmap_zero(ud->bchan_map, ud->bchan_cnt);
4746 bitmap_fill(ud->bchan_map, ud->bchan_cnt);
4747 for (i = 0; i < rm_res->sets; i++)
4748 udma_mark_resource_ranges(ud, ud->bchan_map,
4751 irq_res.sets += rm_res->sets;
4756 if (ud->tchan_cnt) {
4757 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4758 if (IS_ERR(rm_res)) {
4759 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4762 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4763 for (i = 0; i < rm_res->sets; i++)
4764 udma_mark_resource_ranges(ud, ud->tchan_map,
4767 irq_res.sets += rm_res->sets * 2;
4772 if (ud->rchan_cnt) {
4773 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4774 if (IS_ERR(rm_res)) {
4775 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4778 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4779 for (i = 0; i < rm_res->sets; i++)
4780 udma_mark_resource_ranges(ud, ud->rchan_map,
4783 irq_res.sets += rm_res->sets * 2;
4787 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[0].start = oes->bcdma_bchan_ring;
			irq_res.desc[0].num = ud->bchan_cnt;
			i = 1;
		} else {
			for (i = 0; i < rm_res->sets; i++) {
				irq_res.desc[i].start = rm_res->desc[i].start +
							oes->bcdma_bchan_ring;
				irq_res.desc[i].num = rm_res->desc[i].num;
			}
		}
	} else {
		i = 0;
	}

	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_tchan_data;
			irq_res.desc[i].num = ud->tchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
			irq_res.desc[i + 1].num = ud->tchan_cnt;
			i += 2;
		} else {
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_tchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_tchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_rchan_data;
			irq_res.desc[i].num = ud->rchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
			irq_res.desc[i + 1].num = ud->rchan_cnt;
			i += 2;
		} else {
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_rchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_rchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}

	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}
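
/*
 * Discover and reserve the PKTDMA specific resources: throughput level
 * boundaries from the GCFG CAP3 register, tchan/rchan/rflow/tflow bitmaps,
 * the TISCI resource ranges owned by this host and the MSI event ranges used
 * for the tx/rx flow completion interrupts.
 */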
static int pktdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap3;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;
	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_in_use,
						  &rm_res->desc[i], "rflow");
		irq_res.sets = rm_res->sets;
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tflow_map,
						  &rm_res->desc[i], "tflow");
		irq_res.sets += rm_res->sets;
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = oes->pktdma_tchan_flow;
		irq_res.desc[0].num = ud->tflow_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start +
						oes->pktdma_tchan_flow;
			irq_res.desc[i].num = rm_res->desc[i].num;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = oes->pktdma_rchan_flow;
		irq_res.desc[i].num = ud->rflow_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->pktdma_rchan_flow;
			irq_res.desc[i].num = rm_res->desc[j].num;
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}
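
/*
 * Dispatch to the DMA type specific resource setup and report how many
 * channels remain for this host after the TISCI reserved ones have been
 * subtracted from the totals.
 */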
static int setup_resources(struct udma_dev *ud)
{
	struct device *dev = ud->dev;
	int ch_count, ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_setup_resources(ud);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_setup_resources(ud);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_setup_resources(ud);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
	if (ud->bchan_cnt)
		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, ud->rflow_cnt));
		break;
	case DMA_TYPE_BCDMA:
		dev_info(dev,
			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->bchan_cnt - bitmap_weight(ud->bchan_map, ud->bchan_cnt),
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt));
		break;
	case DMA_TYPE_PKTDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt));
		break;
	default:
		break;
	}

	return ch_count;
}
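
/*
 * Prepare the resources used to flush an RX channel at teardown: a 1K scratch
 * buffer plus one pre-built TR mode descriptor and one packet mode descriptor
 * pointing to it, so stale data can be drained and discarded regardless of
 * the channel's operating mode.
 */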
static int udma_setup_rx_flush(struct udma_dev *ud)
{
	struct udma_rx_flush *rx_flush = &ud->rx_flush;
	struct cppi5_desc_hdr_t *tr_desc;
	struct cppi5_tr_type1_t *tr_req;
	struct cppi5_host_desc_t *desc;
	struct device *dev = ud->dev;
	struct udma_hwdesc *hwdesc;
	size_t tr_size;

	/* Allocate 1K buffer for discarded data on RX channel teardown */
	rx_flush->buffer_size = SZ_1K;
	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
					      GFP_KERNEL);
	if (!rx_flush->buffer_vaddr)
		return -ENOMEM;

	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
						rx_flush->buffer_size,
						DMA_TO_DEVICE);
	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
		return -ENOMEM;

	/* Set up descriptor to be used for TR mode */
	hwdesc = &rx_flush->hwdescs[0];
	tr_size = sizeof(struct cppi5_tr_type1_t);
	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

	tr_desc = hwdesc->cppi5_desc_vaddr;
	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, 0);

	tr_req = hwdesc->tr_req_base;
	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req->addr = rx_flush->buffer_paddr;
	tr_req->icnt0 = rx_flush->buffer_size;
	tr_req->icnt1 = 1;

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	/* Set up descriptor to be used for packet mode */
	hwdesc = &rx_flush->hwdescs[1];
	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					CPPI5_INFO0_HDESC_EPIB_SIZE +
					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	desc = hwdesc->cppi5_desc_vaddr;
	cppi5_hdesc_init(desc, 0, 0);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

	cppi5_hdesc_attach_buf(desc,
			       rx_flush->buffer_paddr, rx_flush->buffer_size,
			       rx_flush->buffer_paddr, rx_flush->buffer_size);

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	return 0;
}
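
/*
 * debugfs support: print a one line summary for every channel that has a
 * client, including direction, the paired PSI-L threads and the endpoint
 * configuration (EPIB/PSD for native endpoints, ACC32/BURST for PDMA).
 */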
#ifdef CONFIG_DEBUG_FS
static void udma_dbg_summary_show_chan(struct seq_file *s,
				       struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_chan_config *ucc = &uc->config;

	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
		   chan->dbg_client_name ?: "in-use");
	if (ucc->tr_trigger_type)
		seq_puts(s, " (triggered, ");
	else
		seq_printf(s, " (%s, ",
			   dmaengine_get_direction_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
			seq_printf(s, "bchan%d)\n", uc->bchan->id);
			return;
		}

		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_DEV_TO_MEM:
		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "rflow%d, ", uc->rflow->id);
		break;
	case DMA_MEM_TO_DEV:
		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
		break;
	default:
		seq_printf(s, ")\n");
		return;
	}

	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_printf(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_printf(s, " ]");
		}
	} else {
		seq_printf(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}

	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}

static void udma_dbg_summary_show(struct seq_file *s,
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count)
			udma_dbg_summary_show_chan(s, chan);
	}
}
#endif /* CONFIG_DEBUG_FS */
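
/*
 * The dmaengine copy_align is derived from the burst size of the highest
 * throughput level usable for memcpy (bchan on BCDMA, tchan otherwise);
 * without memcpy support an 8 byte alignment is advertised.
 */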
static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
{
	const struct udma_match_data *match_data = ud->match_data;
	u8 tpl;

	if (!match_data->enable_memcpy_support)
		return DMAENGINE_ALIGN_8_BYTES;

	/* Get the highest TPL level the device supports for memcpy */
	if (ud->bchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
	else if (ud->tchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
	else
		return DMAENGINE_ALIGN_8_BYTES;

	switch (match_data->burst_size[tpl]) {
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
		return DMAENGINE_ALIGN_256_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
		return DMAENGINE_ALIGN_128_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
	default:
		return DMAENGINE_ALIGN_64_BYTES;
	}
}

#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
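
/*
 * Probe: match the DMA type and SoC data, map the MMR regions, acquire the
 * TISCI handle and resource ranges, register the dmaengine device and
 * initialize the per channel bookkeeping (vchan, rings, completion work).
 */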
static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set dma mask stuff\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match) {
		dev_err(dev, "No compatible match found\n");
		return -ENODEV;
	}
	ud->match_data = match->data;

	ud->soc_data = ud->match_data->soc_data;
	if (!ud->soc_data) {
		soc = soc_device_match(k3_soc_devices);
		if (!soc) {
			dev_err(dev, "No compatible SoC found\n");
			return -ENODEV;
		}
		ud->soc_data = soc->data;
	}

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
					   &ud->atype);
		if (!ret && ud->atype > 2) {
			dev_err(dev, "Invalid atype: %u\n", ud->atype);
			return -EINVAL;
		}
	} else {
		ret = of_property_read_u32(dev->of_node, "ti,asel",
					   &ud->asel);
		if (!ret && ud->asel > 15) {
			dev_err(dev, "Invalid asel: %u\n", ud->asel);
			return -EINVAL;
		}
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
	}

	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi.domain) {
		return -EPROBE_DEFER;
	}

	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	/* cyclic operation is not supported via PKTDMA */
	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	}

	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->ddev.device_alloc_chan_resources =
					udma_alloc_chan_resources;
		break;
	case DMA_TYPE_BCDMA:
		ud->ddev.device_alloc_chan_resources =
					bcdma_alloc_chan_resources;
		ud->ddev.device_router_config = bcdma_router_config;
		break;
	case DMA_TYPE_PKTDMA:
		ud->ddev.device_alloc_chan_resources =
					pktdma_alloc_chan_resources;
		break;
	default:
		return -EINVAL;
	}
	ud->ddev.device_free_chan_resources = udma_free_chan_resources;

	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support &&
	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_setup(&uc->vc.task, udma_vchan_complete);
		init_completion(&uc->teardown_completed);
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}

	/* Configure the copy_align to the maximum burst size the device supports */
	ud->ddev.copy_align = udma_get_copy_align(ud);

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}

static struct platform_driver udma_driver = {
	.driver = {
		.name	= "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};

module_platform_driver(udma_driver);
MODULE_LICENSE("GPL v2");

/* Private interfaces to UDMA */
#include "k3-udma-private.c"