// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx DMA/Bridge Subsystem
 *
 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
 * Copyright (C) 2022, Advanced Micro Devices, Inc.
 */

/*
 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
 * between Host memory and the DMA subsystem. It does this by operating on
 * 'descriptors' that contain information about the source, destination and
 * amount of data to transfer. These direct memory transfers can run in both
 * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
 * configured to have a single AXI4 Master interface shared by all channels
 * or one AXI4-Stream interface for each channel enabled. Memory transfers are
 * specified on a per-channel basis in descriptor linked lists, which the DMA
 * fetches from host memory and processes. Events such as descriptor completion
 * and errors are signaled using interrupts. The core also provides up to 16
 * user interrupt wires that generate interrupts to the host.
 */

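/*
 * A minimal client-side sketch (illustrative only, not part of this
 * driver): a consumer matched through the platform-data filter map could
 * drive an H2C transfer roughly as follows. 'dma_dev', 'sgt' and CARD_ADDR
 * are hypothetical placeholders.
 *
 *	struct dma_slave_config cfg = { .dst_addr = CARD_ADDR };
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(dma_dev, "h2c");
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
 *				     DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */
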
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/regmap.h>
#include <linux/dmaengine.h>
#include <linux/dma/amd_xdma.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_xdma.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "../virt-dma.h"
#include "xdma-regs.h"

/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = XDMA_REG_SPACE_LEN,
};

/**
 * struct xdma_desc_block - Descriptor block
 * @virt_addr: Virtual address of block start
 * @dma_addr: DMA address of block start
 */
struct xdma_desc_block {
	void		*virt_addr;
	dma_addr_t	dma_addr;
};

/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure
 * @base: Offset of channel registers
 * @desc_pool: Descriptor pool
 * @busy: Busy flag of the channel
 * @dir: Transferring direction of the channel
 * @cfg: Transferring config of the channel
 * @irq: IRQ assigned to the channel
 */
struct xdma_chan {
	struct virt_dma_chan		vchan;
	void				*xdev_hdl;
	u32				base;
	struct dma_pool			*desc_pool;
	bool				busy;
	enum dma_transfer_direction	dir;
	struct dma_slave_config		cfg;
	u32				irq;
};

/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transferring direction of the request
 * @dev_addr: Physical address on DMA device side
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors
 */
struct xdma_desc {
	struct virt_dma_desc		vdesc;
	struct xdma_chan		*chan;
	enum dma_transfer_direction	dir;
	u64				dev_addr;
	struct xdma_desc_block		*desc_blocks;
	u32				dblk_num;
	u32				desc_num;
	u32				completed_desc_num;
};

#define XDMA_DEV_STATUS_REG_DMA		BIT(0)
#define XDMA_DEV_STATUS_INIT_MSIX	BIT(1)

/**
 * struct xdma_device - DMA device structure
 * @pdev: Platform device pointer
 * @dma_dev: DMA device structure
 * @rmap: MMIO regmap for DMA registers
 * @h2c_chans: Host to Card channels
 * @c2h_chans: Card to Host channels
 * @h2c_chan_num: Number of H2C channels
 * @c2h_chan_num: Number of C2H channels
 * @irq_start: Start IRQ assigned to device
 * @irq_num: Number of IRQs assigned to device
 * @status: Initialization status
 */
struct xdma_device {
	struct platform_device	*pdev;
	struct dma_device	dma_dev;
	struct regmap		*rmap;
	struct xdma_chan	*h2c_chans;
	struct xdma_chan	*c2h_chans;
	u32			h2c_chan_num;
	u32			c2h_chan_num;
	u32			irq_start;
	u32			irq_num;
	u32			status;
};

#define xdma_err(xdev, fmt, args...)					\
	dev_err(&(xdev)->pdev->dev, fmt, ##args)

#define XDMA_CHAN_NUM(_xd) ({						\
	typeof(_xd) (xd) = (_xd);					\
	((xd)->h2c_chan_num + (xd)->c2h_chan_num); })

/* Get the last desc in a desc block */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
	return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}

/**
 * xdma_link_desc_blocks - Link descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	u32 last_blk_desc, desc_control;
	struct xdma_hw_desc *desc;
	int i;

	desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
	for (i = 1; i < sw_desc->dblk_num; i++) {
		block = &sw_desc->desc_blocks[i - 1];
		desc = xdma_blk_last_desc(block);

		if (!(i & XDMA_DESC_BLOCK_MASK)) {
			desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
			continue;
		}
		desc->control = cpu_to_le32(desc_control);
		desc->next_desc = cpu_to_le64(block[1].dma_addr);
	}

	/* update the last block */
	last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
		block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
		desc = xdma_blk_last_desc(block);
		desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
		desc->control = cpu_to_le32(desc_control);
	}

	block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
	desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}

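/*
 * Resulting layout (a note for clarity, assuming XDMA_DESC_ADJACENT
 * descriptors per block): the last descriptor of each block points at the
 * start of the following block, so the engine walks the blocks as one
 * chain; every XDMA_DESC_BLOCK_NUM blocks the chain is terminated with
 * XDMA_DESC_CONTROL_LAST, so completion is reported per block set.
 */
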
static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct xdma_chan, vchan.chan);
}

static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct xdma_desc, vdesc);
}

/**
 * xdma_channel_init - Initialize DMA channel registers
 * @chan: DMA channel pointer
 */
static int xdma_channel_init(struct xdma_chan *chan)
{
	struct xdma_device *xdev = chan->xdev_hdl;
	int ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_NON_INCR_ADDR);
	if (ret)
		return ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
			   CHAN_IM_ALL);
	if (ret)
		return ret;

	return 0;
}

/**
 * xdma_free_desc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void xdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct xdma_desc *sw_desc;
	int i;

	sw_desc = to_xdma_desc(vdesc);
	for (i = 0; i < sw_desc->dblk_num; i++) {
		if (!sw_desc->desc_blocks[i].virt_addr)
			break;
		dma_pool_free(sw_desc->chan->desc_pool,
			      sw_desc->desc_blocks[i].virt_addr,
			      sw_desc->desc_blocks[i].dma_addr);
	}
	kfree(sw_desc->desc_blocks);
	kfree(sw_desc);
}

/**
 * xdma_alloc_desc - Allocate descriptor
 * @chan: DMA channel pointer
 * @desc_num: Number of hardware descriptors
 */
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
{
	struct xdma_desc *sw_desc;
	struct xdma_hw_desc *desc;
	dma_addr_t dma_addr;
	u32 dblk_num;
	void *addr;
	int i, j;

	sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
	if (!sw_desc)
		return NULL;

	sw_desc->chan = chan;
	sw_desc->desc_num = desc_num;
	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
				       GFP_NOWAIT);
	if (!sw_desc->desc_blocks)
		goto failed;

	sw_desc->dblk_num = dblk_num;
	for (i = 0; i < sw_desc->dblk_num; i++) {
		addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
		if (!addr)
			goto failed;

		sw_desc->desc_blocks[i].virt_addr = addr;
		sw_desc->desc_blocks[i].dma_addr = dma_addr;
		for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
			desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0));
	}

	xdma_link_desc_blocks(sw_desc);

	return sw_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_xfer_start - Start DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct xdma_desc_block *block;
	u32 val, completed_blocks;
	struct xdma_desc *desc;
	int ret;

	/*
	 * Do nothing if no descriptor has been submitted or the channel is
	 * busy. The vchan lock must be held by the caller.
	 */
	if (!vd || xchan->busy)
		return -EINVAL;

	/* clear run stop bit to get ready for transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_RUN_STOP);
	if (ret)
		return ret;

	desc = to_xdma_desc(vd);
	if (desc->dir != xchan->dir) {
		xdma_err(xdev, "incorrect request direction");
		return -EINVAL;
	}

	/* set DMA engine to the first descriptor block */
	completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
	block = &desc->desc_blocks[completed_blocks];
	val = lower_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
	if (ret)
		return ret;

	val = upper_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
	if (ret)
		return ret;

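	/*
	 * The adjacent-descriptor count hints how many further descriptors
	 * are laid out contiguously after the first one, so the engine can
	 * fetch them in one burst (an explanatory note; PG195 has the
	 * authoritative description of this register).
	 */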
	if (completed_blocks + 1 == desc->dblk_num)
		val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	else
		val = XDMA_DESC_ADJACENT - 1;
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
	if (ret)
		return ret;

	/* kick off DMA transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
			   CHAN_CTRL_START);
	if (ret)
		return ret;

	xchan->busy = true;
	return 0;
}

/**
 * xdma_alloc_channels - Detect and allocate DMA channels
 * @xdev: DMA device pointer
 * @dir: Channel direction
 */
static int xdma_alloc_channels(struct xdma_device *xdev,
			       enum dma_transfer_direction dir)
{
	struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
	struct xdma_chan **chans, *xchan;
	u32 base, identifier, target;
	u32 *chan_num;
	int i, j, ret;

	if (dir == DMA_MEM_TO_DEV) {
		base = XDMA_CHAN_H2C_OFFSET;
		target = XDMA_CHAN_H2C_TARGET;
		chans = &xdev->h2c_chans;
		chan_num = &xdev->h2c_chan_num;
	} else if (dir == DMA_DEV_TO_MEM) {
		base = XDMA_CHAN_C2H_OFFSET;
		target = XDMA_CHAN_C2H_TARGET;
		chans = &xdev->c2h_chans;
		chan_num = &xdev->c2h_chan_num;
	} else {
		xdma_err(xdev, "invalid direction specified");
		return -EINVAL;
	}

	/* detect number of available DMA channels */
	for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		/* check if the channel is available */
		if (XDMA_CHAN_CHECK_TARGET(identifier, target))
			(*chan_num)++;
	}
	if (!*chan_num) {
		xdma_err(xdev, "failed to detect any channel");
		return -EINVAL;
	}

	*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
			      GFP_KERNEL);
	if (!*chans)
		return -ENOMEM;

	for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
			continue;

		if (j == *chan_num) {
			xdma_err(xdev, "invalid channel number");
			return -EIO;
		}

		/* init channel structure and hardware */
		xchan = &(*chans)[j];
		xchan->xdev_hdl = xdev;
		xchan->base = base + i * XDMA_CHAN_STRIDE;
		xchan->dir = dir;

		ret = xdma_channel_init(xchan);
		if (ret)
			return ret;
		xchan->vchan.desc_free = xdma_free_desc;
		vchan_init(&xchan->vchan, &xdev->dma_dev);

		j++;
	}

	dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
		 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");

	return 0;
}

/**
 * xdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void xdma_issue_pending(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
	if (vchan_issue_pending(&xdma_chan->vchan))
		xdma_xfer_start(xdma_chan);
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}

/**
 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
 * @chan: DMA channel pointer
 * @sgl: Transfer scatter gather list
 * @sg_len: Length of scatter gather list
 * @dir: Transfer direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 */
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct dma_async_tx_descriptor *tx_desc;
	u32 desc_num = 0, i, len, rest;
	struct xdma_desc_block *dblk;
	struct xdma_hw_desc *desc;
	struct xdma_desc *sw_desc;
	u64 dev_addr, *src, *dst;
	struct scatterlist *sg;
	u64 addr;

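	/*
	 * A segment longer than XDMA_DESC_BLEN_MAX cannot be covered by a
	 * single hardware descriptor, so one scatterlist entry may expand
	 * into several descriptors; count that expansion up front.
	 */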
	for_each_sg(sgl, sg, sg_len, i)
		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xdma_chan, desc_num);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = dir;

	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	dblk = sw_desc->desc_blocks;
	desc = dblk->virt_addr;
	desc_num = 1;
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		rest = sg_dma_len(sg);

		do {
			len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
			/* set hardware descriptor */
			desc->bytes = cpu_to_le32(len);
			desc->src_addr = cpu_to_le64(*src);
			desc->dst_addr = cpu_to_le64(*dst);

			if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
				dblk++;
				desc = dblk->virt_addr;
			} else {
				desc++;
			}

			desc_num++;
			dev_addr += len;
			addr += len;
			rest -= len;
		} while (rest);
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_device_config - Configure the DMA channel
 * @chan: DMA channel
 * @cfg: channel configuration
 */
static int xdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));

	return 0;
}

/**
 * xdma_free_chan_resources - Free channel resources
 * @chan: DMA channel
 */
static void xdma_free_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	vchan_free_chan_resources(&xdma_chan->vchan);
	dma_pool_destroy(xdma_chan->desc_pool);
	xdma_chan->desc_pool = NULL;
}

/**
 * xdma_alloc_chan_resources - Allocate channel resources
 * @chan: DMA channel
 */
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	struct device *dev = xdev->dma_dev.dev;

	while (dev && !dev_is_pci(dev))
		dev = dev->parent;
	if (!dev) {
		xdma_err(xdev, "unable to find pci device");
		return -EINVAL;
	}

	xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
					       dev, XDMA_DESC_BLOCK_SIZE,
					       XDMA_DESC_BLOCK_ALIGN, 0);
	if (!xdma_chan->desc_pool) {
		xdma_err(xdev, "unable to allocate descriptor pool");
		return -ENOMEM;
	}

	return 0;
}

/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
	struct xdma_chan *xchan = dev_id;
	u32 complete_desc_num = 0;
	struct xdma_device *xdev;
	struct virt_dma_desc *vd;
	struct xdma_desc *desc;
	int ret;

	spin_lock(&xchan->vchan.lock);

	/* get submitted request */
	vd = vchan_next_desc(&xchan->vchan);
	if (!vd)
		goto out;

	xchan->busy = false;
	desc = to_xdma_desc(vd);
	xdev = xchan->xdev_hdl;

	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
			  &complete_desc_num);
	if (ret)
		goto out;

	desc->completed_desc_num += complete_desc_num;
	/*
	 * if all data blocks are transferred, remove and complete the request
	 */
	if (desc->completed_desc_num == desc->desc_num) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
		goto out;
	}

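	/*
	 * An intermediate interrupt is expected to report exactly one fully
	 * consumed block set (XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT
	 * descriptors); any other count, or an over-count, means we cannot
	 * simply resume from the next block, so bail out.
	 */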
	if (desc->completed_desc_num > desc->desc_num ||
	    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
		goto out;

	/* transfer the rest of data */
	xdma_xfer_start(xchan);

out:
	spin_unlock(&xchan->vchan.lock);
	return IRQ_HANDLED;
}

/**
 * xdma_irq_fini - Uninitialize IRQ
 * @xdev: DMA device pointer
 */
static void xdma_irq_fini(struct xdma_device *xdev)
{
	int i;

	/* disable interrupt */
	regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);

	/* free irq handler */
	for (i = 0; i < xdev->h2c_chan_num; i++)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	for (i = 0; i < xdev->c2h_chan_num; i++)
		free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}

/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ
 * @irq_num: Number of IRQs
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
			       u32 irq_start, u32 irq_num)
{
	u32 shift, i, val = 0;
	int ret;

	/* Each IRQ register is 32 bit and contains 4 IRQs */
	while (irq_num > 0) {
		for (i = 0; i < 4; i++) {
			shift = XDMA_IRQ_VEC_SHIFT * i;
			val |= irq_start << shift;
			irq_start++;
			irq_num--;
			if (!irq_num)
				break;
		}

		/* write IRQ register */
		ret = regmap_write(xdev->rmap, vec_tbl_start, val);
		if (ret)
			return ret;
		vec_tbl_start += sizeof(u32);
		val = 0;
	}

	return 0;
}

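/*
 * Worked example (assuming XDMA_IRQ_VEC_SHIFT is 8): with irq_start = 0
 * and irq_num = 6, the first vector register is written with 0x03020100
 * (vectors 0..3, one per byte) and the second with 0x00000504 (vectors
 * 4..5).
 */
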
/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
	u32 irq = xdev->irq_start;
	u32 user_irq_start;
	int i, j, ret;

	/* return failure if there are not enough IRQs */
	if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
		xdma_err(xdev, "not enough irq");
		return -EINVAL;
	}

	/* setup H2C interrupt handler */
	for (i = 0; i < xdev->h2c_chan_num; i++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-h2c-channel", &xdev->h2c_chans[i]);
		if (ret) {
			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
				 i, irq, ret);
			goto failed_init_h2c;
		}
		xdev->h2c_chans[i].irq = irq;
		irq++;
	}

	/* setup C2H interrupt handler */
	for (j = 0; j < xdev->c2h_chan_num; j++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
		if (ret) {
			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
				 j, irq, ret);
			goto failed_init_c2h;
		}
		xdev->c2h_chans[j].irq = irq;
		irq++;
	}

	/* config hardware IRQ registers */
	ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
				  XDMA_CHAN_NUM(xdev));
	if (ret) {
		xdma_err(xdev, "failed to set channel vectors: %d", ret);
		goto failed_init_c2h;
	}

	/* config user IRQ registers if needed */
	user_irq_start = XDMA_CHAN_NUM(xdev);
	if (xdev->irq_num > user_irq_start) {
		ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
					  user_irq_start,
					  xdev->irq_num - user_irq_start);
		if (ret) {
			xdma_err(xdev, "failed to set user vectors: %d", ret);
			goto failed_init_c2h;
		}
	}

	/* enable interrupt */
	ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
	if (ret)
		goto failed_init_c2h;

	return 0;

failed_init_c2h:
	while (j--)
		free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
	while (i--)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	return ret;
}

static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_chan_info *chan_info = param;

	return chan_info->dir == xdma_chan->dir;
}

/**
 * xdma_disable_user_irq - Disable user interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return;
	}
	index -= XDMA_CHAN_NUM(xdev);

	regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);

/**
 * xdma_enable_user_irq - Enable user logic interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;
	int ret;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return -EINVAL;
	}
	index -= XDMA_CHAN_NUM(xdev);

	ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(xdma_enable_user_irq);

/**
 * xdma_get_user_irq - Get system IRQ number
 * @pdev: Pointer to the platform_device structure
 * @user_irq_index: User logic IRQ wire index
 *
 * Return: The system IRQ number allocated for the given wire index.
 */
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq index");
		return -EINVAL;
	}

	return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);

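/*
 * A sketch of how a client might consume one user interrupt wire with the
 * helpers above ('my_user_isr' and 'my_data' are hypothetical):
 *
 *	int irq = xdma_get_user_irq(pdev, 0);
 *
 *	if (irq < 0)
 *		return irq;
 *	ret = request_irq(irq, my_user_isr, 0, "my-user-irq", my_data);
 *	if (!ret)
 *		ret = xdma_enable_user_irq(pdev, irq);
 */
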
/**
 * xdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_remove(struct platform_device *pdev)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
		xdma_irq_fini(xdev);

	if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
		dma_async_device_unregister(&xdev->dma_dev);

	return 0;
}

/**
 * xdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_probe(struct platform_device *pdev)
{
	struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct xdma_device *xdev;
	void __iomem *reg_base;
	struct resource *res;
	int ret = -ENODEV;

	if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
		dev_err(&pdev->dev, "invalid max dma channels %d",
			pdata->max_dma_channels);
		return -EINVAL;
	}

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, xdev);
	xdev->pdev = pdev;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		xdma_err(xdev, "failed to get irq resource");
		goto failed;
	}
	xdev->irq_start = res->start;
	xdev->irq_num = res->end - res->start + 1;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		xdma_err(xdev, "failed to get io resource");
		goto failed;
	}

	reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg_base)) {
		xdma_err(xdev, "ioremap failed");
		goto failed;
	}

	xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
					   &xdma_regmap_config);
	if (IS_ERR(xdev->rmap)) {
		ret = PTR_ERR(xdev->rmap);
		xdma_err(xdev, "config regmap failed: %d", ret);
		goto failed;
	}
	INIT_LIST_HEAD(&xdev->dma_dev.channels);

	ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
	if (ret) {
		xdma_err(xdev, "config H2C channels failed: %d", ret);
		goto failed;
	}

	ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
	if (ret) {
		xdma_err(xdev, "config C2H channels failed: %d", ret);
		goto failed;
	}

	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);

	xdev->dma_dev.dev = &pdev->dev;
	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
	xdev->dma_dev.device_tx_status = dma_cookie_status;
	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
	xdev->dma_dev.device_config = xdma_device_config;
	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
	xdev->dma_dev.filter.map = pdata->device_map;
	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
	xdev->dma_dev.filter.fn = xdma_filter_fn;

	ret = dma_async_device_register(&xdev->dma_dev);
	if (ret) {
		xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_REG_DMA;

	ret = xdma_irq_init(xdev);
	if (ret) {
		xdma_err(xdev, "failed to init msix: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;

	return 0;

failed:
	xdma_remove(pdev);

	return ret;
}

static const struct platform_device_id xdma_id_table[] = {
	{ "xdma", 0 },
	{ },
};

static struct platform_driver xdma_driver = {
	.driver		= {
		.name = "xdma",
	},
	.id_table	= xdma_id_table,
	.probe		= xdma_probe,
	.remove		= xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");