Merge tag 'dmaengine-4.18-rc1' of git://git.infradead.org/users/vkoul/slave-dma
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 8 Jun 2018 18:02:21 +0000 (11:02 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 8 Jun 2018 18:02:21 +0000 (11:02 -0700)
Pull dmaengine updates from Vinod Koul:

 - updates to the sprd, bam_dma and stm32 drivers

 - remove VLAs in dmatest

 - move TI drivers to their own subdir

 - switch to SPDX tags for imx/mxs dma drivers

 - simplify getting .drvdata on a bunch of drivers, by Wolfram Sang (see the sketch below)

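The drvdata simplification replaces the to_platform_device() + platform_get_drvdata() pair in dev_pm_ops callbacks with a single dev_get_drvdata() call, as the at_hdmac, at_xdmac and dw hunks further down show. A minimal sketch of the pattern, using a hypothetical foo_dma driver rather than any of the converted ones:

#include <linux/platform_device.h>
#include <linux/io.h>

/* Hypothetical private data; only the PM-callback pattern matters here. */
struct foo_dma {
        void __iomem *regs;
};

/* Before: reach the drvdata through the platform device. */
static int foo_suspend_old(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct foo_dma *fd = platform_get_drvdata(pdev);

        writel(0, fd->regs);            /* e.g. quiesce the controller */
        return 0;
}

/* After: dev_get_drvdata() returns the same pointer straight from struct device. */
static int foo_suspend_new(struct device *dev)
{
        struct foo_dma *fd = dev_get_drvdata(dev);

        writel(0, fd->regs);
        return 0;
}

Both variants read the pointer stored by platform_set_drvdata() in probe, so the conversion is purely mechanical.
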
* tag 'dmaengine-4.18-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (32 commits)
  dmaengine: sprd: Add Spreadtrum DMA configuration
  dmaengine: sprd: Optimize the sprd_dma_prep_dma_memcpy()
  dmaengine: imx-dma: Switch to SPDX identifier
  dmaengine: mxs-dma: Switch to SPDX identifier
  dmaengine: imx-sdma: Switch to SPDX identifier
  dmaengine: usb-dmac: Document R8A7799{0,5} bindings
  dmaengine: qcom: bam_dma: fix some doc warnings.
  dmaengine: qcom: bam_dma: fix invalid assignment warning
  dmaengine: sprd: fix an NULL vs IS_ERR() bug
  dmaengine: sprd: Use devm_ioremap_resource() to map memory
  dmaengine: sprd: Fix potential NULL dereference in sprd_dma_probe()
  dmaengine: pl330: flush before wait, and add dev burst support.
  dmaengine: axi-dmac: Request IRQ with IRQF_SHARED
  dmaengine: stm32-mdma: fix spelling mistake: "avalaible" -> "available"
  dmaengine: rcar-dmac: Document R-Car D3 bindings
  dmaengine: sprd: Move DMA request mode and interrupt type into head file
  dmaengine: sprd: Define the DMA data width type
  dmaengine: sprd: Define the DMA transfer step type
  dmaengine: ti: New directory for Texas Instruments DMA drivers
  dmaengine: shdmac: Change platform check to CONFIG_ARCH_RENESAS
  ...

35 files changed:
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
MAINTAINERS
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/at_hdmac.c
drivers/dma/at_xdmac.c
drivers/dma/cppi41.c [deleted file]
drivers/dma/dma-axi-dmac.c
drivers/dma/dmatest.c
drivers/dma/dw/platform.c
drivers/dma/edma.c [deleted file]
drivers/dma/fsldma.c
drivers/dma/idma64.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/mxs-dma.c
drivers/dma/omap-dma.c [deleted file]
drivers/dma/pl330.c
drivers/dma/qcom/bam_dma.c
drivers/dma/qcom/hidma.c
drivers/dma/qcom/hidma_mgmt_sys.c
drivers/dma/sh/shdmac.c
drivers/dma/sprd-dma.c
drivers/dma/ste_dma40.c
drivers/dma/stm32-mdma.c
drivers/dma/ti-dma-crossbar.c [deleted file]
drivers/dma/ti/Kconfig [new file with mode: 0644]
drivers/dma/ti/Makefile [new file with mode: 0644]
drivers/dma/ti/cppi41.c [new file with mode: 0644]
drivers/dma/ti/dma-crossbar.c [new file with mode: 0644]
drivers/dma/ti/edma.c [new file with mode: 0644]
drivers/dma/ti/omap-dma.c [new file with mode: 0644]
drivers/dma/txx9dmac.c
include/linux/dma/sprd-dma.h [new file with mode: 0644]

index 61315eaa76606d777a05d61a3caa3dbf845bd81a..b1ba639554c087c0de5bab69b3b6abb208c6931a 100644 (file)
@@ -29,6 +29,7 @@ Required Properties:
                - "renesas,dmac-r8a77965" (R-Car M3-N)
                - "renesas,dmac-r8a77970" (R-Car V3M)
                - "renesas,dmac-r8a77980" (R-Car V3H)
+               - "renesas,dmac-r8a77995" (R-Car D3)
 
 - reg: base address and length of the registers block for the DMAC
 
index 9dc935e24e558f4f6edaa166a29fbd038bc66d48..482e54362d3e0ba60bd1b5f731f14854d0168c30 100644 (file)
@@ -12,6 +12,8 @@ Required Properties:
          - "renesas,r8a7795-usb-dmac" (R-Car H3)
          - "renesas,r8a7796-usb-dmac" (R-Car M3-W)
          - "renesas,r8a77965-usb-dmac" (R-Car M3-N)
+         - "renesas,r8a77990-usb-dmac" (R-Car E3)
+         - "renesas,r8a77995-usb-dmac" (R-Car D3)
 - reg: base address and length of the registers block for the DMAC
 - interrupts: interrupt specifiers for the DMAC, one for each entry in
   interrupt-names.
index 3838c94a0d47670167151c7c9389bd7a7b33c95f..abec3381cf45e063c649d771e1f5b26a8a1caa2a 100644 (file)
@@ -11790,6 +11790,14 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.g
 S:     Supported
 F:     arch/hexagon/
 
+QUALCOMM HIDMA DRIVER
+M:     Sinan Kaya <okaya@codeaurora.org>
+L:     linux-arm-kernel@lists.infradead.org
+L:     linux-arm-msm@vger.kernel.org
+L:     dmaengine@vger.kernel.org
+S:     Supported
+F:     drivers/dma/qcom/hidma*
+
 QUALCOMM IOMMU
 M:     Rob Clark <robdclark@gmail.com>
 L:     iommu@lists.linux-foundation.org
index 6d61cd0236339172b2b1075134db65ed43a2f47d..ca1680afa20ae8ceea3a140edb3366f84dcc02df 100644 (file)
@@ -151,13 +151,6 @@ config DMA_JZ4780
          If you have a board based on such a SoC and wish to use DMA for
          devices which can use the DMA controller, say Y or M here.
 
-config DMA_OMAP
-       tristate "OMAP DMA support"
-       depends on ARCH_OMAP || COMPILE_TEST
-       select DMA_ENGINE
-       select DMA_VIRTUAL_CHANNELS
-       select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
-
 config DMA_SA11X0
        tristate "SA-11x0 DMA support"
        depends on ARCH_SA1100 || COMPILE_TEST
@@ -574,28 +567,6 @@ config TIMB_DMA
        help
          Enable support for the Timberdale FPGA DMA engine.
 
-config TI_CPPI41
-       tristate "CPPI 4.1 DMA support"
-       depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX)
-       select DMA_ENGINE
-       help
-         The Communications Port Programming Interface (CPPI) 4.1 DMA engine
-         is currently used by the USB driver on AM335x and DA8xx platforms.
-
-config TI_DMA_CROSSBAR
-       bool
-
-config TI_EDMA
-       bool "TI EDMA support"
-       depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE || COMPILE_TEST
-       select DMA_ENGINE
-       select DMA_VIRTUAL_CHANNELS
-       select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
-       default n
-       help
-         Enable support for the TI EDMA controller. This DMA
-         engine is found on TI DaVinci and AM33xx parts.
-
 config XGENE_DMA
        tristate "APM X-Gene DMA support"
        depends on ARCH_XGENE || COMPILE_TEST
@@ -653,6 +624,8 @@ source "drivers/dma/hsu/Kconfig"
 
 source "drivers/dma/sh/Kconfig"
 
+source "drivers/dma/ti/Kconfig"
+
 # clients
 comment "DMA Clients"
        depends on DMA_ENGINE
index 0f62a4d49aabc91a13e1194df562cfb0d3a88aeb..203a99d68315cec8b20f57a601824699d6f10dd5 100644 (file)
@@ -24,7 +24,6 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
-obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
@@ -69,13 +68,11 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
 obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
-obj-$(CONFIG_TI_CPPI41) += cppi41.o
-obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
-obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
 
 obj-y += mediatek/
 obj-y += qcom/
+obj-y += ti/
 obj-y += xilinx/
index a861b5b4d4437d6b3be7dcf5e9d0b3475205455d..75f38d19fcbed5dac47e4fd8b7d8b78cef228e66 100644 (file)
@@ -2041,8 +2041,7 @@ static void at_dma_shutdown(struct platform_device *pdev)
 
 static int at_dma_prepare(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct at_dma *atdma = platform_get_drvdata(pdev);
+       struct at_dma *atdma = dev_get_drvdata(dev);
        struct dma_chan *chan, *_chan;
 
        list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
@@ -2076,8 +2075,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan)
 
 static int at_dma_suspend_noirq(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct at_dma *atdma = platform_get_drvdata(pdev);
+       struct at_dma *atdma = dev_get_drvdata(dev);
        struct dma_chan *chan, *_chan;
 
        /* preserve data */
@@ -2118,8 +2116,7 @@ static void atc_resume_cyclic(struct at_dma_chan *atchan)
 
 static int at_dma_resume_noirq(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct at_dma *atdma = platform_get_drvdata(pdev);
+       struct at_dma *atdma = dev_get_drvdata(dev);
        struct dma_chan *chan, *_chan;
 
        /* bring back DMA controller */
index 94236ec9d4100fd6f1c0673f6c49e1cfd914d234..4bf72561667c7bd636749f37373477d5a8862f99 100644 (file)
@@ -1833,8 +1833,7 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan)
 #ifdef CONFIG_PM
 static int atmel_xdmac_prepare(struct device *dev)
 {
-       struct platform_device  *pdev = to_platform_device(dev);
-       struct at_xdmac         *atxdmac = platform_get_drvdata(pdev);
+       struct at_xdmac         *atxdmac = dev_get_drvdata(dev);
        struct dma_chan         *chan, *_chan;
 
        list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
@@ -1853,8 +1852,7 @@ static int atmel_xdmac_prepare(struct device *dev)
 #ifdef CONFIG_PM_SLEEP
 static int atmel_xdmac_suspend(struct device *dev)
 {
-       struct platform_device  *pdev = to_platform_device(dev);
-       struct at_xdmac         *atxdmac = platform_get_drvdata(pdev);
+       struct at_xdmac         *atxdmac = dev_get_drvdata(dev);
        struct dma_chan         *chan, *_chan;
 
        list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
@@ -1878,8 +1876,7 @@ static int atmel_xdmac_suspend(struct device *dev)
 
 static int atmel_xdmac_resume(struct device *dev)
 {
-       struct platform_device  *pdev = to_platform_device(dev);
-       struct at_xdmac         *atxdmac = platform_get_drvdata(pdev);
+       struct at_xdmac         *atxdmac = dev_get_drvdata(dev);
        struct at_xdmac_chan    *atchan;
        struct dma_chan         *chan, *_chan;
        int                     i;
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
deleted file mode 100644 (file)
index d9bee65..0000000
+++ /dev/null
@@ -1,1223 +0,0 @@
-#include <linux/delay.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-#include <linux/of_dma.h>
-#include <linux/of_irq.h>
-#include <linux/dmapool.h>
-#include <linux/interrupt.h>
-#include <linux/of_address.h>
-#include <linux/pm_runtime.h>
-#include "dmaengine.h"
-
-#define DESC_TYPE      27
-#define DESC_TYPE_HOST 0x10
-#define DESC_TYPE_TEARD        0x13
-
-#define TD_DESC_IS_RX  (1 << 16)
-#define TD_DESC_DMA_NUM        10
-
-#define DESC_LENGTH_BITS_NUM   21
-
-#define DESC_TYPE_USB  (5 << 26)
-#define DESC_PD_COMPLETE       (1 << 31)
-
-/* DMA engine */
-#define DMA_TDFDQ      4
-#define DMA_TXGCR(x)   (0x800 + (x) * 0x20)
-#define DMA_RXGCR(x)   (0x808 + (x) * 0x20)
-#define RXHPCRA0               4
-
-#define GCR_CHAN_ENABLE                (1 << 31)
-#define GCR_TEARDOWN           (1 << 30)
-#define GCR_STARV_RETRY                (1 << 24)
-#define GCR_DESC_TYPE_HOST     (1 << 14)
-
-/* DMA scheduler */
-#define DMA_SCHED_CTRL         0
-#define DMA_SCHED_CTRL_EN      (1 << 31)
-#define DMA_SCHED_WORD(x)      ((x) * 4 + 0x800)
-
-#define SCHED_ENTRY0_CHAN(x)   ((x) << 0)
-#define SCHED_ENTRY0_IS_RX     (1 << 7)
-
-#define SCHED_ENTRY1_CHAN(x)   ((x) << 8)
-#define SCHED_ENTRY1_IS_RX     (1 << 15)
-
-#define SCHED_ENTRY2_CHAN(x)   ((x) << 16)
-#define SCHED_ENTRY2_IS_RX     (1 << 23)
-
-#define SCHED_ENTRY3_CHAN(x)   ((x) << 24)
-#define SCHED_ENTRY3_IS_RX     (1 << 31)
-
-/* Queue manager */
-/* 4 KiB of memory for descriptors, 2 for each endpoint */
-#define ALLOC_DECS_NUM         128
-#define DESCS_AREAS            1
-#define TOTAL_DESCS_NUM                (ALLOC_DECS_NUM * DESCS_AREAS)
-#define QMGR_SCRATCH_SIZE      (TOTAL_DESCS_NUM * 4)
-
-#define QMGR_LRAM0_BASE                0x80
-#define QMGR_LRAM_SIZE         0x84
-#define QMGR_LRAM1_BASE                0x88
-#define QMGR_MEMBASE(x)                (0x1000 + (x) * 0x10)
-#define QMGR_MEMCTRL(x)                (0x1004 + (x) * 0x10)
-#define QMGR_MEMCTRL_IDX_SH    16
-#define QMGR_MEMCTRL_DESC_SH   8
-
-#define QMGR_PEND(x)   (0x90 + (x) * 4)
-
-#define QMGR_PENDING_SLOT_Q(x) (x / 32)
-#define QMGR_PENDING_BIT_Q(x)  (x % 32)
-
-#define QMGR_QUEUE_A(n)        (0x2000 + (n) * 0x10)
-#define QMGR_QUEUE_B(n)        (0x2004 + (n) * 0x10)
-#define QMGR_QUEUE_C(n)        (0x2008 + (n) * 0x10)
-#define QMGR_QUEUE_D(n)        (0x200c + (n) * 0x10)
-
-/* Packet Descriptor */
-#define PD2_ZERO_LENGTH                (1 << 19)
-
-struct cppi41_channel {
-       struct dma_chan chan;
-       struct dma_async_tx_descriptor txd;
-       struct cppi41_dd *cdd;
-       struct cppi41_desc *desc;
-       dma_addr_t desc_phys;
-       void __iomem *gcr_reg;
-       int is_tx;
-       u32 residue;
-
-       unsigned int q_num;
-       unsigned int q_comp_num;
-       unsigned int port_num;
-
-       unsigned td_retry;
-       unsigned td_queued:1;
-       unsigned td_seen:1;
-       unsigned td_desc_seen:1;
-
-       struct list_head node;          /* Node for pending list */
-};
-
-struct cppi41_desc {
-       u32 pd0;
-       u32 pd1;
-       u32 pd2;
-       u32 pd3;
-       u32 pd4;
-       u32 pd5;
-       u32 pd6;
-       u32 pd7;
-} __aligned(32);
-
-struct chan_queues {
-       u16 submit;
-       u16 complete;
-};
-
-struct cppi41_dd {
-       struct dma_device ddev;
-
-       void *qmgr_scratch;
-       dma_addr_t scratch_phys;
-
-       struct cppi41_desc *cd;
-       dma_addr_t descs_phys;
-       u32 first_td_desc;
-       struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];
-
-       void __iomem *ctrl_mem;
-       void __iomem *sched_mem;
-       void __iomem *qmgr_mem;
-       unsigned int irq;
-       const struct chan_queues *queues_rx;
-       const struct chan_queues *queues_tx;
-       struct chan_queues td_queue;
-       u16 first_completion_queue;
-       u16 qmgr_num_pend;
-       u32 n_chans;
-       u8 platform;
-
-       struct list_head pending;       /* Pending queued transfers */
-       spinlock_t lock;                /* Lock for pending list */
-
-       /* context for suspend/resume */
-       unsigned int dma_tdfdq;
-
-       bool is_suspended;
-};
-
-static struct chan_queues am335x_usb_queues_tx[] = {
-       /* USB0 ENDP 1 */
-       [ 0] = { .submit = 32, .complete =  93},
-       [ 1] = { .submit = 34, .complete =  94},
-       [ 2] = { .submit = 36, .complete =  95},
-       [ 3] = { .submit = 38, .complete =  96},
-       [ 4] = { .submit = 40, .complete =  97},
-       [ 5] = { .submit = 42, .complete =  98},
-       [ 6] = { .submit = 44, .complete =  99},
-       [ 7] = { .submit = 46, .complete = 100},
-       [ 8] = { .submit = 48, .complete = 101},
-       [ 9] = { .submit = 50, .complete = 102},
-       [10] = { .submit = 52, .complete = 103},
-       [11] = { .submit = 54, .complete = 104},
-       [12] = { .submit = 56, .complete = 105},
-       [13] = { .submit = 58, .complete = 106},
-       [14] = { .submit = 60, .complete = 107},
-
-       /* USB1 ENDP1 */
-       [15] = { .submit = 62, .complete = 125},
-       [16] = { .submit = 64, .complete = 126},
-       [17] = { .submit = 66, .complete = 127},
-       [18] = { .submit = 68, .complete = 128},
-       [19] = { .submit = 70, .complete = 129},
-       [20] = { .submit = 72, .complete = 130},
-       [21] = { .submit = 74, .complete = 131},
-       [22] = { .submit = 76, .complete = 132},
-       [23] = { .submit = 78, .complete = 133},
-       [24] = { .submit = 80, .complete = 134},
-       [25] = { .submit = 82, .complete = 135},
-       [26] = { .submit = 84, .complete = 136},
-       [27] = { .submit = 86, .complete = 137},
-       [28] = { .submit = 88, .complete = 138},
-       [29] = { .submit = 90, .complete = 139},
-};
-
-static const struct chan_queues am335x_usb_queues_rx[] = {
-       /* USB0 ENDP 1 */
-       [ 0] = { .submit =  1, .complete = 109},
-       [ 1] = { .submit =  2, .complete = 110},
-       [ 2] = { .submit =  3, .complete = 111},
-       [ 3] = { .submit =  4, .complete = 112},
-       [ 4] = { .submit =  5, .complete = 113},
-       [ 5] = { .submit =  6, .complete = 114},
-       [ 6] = { .submit =  7, .complete = 115},
-       [ 7] = { .submit =  8, .complete = 116},
-       [ 8] = { .submit =  9, .complete = 117},
-       [ 9] = { .submit = 10, .complete = 118},
-       [10] = { .submit = 11, .complete = 119},
-       [11] = { .submit = 12, .complete = 120},
-       [12] = { .submit = 13, .complete = 121},
-       [13] = { .submit = 14, .complete = 122},
-       [14] = { .submit = 15, .complete = 123},
-
-       /* USB1 ENDP 1 */
-       [15] = { .submit = 16, .complete = 141},
-       [16] = { .submit = 17, .complete = 142},
-       [17] = { .submit = 18, .complete = 143},
-       [18] = { .submit = 19, .complete = 144},
-       [19] = { .submit = 20, .complete = 145},
-       [20] = { .submit = 21, .complete = 146},
-       [21] = { .submit = 22, .complete = 147},
-       [22] = { .submit = 23, .complete = 148},
-       [23] = { .submit = 24, .complete = 149},
-       [24] = { .submit = 25, .complete = 150},
-       [25] = { .submit = 26, .complete = 151},
-       [26] = { .submit = 27, .complete = 152},
-       [27] = { .submit = 28, .complete = 153},
-       [28] = { .submit = 29, .complete = 154},
-       [29] = { .submit = 30, .complete = 155},
-};
-
-static const struct chan_queues da8xx_usb_queues_tx[] = {
-       [0] = { .submit =  16, .complete = 24},
-       [1] = { .submit =  18, .complete = 24},
-       [2] = { .submit =  20, .complete = 24},
-       [3] = { .submit =  22, .complete = 24},
-};
-
-static const struct chan_queues da8xx_usb_queues_rx[] = {
-       [0] = { .submit =  1, .complete = 26},
-       [1] = { .submit =  3, .complete = 26},
-       [2] = { .submit =  5, .complete = 26},
-       [3] = { .submit =  7, .complete = 26},
-};
-
-struct cppi_glue_infos {
-       const struct chan_queues *queues_rx;
-       const struct chan_queues *queues_tx;
-       struct chan_queues td_queue;
-       u16 first_completion_queue;
-       u16 qmgr_num_pend;
-};
-
-static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
-{
-       return container_of(c, struct cppi41_channel, chan);
-}
-
-static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
-{
-       struct cppi41_channel *c;
-       u32 descs_size;
-       u32 desc_num;
-
-       descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;
-
-       if (!((desc >= cdd->descs_phys) &&
-                       (desc < (cdd->descs_phys + descs_size)))) {
-               return NULL;
-       }
-
-       desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
-       BUG_ON(desc_num >= ALLOC_DECS_NUM);
-       c = cdd->chan_busy[desc_num];
-       cdd->chan_busy[desc_num] = NULL;
-
-       /* Usecount for chan_busy[], paired with push_desc_queue() */
-       pm_runtime_put(cdd->ddev.dev);
-
-       return c;
-}
-
-static void cppi_writel(u32 val, void *__iomem *mem)
-{
-       __raw_writel(val, mem);
-}
-
-static u32 cppi_readl(void *__iomem *mem)
-{
-       return __raw_readl(mem);
-}
-
-static u32 pd_trans_len(u32 val)
-{
-       return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
-}
-
-static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
-{
-       u32 desc;
-
-       desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
-       desc &= ~0x1f;
-       return desc;
-}
-
-static irqreturn_t cppi41_irq(int irq, void *data)
-{
-       struct cppi41_dd *cdd = data;
-       u16 first_completion_queue = cdd->first_completion_queue;
-       u16 qmgr_num_pend = cdd->qmgr_num_pend;
-       struct cppi41_channel *c;
-       int i;
-
-       for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend;
-                       i++) {
-               u32 val;
-               u32 q_num;
-
-               val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
-               if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) {
-                       u32 mask;
-                       /* set corresponding bit for completetion Q 93 */
-                       mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue);
-                       /* not set all bits for queues less than Q 93 */
-                       mask--;
-                       /* now invert and keep only Q 93+ set */
-                       val &= ~mask;
-               }
-
-               if (val)
-                       __iormb();
-
-               while (val) {
-                       u32 desc, len;
-
-                       /*
-                        * This should never trigger, see the comments in
-                        * push_desc_queue()
-                        */
-                       WARN_ON(cdd->is_suspended);
-
-                       q_num = __fls(val);
-                       val &= ~(1 << q_num);
-                       q_num += 32 * i;
-                       desc = cppi41_pop_desc(cdd, q_num);
-                       c = desc_to_chan(cdd, desc);
-                       if (WARN_ON(!c)) {
-                               pr_err("%s() q %d desc %08x\n", __func__,
-                                               q_num, desc);
-                               continue;
-                       }
-
-                       if (c->desc->pd2 & PD2_ZERO_LENGTH)
-                               len = 0;
-                       else
-                               len = pd_trans_len(c->desc->pd0);
-
-                       c->residue = pd_trans_len(c->desc->pd6) - len;
-                       dma_cookie_complete(&c->txd);
-                       dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-               }
-       }
-       return IRQ_HANDLED;
-}
-
-static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-       dma_cookie_t cookie;
-
-       cookie = dma_cookie_assign(tx);
-
-       return cookie;
-}
-
-static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
-{
-       struct cppi41_channel *c = to_cpp41_chan(chan);
-       struct cppi41_dd *cdd = c->cdd;
-       int error;
-
-       error = pm_runtime_get_sync(cdd->ddev.dev);
-       if (error < 0) {
-               dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
-                       __func__, error);
-               pm_runtime_put_noidle(cdd->ddev.dev);
-
-               return error;
-       }
-
-       dma_cookie_init(chan);
-       dma_async_tx_descriptor_init(&c->txd, chan);
-       c->txd.tx_submit = cppi41_tx_submit;
-
-       if (!c->is_tx)
-               cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
-
-       pm_runtime_mark_last_busy(cdd->ddev.dev);
-       pm_runtime_put_autosuspend(cdd->ddev.dev);
-
-       return 0;
-}
-
-static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
-{
-       struct cppi41_channel *c = to_cpp41_chan(chan);
-       struct cppi41_dd *cdd = c->cdd;
-       int error;
-
-       error = pm_runtime_get_sync(cdd->ddev.dev);
-       if (error < 0) {
-               pm_runtime_put_noidle(cdd->ddev.dev);
-
-               return;
-       }
-
-       WARN_ON(!list_empty(&cdd->pending));
-
-       pm_runtime_mark_last_busy(cdd->ddev.dev);
-       pm_runtime_put_autosuspend(cdd->ddev.dev);
-}
-
-static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
-       dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
-       struct cppi41_channel *c = to_cpp41_chan(chan);
-       enum dma_status ret;
-
-       ret = dma_cookie_status(chan, cookie, txstate);
-
-       dma_set_residue(txstate, c->residue);
-
-       return ret;
-}
-
-static void push_desc_queue(struct cppi41_channel *c)
-{
-       struct cppi41_dd *cdd = c->cdd;
-       u32 desc_num;
-       u32 desc_phys;
-       u32 reg;
-
-       c->residue = 0;
-
-       reg = GCR_CHAN_ENABLE;
-       if (!c->is_tx) {
-               reg |= GCR_STARV_RETRY;
-               reg |= GCR_DESC_TYPE_HOST;
-               reg |= c->q_comp_num;
-       }
-
-       cppi_writel(reg, c->gcr_reg);
-
-       /*
-        * We don't use writel() but __raw_writel() so we have to make sure
-        * that the DMA descriptor in coherent memory made to the main memory
-        * before starting the dma engine.
-        */
-       __iowmb();
-
-       /*
-        * DMA transfers can take at least 200ms to complete with USB mass
-        * storage connected. To prevent autosuspend timeouts, we must use
-        * pm_runtime_get/put() when chan_busy[] is modified. This will get
-        * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
-        * outcome of the transfer.
-        */
-       pm_runtime_get(cdd->ddev.dev);
-
-       desc_phys = lower_32_bits(c->desc_phys);
-       desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
-       WARN_ON(cdd->chan_busy[desc_num]);
-       cdd->chan_busy[desc_num] = c;
-
-       reg = (sizeof(struct cppi41_desc) - 24) / 4;
-       reg |= desc_phys;
-       cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
-}
-
-/*
- * Caller must hold cdd->lock to prevent push_desc_queue()
- * getting called out of order. We have both cppi41_dma_issue_pending()
- * and cppi41_runtime_resume() call this function.
- */
-static void cppi41_run_queue(struct cppi41_dd *cdd)
-{
-       struct cppi41_channel *c, *_c;
-
-       list_for_each_entry_safe(c, _c, &cdd->pending, node) {
-               push_desc_queue(c);
-               list_del(&c->node);
-       }
-}
-
-static void cppi41_dma_issue_pending(struct dma_chan *chan)
-{
-       struct cppi41_channel *c = to_cpp41_chan(chan);
-       struct cppi41_dd *cdd = c->cdd;
-       unsigned long flags;
-       int error;
-
-       error = pm_runtime_get(cdd->ddev.dev);
-       if ((error != -EINPROGRESS) && error < 0) {
-               pm_runtime_put_noidle(cdd->ddev.dev);
-               dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
-                       error);
-
-               return;
-       }
-
-       spin_lock_irqsave(&cdd->lock, flags);
-       list_add_tail(&c->node, &cdd->pending);
-       if (!cdd->is_suspended)
-               cppi41_run_queue(cdd);
-       spin_unlock_irqrestore(&cdd->lock, flags);
-
-       pm_runtime_mark_last_busy(cdd->ddev.dev);
-       pm_runtime_put_autosuspend(cdd->ddev.dev);
-}
-
-static u32 get_host_pd0(u32 length)
-{
-       u32 reg;
-
-       reg = DESC_TYPE_HOST << DESC_TYPE;
-       reg |= length;
-
-       return reg;
-}
-
-static u32 get_host_pd1(struct cppi41_channel *c)
-{
-       u32 reg;
-
-       reg = 0;
-
-       return reg;
-}
-
-static u32 get_host_pd2(struct cppi41_channel *c)
-{
-       u32 reg;
-
-       reg = DESC_TYPE_USB;
-       reg |= c->q_comp_num;
-
-       return reg;
-}
-
-static u32 get_host_pd3(u32 length)
-{
-       u32 reg;
-
-       /* PD3 = packet size */
-       reg = length;
-
-       return reg;
-}
-
-static u32 get_host_pd6(u32 length)
-{
-       u32 reg;
-
-       /* PD6 buffer size */
-       reg = DESC_PD_COMPLETE;
-       reg |= length;
-
-       return reg;
-}
-
-static u32 get_host_pd4_or_7(u32 addr)
-{
-       u32 reg;
-
-       reg = addr;
-
-       return reg;
-}
-
-static u32 get_host_pd5(void)
-{
-       u32 reg;
-
-       reg = 0;
-
-       return reg;
-}
-
-static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
-       struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
-       enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
-{
-       struct cppi41_channel *c = to_cpp41_chan(chan);
-       struct cppi41_desc *d;
-       struct scatterlist *sg;
-       unsigned int i;
-
-       d = c->desc;
-       for_each_sg(sgl, sg, sg_len, i) {
-               u32 addr;
-               u32 len;
-
-               /* We need to use more than one desc once musb supports sg */
-               addr = lower_32_bits(sg_dma_address(sg));
-               len = sg_dma_len(sg);
-
-               d->pd0 = get_host_pd0(len);
-               d->pd1 = get_host_pd1(c);
-               d->pd2 = get_host_pd2(c);
-               d->pd3 = get_host_pd3(len);
-               d->pd4 = get_host_pd4_or_7(addr);
-               d->pd5 = get_host_pd5();
-               d->pd6 = get_host_pd6(len);
-               d->pd7 = get_host_pd4_or_7(addr);
-
-               d++;
-       }
-
-       return &c->txd;
-}
-
-static void cppi41_compute_td_desc(struct cppi41_desc *d)
-{
-       d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
-}
-
-static int cppi41_tear_down_chan(struct cppi41_channel *c)
-{
-       struct dmaengine_result abort_result;
-       struct cppi41_dd *cdd = c->cdd;
-       struct cppi41_desc *td;
-       u32 reg;
-       u32 desc_phys;
-       u32 td_desc_phys;
-
-       td = cdd->cd;
-       td += cdd->first_td_desc;
-
-       td_desc_phys = cdd->descs_phys;
-       td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);
-
-       if (!c->td_queued) {
-               cppi41_compute_td_desc(td);
-               __iowmb();
-
-               reg = (sizeof(struct cppi41_desc) - 24) / 4;
-               reg |= td_desc_phys;
-               cppi_writel(reg, cdd->qmgr_mem +
-                               QMGR_QUEUE_D(cdd->td_queue.submit));
-
-               reg = GCR_CHAN_ENABLE;
-               if (!c->is_tx) {
-                       reg |= GCR_STARV_RETRY;
-                       reg |= GCR_DESC_TYPE_HOST;
-                       reg |= cdd->td_queue.complete;
-               }
-               reg |= GCR_TEARDOWN;
-               cppi_writel(reg, c->gcr_reg);
-               c->td_queued = 1;
-               c->td_retry = 500;
-       }
-
-       if (!c->td_seen || !c->td_desc_seen) {
-
-               desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
-               if (!desc_phys && c->is_tx)
-                       desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
-
-               if (desc_phys == c->desc_phys) {
-                       c->td_desc_seen = 1;
-
-               } else if (desc_phys == td_desc_phys) {
-                       u32 pd0;
-
-                       __iormb();
-                       pd0 = td->pd0;
-                       WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
-                       WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
-                       WARN_ON((pd0 & 0x1f) != c->port_num);
-                       c->td_seen = 1;
-               } else if (desc_phys) {
-                       WARN_ON_ONCE(1);
-               }
-       }
-       c->td_retry--;
-       /*
-        * If the TX descriptor / channel is in use, the caller needs to poke
-        * his TD bit multiple times. After that he hardware releases the
-        * transfer descriptor followed by TD descriptor. Waiting seems not to
-        * cause any difference.
-        * RX seems to be thrown out right away. However once the TearDown
-        * descriptor gets through we are done. If we have seens the transfer
-        * descriptor before the TD we fetch it from enqueue, it has to be
-        * there waiting for us.
-        */
-       if (!c->td_seen && c->td_retry) {
-               udelay(1);
-               return -EAGAIN;
-       }
-       WARN_ON(!c->td_retry);
-
-       if (!c->td_desc_seen) {
-               desc_phys = cppi41_pop_desc(cdd, c->q_num);
-               if (!desc_phys)
-                       desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
-               WARN_ON(!desc_phys);
-       }
-
-       c->td_queued = 0;
-       c->td_seen = 0;
-       c->td_desc_seen = 0;
-       cppi_writel(0, c->gcr_reg);
-
-       /* Invoke the callback to do the necessary clean-up */
-       abort_result.result = DMA_TRANS_ABORTED;
-       dma_cookie_complete(&c->txd);
-       dmaengine_desc_get_callback_invoke(&c->txd, &abort_result);
-
-       return 0;
-}
-
-static int cppi41_stop_chan(struct dma_chan *chan)
-{
-       struct cppi41_channel *c = to_cpp41_chan(chan);
-       struct cppi41_dd *cdd = c->cdd;
-       u32 desc_num;
-       u32 desc_phys;
-       int ret;
-
-       desc_phys = lower_32_bits(c->desc_phys);
-       desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
-       if (!cdd->chan_busy[desc_num])
-               return 0;
-
-       ret = cppi41_tear_down_chan(c);
-       if (ret)
-               return ret;
-
-       WARN_ON(!cdd->chan_busy[desc_num]);
-       cdd->chan_busy[desc_num] = NULL;
-
-       /* Usecount for chan_busy[], paired with push_desc_queue() */
-       pm_runtime_put(cdd->ddev.dev);
-
-       return 0;
-}
-
-static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
-{
-       struct cppi41_channel *cchan, *chans;
-       int i;
-       u32 n_chans = cdd->n_chans;
-
-       /*
-        * The channels can only be used as TX or as RX. So we add twice
-        * that much dma channels because USB can only do RX or TX.
-        */
-       n_chans *= 2;
-
-       chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL);
-       if (!chans)
-               return -ENOMEM;
-
-       for (i = 0; i < n_chans; i++) {
-               cchan = &chans[i];
-
-               cchan->cdd = cdd;
-               if (i & 1) {
-                       cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
-                       cchan->is_tx = 1;
-               } else {
-                       cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
-                       cchan->is_tx = 0;
-               }
-               cchan->port_num = i >> 1;
-               cchan->desc = &cdd->cd[i];
-               cchan->desc_phys = cdd->descs_phys;
-               cchan->desc_phys += i * sizeof(struct cppi41_desc);
-               cchan->chan.device = &cdd->ddev;
-               list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
-       }
-       cdd->first_td_desc = n_chans;
-
-       return 0;
-}
-
-static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
-{
-       unsigned int mem_decs;
-       int i;
-
-       mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);
-
-       for (i = 0; i < DESCS_AREAS; i++) {
-
-               cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
-               cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
-
-               dma_free_coherent(dev, mem_decs, cdd->cd,
-                               cdd->descs_phys);
-       }
-}
-
-static void disable_sched(struct cppi41_dd *cdd)
-{
-       cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
-}
-
-static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
-{
-       disable_sched(cdd);
-
-       purge_descs(dev, cdd);
-
-       cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
-       cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
-       dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
-                       cdd->scratch_phys);
-}
-
-static int init_descs(struct device *dev, struct cppi41_dd *cdd)
-{
-       unsigned int desc_size;
-       unsigned int mem_decs;
-       int i;
-       u32 reg;
-       u32 idx;
-
-       BUILD_BUG_ON(sizeof(struct cppi41_desc) &
-                       (sizeof(struct cppi41_desc) - 1));
-       BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
-       BUILD_BUG_ON(ALLOC_DECS_NUM < 32);
-
-       desc_size = sizeof(struct cppi41_desc);
-       mem_decs = ALLOC_DECS_NUM * desc_size;
-
-       idx = 0;
-       for (i = 0; i < DESCS_AREAS; i++) {
-
-               reg = idx << QMGR_MEMCTRL_IDX_SH;
-               reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
-               reg |= ilog2(ALLOC_DECS_NUM) - 5;
-
-               BUILD_BUG_ON(DESCS_AREAS != 1);
-               cdd->cd = dma_alloc_coherent(dev, mem_decs,
-                               &cdd->descs_phys, GFP_KERNEL);
-               if (!cdd->cd)
-                       return -ENOMEM;
-
-               cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
-               cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));
-
-               idx += ALLOC_DECS_NUM;
-       }
-       return 0;
-}
-
-static void init_sched(struct cppi41_dd *cdd)
-{
-       unsigned ch;
-       unsigned word;
-       u32 reg;
-
-       word = 0;
-       cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
-       for (ch = 0; ch < cdd->n_chans; ch += 2) {
-
-               reg = SCHED_ENTRY0_CHAN(ch);
-               reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;
-
-               reg |= SCHED_ENTRY2_CHAN(ch + 1);
-               reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
-               cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
-               word++;
-       }
-       reg = cdd->n_chans * 2 - 1;
-       reg |= DMA_SCHED_CTRL_EN;
-       cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
-}
-
-static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
-{
-       int ret;
-
-       BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
-       cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
-                       &cdd->scratch_phys, GFP_KERNEL);
-       if (!cdd->qmgr_scratch)
-               return -ENOMEM;
-
-       cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
-       cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE);
-       cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
-
-       ret = init_descs(dev, cdd);
-       if (ret)
-               goto err_td;
-
-       cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
-       init_sched(cdd);
-
-       return 0;
-err_td:
-       deinit_cppi41(dev, cdd);
-       return ret;
-}
-
-static struct platform_driver cpp41_dma_driver;
-/*
- * The param format is:
- * X Y
- * X: Port
- * Y: 0 = RX else TX
- */
-#define INFO_PORT      0
-#define INFO_IS_TX     1
-
-static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
-{
-       struct cppi41_channel *cchan;
-       struct cppi41_dd *cdd;
-       const struct chan_queues *queues;
-       u32 *num = param;
-
-       if (chan->device->dev->driver != &cpp41_dma_driver.driver)
-               return false;
-
-       cchan = to_cpp41_chan(chan);
-
-       if (cchan->port_num != num[INFO_PORT])
-               return false;
-
-       if (cchan->is_tx && !num[INFO_IS_TX])
-               return false;
-       cdd = cchan->cdd;
-       if (cchan->is_tx)
-               queues = cdd->queues_tx;
-       else
-               queues = cdd->queues_rx;
-
-       BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
-                    ARRAY_SIZE(am335x_usb_queues_tx));
-       if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx)))
-               return false;
-
-       cchan->q_num = queues[cchan->port_num].submit;
-       cchan->q_comp_num = queues[cchan->port_num].complete;
-       return true;
-}
-
-static struct of_dma_filter_info cpp41_dma_info = {
-       .filter_fn = cpp41_dma_filter_fn,
-};
-
-static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
-               struct of_dma *ofdma)
-{
-       int count = dma_spec->args_count;
-       struct of_dma_filter_info *info = ofdma->of_dma_data;
-
-       if (!info || !info->filter_fn)
-               return NULL;
-
-       if (count != 2)
-               return NULL;
-
-       return dma_request_channel(info->dma_cap, info->filter_fn,
-                       &dma_spec->args[0]);
-}
-
-static const struct cppi_glue_infos am335x_usb_infos = {
-       .queues_rx = am335x_usb_queues_rx,
-       .queues_tx = am335x_usb_queues_tx,
-       .td_queue = { .submit = 31, .complete = 0 },
-       .first_completion_queue = 93,
-       .qmgr_num_pend = 5,
-};
-
-static const struct cppi_glue_infos da8xx_usb_infos = {
-       .queues_rx = da8xx_usb_queues_rx,
-       .queues_tx = da8xx_usb_queues_tx,
-       .td_queue = { .submit = 31, .complete = 0 },
-       .first_completion_queue = 24,
-       .qmgr_num_pend = 2,
-};
-
-static const struct of_device_id cppi41_dma_ids[] = {
-       { .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos},
-       { .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos},
-       {},
-};
-MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
-
-static const struct cppi_glue_infos *get_glue_info(struct device *dev)
-{
-       const struct of_device_id *of_id;
-
-       of_id = of_match_node(cppi41_dma_ids, dev->of_node);
-       if (!of_id)
-               return NULL;
-       return of_id->data;
-}
-
-#define CPPI41_DMA_BUSWIDTHS   (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
-                               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
-                               BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
-                               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
-
-static int cppi41_dma_probe(struct platform_device *pdev)
-{
-       struct cppi41_dd *cdd;
-       struct device *dev = &pdev->dev;
-       const struct cppi_glue_infos *glue_info;
-       struct resource *mem;
-       int index;
-       int irq;
-       int ret;
-
-       glue_info = get_glue_info(dev);
-       if (!glue_info)
-               return -EINVAL;
-
-       cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
-       if (!cdd)
-               return -ENOMEM;
-
-       dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
-       cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
-       cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
-       cdd->ddev.device_tx_status = cppi41_dma_tx_status;
-       cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
-       cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
-       cdd->ddev.device_terminate_all = cppi41_stop_chan;
-       cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
-       cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
-       cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-       cdd->ddev.dev = dev;
-       INIT_LIST_HEAD(&cdd->ddev.channels);
-       cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
-
-       index = of_property_match_string(dev->of_node,
-                                        "reg-names", "controller");
-       if (index < 0)
-               return index;
-
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
-       cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
-       if (IS_ERR(cdd->ctrl_mem))
-               return PTR_ERR(cdd->ctrl_mem);
-
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
-       cdd->sched_mem = devm_ioremap_resource(dev, mem);
-       if (IS_ERR(cdd->sched_mem))
-               return PTR_ERR(cdd->sched_mem);
-
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
-       cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
-       if (IS_ERR(cdd->qmgr_mem))
-               return PTR_ERR(cdd->qmgr_mem);
-
-       spin_lock_init(&cdd->lock);
-       INIT_LIST_HEAD(&cdd->pending);
-
-       platform_set_drvdata(pdev, cdd);
-
-       pm_runtime_enable(dev);
-       pm_runtime_set_autosuspend_delay(dev, 100);
-       pm_runtime_use_autosuspend(dev);
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0)
-               goto err_get_sync;
-
-       cdd->queues_rx = glue_info->queues_rx;
-       cdd->queues_tx = glue_info->queues_tx;
-       cdd->td_queue = glue_info->td_queue;
-       cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
-       cdd->first_completion_queue = glue_info->first_completion_queue;
-
-       ret = of_property_read_u32(dev->of_node,
-                                  "#dma-channels", &cdd->n_chans);
-       if (ret)
-               goto err_get_n_chans;
-
-       ret = init_cppi41(dev, cdd);
-       if (ret)
-               goto err_init_cppi;
-
-       ret = cppi41_add_chans(dev, cdd);
-       if (ret)
-               goto err_chans;
-
-       irq = irq_of_parse_and_map(dev->of_node, 0);
-       if (!irq) {
-               ret = -EINVAL;
-               goto err_chans;
-       }
-
-       ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED,
-                       dev_name(dev), cdd);
-       if (ret)
-               goto err_chans;
-       cdd->irq = irq;
-
-       ret = dma_async_device_register(&cdd->ddev);
-       if (ret)
-               goto err_chans;
-
-       ret = of_dma_controller_register(dev->of_node,
-                       cppi41_dma_xlate, &cpp41_dma_info);
-       if (ret)
-               goto err_of;
-
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
-
-       return 0;
-err_of:
-       dma_async_device_unregister(&cdd->ddev);
-err_chans:
-       deinit_cppi41(dev, cdd);
-err_init_cppi:
-       pm_runtime_dont_use_autosuspend(dev);
-err_get_n_chans:
-err_get_sync:
-       pm_runtime_put_sync(dev);
-       pm_runtime_disable(dev);
-       return ret;
-}
-
-static int cppi41_dma_remove(struct platform_device *pdev)
-{
-       struct cppi41_dd *cdd = platform_get_drvdata(pdev);
-       int error;
-
-       error = pm_runtime_get_sync(&pdev->dev);
-       if (error < 0)
-               dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
-                       __func__, error);
-       of_dma_controller_free(pdev->dev.of_node);
-       dma_async_device_unregister(&cdd->ddev);
-
-       devm_free_irq(&pdev->dev, cdd->irq, cdd);
-       deinit_cppi41(&pdev->dev, cdd);
-       pm_runtime_dont_use_autosuspend(&pdev->dev);
-       pm_runtime_put_sync(&pdev->dev);
-       pm_runtime_disable(&pdev->dev);
-       return 0;
-}
-
-static int __maybe_unused cppi41_suspend(struct device *dev)
-{
-       struct cppi41_dd *cdd = dev_get_drvdata(dev);
-
-       cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
-       disable_sched(cdd);
-
-       return 0;
-}
-
-static int __maybe_unused cppi41_resume(struct device *dev)
-{
-       struct cppi41_dd *cdd = dev_get_drvdata(dev);
-       struct cppi41_channel *c;
-       int i;
-
-       for (i = 0; i < DESCS_AREAS; i++)
-               cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
-
-       list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
-               if (!c->is_tx)
-                       cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
-
-       init_sched(cdd);
-
-       cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
-       cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
-       cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
-       cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
-
-       return 0;
-}
-
-static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
-{
-       struct cppi41_dd *cdd = dev_get_drvdata(dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&cdd->lock, flags);
-       cdd->is_suspended = true;
-       WARN_ON(!list_empty(&cdd->pending));
-       spin_unlock_irqrestore(&cdd->lock, flags);
-
-       return 0;
-}
-
-static int __maybe_unused cppi41_runtime_resume(struct device *dev)
-{
-       struct cppi41_dd *cdd = dev_get_drvdata(dev);
-       unsigned long flags;
-
-       spin_lock_irqsave(&cdd->lock, flags);
-       cdd->is_suspended = false;
-       cppi41_run_queue(cdd);
-       spin_unlock_irqrestore(&cdd->lock, flags);
-
-       return 0;
-}
-
-static const struct dev_pm_ops cppi41_pm_ops = {
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
-       SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
-                          cppi41_runtime_resume,
-                          NULL)
-};
-
-static struct platform_driver cpp41_dma_driver = {
-       .probe  = cppi41_dma_probe,
-       .remove = cppi41_dma_remove,
-       .driver = {
-               .name = "cppi41-dma-engine",
-               .pm = &cppi41_pm_ops,
-               .of_match_table = of_match_ptr(cppi41_dma_ids),
-       },
-};
-
-module_platform_driver(cpp41_dma_driver);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
index 2419fe524daac9e2d2edf67e5e16c6499cf16613..15b2453d2647fa49e790b6f063785a7d64d19497 100644 (file)
@@ -687,7 +687,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
        if (ret)
                goto err_unregister_device;
 
-       ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
+       ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
                dev_name(&pdev->dev), dmac);
        if (ret)
                goto err_unregister_of;
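
Requesting the interrupt with IRQF_SHARED means the line may be shared with other devices, so the handler has to check whether its own device actually raised the interrupt and return IRQ_NONE if not, and request_irq() must be given a non-NULL dev_id. A minimal sketch of that contract, with a hypothetical foo_dev and FOO_IRQ_STATUS register (not the real axi-dmac handler):

#include <linux/interrupt.h>
#include <linux/io.h>

#define FOO_IRQ_STATUS  0x00            /* hypothetical status register offset */

struct foo_dev {
        void __iomem *regs;
};

static irqreturn_t foo_irq_handler(int irq, void *devid)
{
        struct foo_dev *fd = devid;
        u32 pending = readl(fd->regs + FOO_IRQ_STATUS);

        if (!pending)
                return IRQ_NONE;        /* not ours; let other handlers on the line run */

        writel(pending, fd->regs + FOO_IRQ_STATUS);     /* acknowledge what we saw */
        return IRQ_HANDLED;
}

/* Registration: IRQF_SHARED requires a unique, non-NULL dev_id (fd here), e.g.
 * ret = request_irq(irq, foo_irq_handler, IRQF_SHARED, "foo", fd);
 */
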
index b9339524d5bd38859e00e701c062acda5dba69e7..aa1712beb0cc355d454948dddb42d615c10b9762 100644 (file)
@@ -468,6 +468,8 @@ static int dmatest_func(void *data)
        unsigned long long      total_len = 0;
        u8                      align = 0;
        bool                    is_memset = false;
+       dma_addr_t              *srcs;
+       dma_addr_t              *dma_pq;
 
        set_freezable();
 
@@ -551,6 +553,14 @@ static int dmatest_func(void *data)
 
        set_user_nice(current, 10);
 
+       srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+       if (!srcs)
+               goto err_dstbuf;
+
+       dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+       if (!dma_pq)
+               goto err_srcs_array;
+
        /*
         * src and dst buffers are freed by ourselves below
         */
@@ -561,7 +571,6 @@ static int dmatest_func(void *data)
               && !(params->iterations && total_tests >= params->iterations)) {
                struct dma_async_tx_descriptor *tx = NULL;
                struct dmaengine_unmap_data *um;
-               dma_addr_t srcs[src_cnt];
                dma_addr_t *dsts;
                unsigned int src_off, dst_off, len;
 
@@ -676,8 +685,6 @@ static int dmatest_func(void *data)
                                                      srcs, src_cnt,
                                                      len, flags);
                else if (thread->type == DMA_PQ) {
-                       dma_addr_t dma_pq[dst_cnt];
-
                        for (i = 0; i < dst_cnt; i++)
                                dma_pq[i] = dsts[i] + dst_off;
                        tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
@@ -779,6 +786,9 @@ static int dmatest_func(void *data)
        runtime = ktime_to_us(ktime);
 
        ret = 0;
+       kfree(dma_pq);
+err_srcs_array:
+       kfree(srcs);
 err_dstbuf:
        for (i = 0; thread->udsts[i]; i++)
                kfree(thread->udsts[i]);
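
The dmatest change above removes the on-stack variable-length arrays (dma_addr_t srcs[src_cnt] and dma_pq[dst_cnt]) by allocating both arrays with kcalloc() once before the test loop and freeing them on the exit path. A minimal sketch of the VLA-to-heap pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical helper: what used to be "dma_addr_t addrs[n];" on the stack. */
static int foo_run_with_addrs(unsigned int n)
{
        dma_addr_t *addrs;
        unsigned int i;

        addrs = kcalloc(n, sizeof(*addrs), GFP_KERNEL); /* zeroed heap array */
        if (!addrs)
                return -ENOMEM;

        for (i = 0; i < n; i++)
                addrs[i] = 0;           /* fill in the real DMA addresses here */

        /* ... submit the transfer using addrs[] ... */

        kfree(addrs);
        return 0;
}

kcalloc() also checks the n * size multiplication for overflow, which is one more reason it is preferred over an open-coded kzalloc(n * size, ...).
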
index bc31fe8020619d6e2bbff5ec1e49786bb3193d7b..f62dd0944908d2015032859c20a2580a67fcf189 100644 (file)
@@ -293,8 +293,7 @@ MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
 
 static int dw_suspend_late(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+       struct dw_dma_chip *chip = dev_get_drvdata(dev);
 
        dw_dma_disable(chip);
        clk_disable_unprepare(chip->clk);
@@ -304,8 +303,7 @@ static int dw_suspend_late(struct device *dev)
 
 static int dw_resume_early(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+       struct dw_dma_chip *chip = dev_get_drvdata(dev);
        int ret;
 
        ret = clk_prepare_enable(chip->clk);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
deleted file mode 100644 (file)
index 9bc722c..0000000
+++ /dev/null
@@ -1,2565 +0,0 @@
-/*
- * TI EDMA DMA engine driver
- *
- * Copyright 2012 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/edma.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/of.h>
-#include <linux/of_dma.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/pm_runtime.h>
-
-#include <linux/platform_data/edma.h>
-
-#include "dmaengine.h"
-#include "virt-dma.h"
-
-/* Offsets matching "struct edmacc_param" */
-#define PARM_OPT               0x00
-#define PARM_SRC               0x04
-#define PARM_A_B_CNT           0x08
-#define PARM_DST               0x0c
-#define PARM_SRC_DST_BIDX      0x10
-#define PARM_LINK_BCNTRLD      0x14
-#define PARM_SRC_DST_CIDX      0x18
-#define PARM_CCNT              0x1c
-
-#define PARM_SIZE              0x20
-
-/* Offsets for EDMA CC global channel registers and their shadows */
-#define SH_ER                  0x00    /* 64 bits */
-#define SH_ECR                 0x08    /* 64 bits */
-#define SH_ESR                 0x10    /* 64 bits */
-#define SH_CER                 0x18    /* 64 bits */
-#define SH_EER                 0x20    /* 64 bits */
-#define SH_EECR                        0x28    /* 64 bits */
-#define SH_EESR                        0x30    /* 64 bits */
-#define SH_SER                 0x38    /* 64 bits */
-#define SH_SECR                        0x40    /* 64 bits */
-#define SH_IER                 0x50    /* 64 bits */
-#define SH_IECR                        0x58    /* 64 bits */
-#define SH_IESR                        0x60    /* 64 bits */
-#define SH_IPR                 0x68    /* 64 bits */
-#define SH_ICR                 0x70    /* 64 bits */
-#define SH_IEVAL               0x78
-#define SH_QER                 0x80
-#define SH_QEER                        0x84
-#define SH_QEECR               0x88
-#define SH_QEESR               0x8c
-#define SH_QSER                        0x90
-#define SH_QSECR               0x94
-#define SH_SIZE                        0x200
-
-/* Offsets for EDMA CC global registers */
-#define EDMA_REV               0x0000
-#define EDMA_CCCFG             0x0004
-#define EDMA_QCHMAP            0x0200  /* 8 registers */
-#define EDMA_DMAQNUM           0x0240  /* 8 registers (4 on OMAP-L1xx) */
-#define EDMA_QDMAQNUM          0x0260
-#define EDMA_QUETCMAP          0x0280
-#define EDMA_QUEPRI            0x0284
-#define EDMA_EMR               0x0300  /* 64 bits */
-#define EDMA_EMCR              0x0308  /* 64 bits */
-#define EDMA_QEMR              0x0310
-#define EDMA_QEMCR             0x0314
-#define EDMA_CCERR             0x0318
-#define EDMA_CCERRCLR          0x031c
-#define EDMA_EEVAL             0x0320
-#define EDMA_DRAE              0x0340  /* 4 x 64 bits*/
-#define EDMA_QRAE              0x0380  /* 4 registers */
-#define EDMA_QUEEVTENTRY       0x0400  /* 2 x 16 registers */
-#define EDMA_QSTAT             0x0600  /* 2 registers */
-#define EDMA_QWMTHRA           0x0620
-#define EDMA_QWMTHRB           0x0624
-#define EDMA_CCSTAT            0x0640
-
-#define EDMA_M                 0x1000  /* global channel registers */
-#define EDMA_ECR               0x1008
-#define EDMA_ECRH              0x100C
-#define EDMA_SHADOW0           0x2000  /* 4 shadow regions */
-#define EDMA_PARM              0x4000  /* PaRAM entries */
-
-#define PARM_OFFSET(param_no)  (EDMA_PARM + ((param_no) << 5))
-
-#define EDMA_DCHMAP            0x0100  /* 64 registers */
-
-/* CCCFG register */
-#define GET_NUM_DMACH(x)       (x & 0x7) /* bits 0-2 */
-#define GET_NUM_QDMACH(x)      ((x & 0x70) >> 4) /* bits 4-6 */
-#define GET_NUM_PAENTRY(x)     ((x & 0x7000) >> 12) /* bits 12-14 */
-#define GET_NUM_EVQUE(x)       ((x & 0x70000) >> 16) /* bits 16-18 */
-#define GET_NUM_REGN(x)                ((x & 0x300000) >> 20) /* bits 20-21 */
-#define CHMAP_EXIST            BIT(24)
-
-/* CCSTAT register */
-#define EDMA_CCSTAT_ACTV       BIT(4)
-
-/*
- * Max of 20 segments per channel to conserve PaRAM slots
- * Also note that MAX_NR_SG should be atleast the no.of periods
- * that are required for ASoC, otherwise DMA prep calls will
- * fail. Today davinci-pcm is the only user of this driver and
- * requires atleast 17 slots, so we setup the default to 20.
- */
-#define MAX_NR_SG              20
-#define EDMA_MAX_SLOTS         MAX_NR_SG
-#define EDMA_DESCRIPTORS       16
-
-#define EDMA_CHANNEL_ANY               -1      /* for edma_alloc_channel() */
-#define EDMA_SLOT_ANY                  -1      /* for edma_alloc_slot() */
-#define EDMA_CONT_PARAMS_ANY            1001
-#define EDMA_CONT_PARAMS_FIXED_EXACT    1002
-#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
-
-/* PaRAM slots are laid out like this */
-struct edmacc_param {
-       u32 opt;
-       u32 src;
-       u32 a_b_cnt;
-       u32 dst;
-       u32 src_dst_bidx;
-       u32 link_bcntrld;
-       u32 src_dst_cidx;
-       u32 ccnt;
-} __packed;
-
-/* fields in edmacc_param.opt */
-#define SAM            BIT(0)
-#define DAM            BIT(1)
-#define SYNCDIM                BIT(2)
-#define STATIC         BIT(3)
-#define EDMA_FWID      (0x07 << 8)
-#define TCCMODE                BIT(11)
-#define EDMA_TCC(t)    ((t) << 12)
-#define TCINTEN                BIT(20)
-#define ITCINTEN       BIT(21)
-#define TCCHEN         BIT(22)
-#define ITCCHEN                BIT(23)
-
-struct edma_pset {
-       u32                             len;
-       dma_addr_t                      addr;
-       struct edmacc_param             param;
-};
-
-struct edma_desc {
-       struct virt_dma_desc            vdesc;
-       struct list_head                node;
-       enum dma_transfer_direction     direction;
-       int                             cyclic;
-       int                             absync;
-       int                             pset_nr;
-       struct edma_chan                *echan;
-       int                             processed;
-
-       /*
-        * The following 4 elements are used for residue accounting.
-        *
-        * - processed_stat: the number of SG elements we have traversed
-        * so far to cover accounting. This is updated directly to processed
-        * during edma_callback and is always <= processed, because processed
-        * refers to the number of pending transfers (programmed to the EDMA
-        * controller), whereas processed_stat tracks the number of transfers
-        * accounted for so far.
-        *
-        * - residue: The amount of bytes we have left to transfer for this desc
-        *
-        * - residue_stat: The residue in bytes of data we have covered
-        * so far for accounting. This is updated directly to residue
-        * during callbacks to keep it current.
-        *
-        * - sg_len: Tracks the length of the current intermediate transfer,
-        * this is required to update the residue during intermediate transfer
-        * completion callback.
-        */
-       int                             processed_stat;
-       u32                             sg_len;
-       u32                             residue;
-       u32                             residue_stat;
-
-       struct edma_pset                pset[0];
-};
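
The four accounting fields above interact in a simple way: each time an SG element is retired, processed_stat advances and residue/residue_stat drop by that element's length. A minimal standalone sketch (plain C, not driver code; the struct, helper and values below are made up purely for illustration):

    #include <stdio.h>

    /* Toy mirror of the accounting fields in struct edma_desc above. */
    struct acct {
            int processed_stat;        /* SG elements accounted for so far */
            unsigned int residue;      /* bytes left for the whole descriptor */
            unsigned int residue_stat; /* bytes left after the last accounting */
    };

    /* Account for one completed SG element of 'len' bytes. */
    static void account_sg_done(struct acct *a, unsigned int len)
    {
            a->processed_stat++;
            a->residue -= len;
            a->residue_stat = a->residue;
    }

    int main(void)
    {
            unsigned int sg_lens[] = { 4096, 4096, 2048 };
            struct acct a = { 0, 10240, 10240 };

            for (int i = 0; i < 3; i++) {
                    account_sg_done(&a, sg_lens[i]);
                    printf("after sg %d: residue=%u\n", i, a.residue);
            }
            return 0; /* prints 6144, 2048, 0 */
    }
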
-
-struct edma_cc;
-
-struct edma_tc {
-       struct device_node              *node;
-       u16                             id;
-};
-
-struct edma_chan {
-       struct virt_dma_chan            vchan;
-       struct list_head                node;
-       struct edma_desc                *edesc;
-       struct edma_cc                  *ecc;
-       struct edma_tc                  *tc;
-       int                             ch_num;
-       bool                            alloced;
-       bool                            hw_triggered;
-       int                             slot[EDMA_MAX_SLOTS];
-       int                             missed;
-       struct dma_slave_config         cfg;
-};
-
-struct edma_cc {
-       struct device                   *dev;
-       struct edma_soc_info            *info;
-       void __iomem                    *base;
-       int                             id;
-       bool                            legacy_mode;
-
-       /* eDMA3 resource information */
-       unsigned                        num_channels;
-       unsigned                        num_qchannels;
-       unsigned                        num_region;
-       unsigned                        num_slots;
-       unsigned                        num_tc;
-       bool                            chmap_exist;
-       enum dma_event_q                default_queue;
-
-       unsigned int                    ccint;
-       unsigned int                    ccerrint;
-
-       /*
-        * The slot_inuse bit for each PaRAM slot is clear unless the slot is
-        * in use by Linux or is reserved for use by the DSP.
-        */
-       unsigned long *slot_inuse;
-
-       struct dma_device               dma_slave;
-       struct dma_device               *dma_memcpy;
-       struct edma_chan                *slave_chans;
-       struct edma_tc                  *tc_list;
-       int                             dummy_slot;
-};
-
-/* dummy param set used to (re)initialize parameter RAM slots */
-static const struct edmacc_param dummy_paramset = {
-       .link_bcntrld = 0xffff,
-       .ccnt = 1,
-};
-
-#define EDMA_BINDING_LEGACY    0
-#define EDMA_BINDING_TPCC      1
-static const u32 edma_binding_type[] = {
-       [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
-       [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
-};
-
-static const struct of_device_id edma_of_ids[] = {
-       {
-               .compatible = "ti,edma3",
-               .data = &edma_binding_type[EDMA_BINDING_LEGACY],
-       },
-       {
-               .compatible = "ti,edma3-tpcc",
-               .data = &edma_binding_type[EDMA_BINDING_TPCC],
-       },
-       {}
-};
-MODULE_DEVICE_TABLE(of, edma_of_ids);
-
-static const struct of_device_id edma_tptc_of_ids[] = {
-       { .compatible = "ti,edma3-tptc", },
-       {}
-};
-MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
-
-static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
-{
-       return (unsigned int)__raw_readl(ecc->base + offset);
-}
-
-static inline void edma_write(struct edma_cc *ecc, int offset, int val)
-{
-       __raw_writel(val, ecc->base + offset);
-}
-
-static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
-                              unsigned or)
-{
-       unsigned val = edma_read(ecc, offset);
-
-       val &= and;
-       val |= or;
-       edma_write(ecc, offset, val);
-}
-
-static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
-{
-       unsigned val = edma_read(ecc, offset);
-
-       val &= and;
-       edma_write(ecc, offset, val);
-}
-
-static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
-{
-       unsigned val = edma_read(ecc, offset);
-
-       val |= or;
-       edma_write(ecc, offset, val);
-}
-
-static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
-                                          int i)
-{
-       return edma_read(ecc, offset + (i << 2));
-}
-
-static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
-                                   unsigned val)
-{
-       edma_write(ecc, offset + (i << 2), val);
-}
-
-static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
-                                    unsigned and, unsigned or)
-{
-       edma_modify(ecc, offset + (i << 2), and, or);
-}
-
-static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
-                                unsigned or)
-{
-       edma_or(ecc, offset + (i << 2), or);
-}
-
-static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
-                                 unsigned or)
-{
-       edma_or(ecc, offset + ((i * 2 + j) << 2), or);
-}
-
-static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
-                                    int j, unsigned val)
-{
-       edma_write(ecc, offset + ((i * 2 + j) << 2), val);
-}
-
-static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
-{
-       return edma_read(ecc, EDMA_SHADOW0 + offset);
-}
-
-static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
-                                                  int offset, int i)
-{
-       return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
-}
-
-static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
-                                     unsigned val)
-{
-       edma_write(ecc, EDMA_SHADOW0 + offset, val);
-}
-
-static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
-                                           int i, unsigned val)
-{
-       edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
-}
-
-static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
-                                          int param_no)
-{
-       return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
-}
-
-static inline void edma_param_write(struct edma_cc *ecc, int offset,
-                                   int param_no, unsigned val)
-{
-       edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
-}
-
-static inline void edma_param_modify(struct edma_cc *ecc, int offset,
-                                    int param_no, unsigned and, unsigned or)
-{
-       edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
-}
-
-static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
-                                 unsigned and)
-{
-       edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
-}
-
-static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
-                                unsigned or)
-{
-       edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
-}
-
-static inline void edma_set_bits(int offset, int len, unsigned long *p)
-{
-       for (; len > 0; len--)
-               set_bit(offset + (len - 1), p);
-}
-
-static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
-                                         int priority)
-{
-       int bit = queue_no * 4;
-
-       edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
-}
-
-static void edma_set_chmap(struct edma_chan *echan, int slot)
-{
-       struct edma_cc *ecc = echan->ecc;
-       int channel = EDMA_CHAN_SLOT(echan->ch_num);
-
-       if (ecc->chmap_exist) {
-               slot = EDMA_CHAN_SLOT(slot);
-               edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
-       }
-}
-
-static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
-{
-       struct edma_cc *ecc = echan->ecc;
-       int channel = EDMA_CHAN_SLOT(echan->ch_num);
-
-       if (enable) {
-               edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
-                                        BIT(channel & 0x1f));
-               edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
-                                        BIT(channel & 0x1f));
-       } else {
-               edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
-                                        BIT(channel & 0x1f));
-       }
-}
-
-/*
- * paRAM slot management functions
- */
-static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
-                           const struct edmacc_param *param)
-{
-       slot = EDMA_CHAN_SLOT(slot);
-       if (slot >= ecc->num_slots)
-               return;
-       memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
-}
-
-static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
-                          struct edmacc_param *param)
-{
-       slot = EDMA_CHAN_SLOT(slot);
-       if (slot >= ecc->num_slots)
-               return -EINVAL;
-       memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
-
-       return 0;
-}
-
-/**
- * edma_alloc_slot - allocate DMA parameter RAM
- * @ecc: pointer to edma_cc struct
- * @slot: specific slot to allocate; negative for "any unused slot"
- *
- * This allocates a parameter RAM slot, initializing it to hold a
- * dummy transfer.  Slots allocated using this routine have not been
- * mapped to a hardware DMA channel, and will normally be used by
- * linking to them from a slot associated with a DMA channel.
- *
- * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
- * slots may be allocated on behalf of DSP firmware.
- *
- * Returns the number of the slot, else negative errno.
- */
-static int edma_alloc_slot(struct edma_cc *ecc, int slot)
-{
-       if (slot >= 0) {
-               slot = EDMA_CHAN_SLOT(slot);
-               /* Requesting entry paRAM slot for a HW triggered channel. */
-               if (ecc->chmap_exist && slot < ecc->num_channels)
-                       slot = EDMA_SLOT_ANY;
-       }
-
-       if (slot < 0) {
-               if (ecc->chmap_exist)
-                       slot = 0;
-               else
-                       slot = ecc->num_channels;
-               for (;;) {
-                       slot = find_next_zero_bit(ecc->slot_inuse,
-                                                 ecc->num_slots,
-                                                 slot);
-                       if (slot == ecc->num_slots)
-                               return -ENOMEM;
-                       if (!test_and_set_bit(slot, ecc->slot_inuse))
-                               break;
-               }
-       } else if (slot >= ecc->num_slots) {
-               return -EINVAL;
-       } else if (test_and_set_bit(slot, ecc->slot_inuse)) {
-               return -EBUSY;
-       }
-
-       edma_write_slot(ecc, slot, &dummy_paramset);
-
-       return EDMA_CTLR_CHAN(ecc->id, slot);
-}
-
-static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
-{
-       slot = EDMA_CHAN_SLOT(slot);
-       if (slot >= ecc->num_slots)
-               return;
-
-       edma_write_slot(ecc, slot, &dummy_paramset);
-       clear_bit(slot, ecc->slot_inuse);
-}
-
-/**
- * edma_link - link one parameter RAM slot to another
- * @ecc: pointer to edma_cc struct
- * @from: parameter RAM slot originating the link
- * @to: parameter RAM slot which is the link target
- *
- * The originating slot should not be part of any active DMA transfer.
- */
-static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
-{
-       if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
-               dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");
-
-       from = EDMA_CHAN_SLOT(from);
-       to = EDMA_CHAN_SLOT(to);
-       if (from >= ecc->num_slots || to >= ecc->num_slots)
-               return;
-
-       edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
-                         PARM_OFFSET(to));
-}
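
Taken together, edma_alloc_slot(), edma_write_slot() and edma_link() are what the prep routines later in this file use to build a chain of PaRAM sets. A hedged sketch of that usage, relying only on the static helpers and fields defined in this file (error handling trimmed; the example function name and parameters are made up):

    /* Sketch only: assumes ecc, echan and both parameter sets are valid. */
    static void edma_link_two_sets_example(struct edma_cc *ecc,
                                           struct edma_chan *echan,
                                           const struct edmacc_param *set0,
                                           const struct edmacc_param *set1)
    {
            int slot1 = edma_alloc_slot(ecc, EDMA_SLOT_ANY);

            if (slot1 < 0)
                    return;

            edma_write_slot(ecc, echan->slot[0], set0);
            edma_write_slot(ecc, slot1, set1);
            /* When set0 completes, the controller reloads set1 from slot1 */
            edma_link(ecc, echan->slot[0], slot1);
            /* Terminate the chain: link the last slot to the dummy set */
            edma_link(ecc, slot1, ecc->dummy_slot);
    }
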
-
-/**
- * edma_get_position - returns the current transfer point
- * @ecc: pointer to edma_cc struct
- * @slot: parameter RAM slot being examined
- * @dst:  true selects the dest position, false the source
- *
- * Returns the position of the current active slot
- */
-static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
-                                   bool dst)
-{
-       u32 offs;
-
-       slot = EDMA_CHAN_SLOT(slot);
-       offs = PARM_OFFSET(slot);
-       offs += dst ? PARM_DST : PARM_SRC;
-
-       return edma_read(ecc, offs);
-}
-
-/*
- * Channels with event associations will be triggered by their hardware
- * events, and channels without such associations will be triggered by
- * software.  (At this writing there is no interface for using software
- * triggers except with channels that don't support hardware triggers.)
- */
-static void edma_start(struct edma_chan *echan)
-{
-       struct edma_cc *ecc = echan->ecc;
-       int channel = EDMA_CHAN_SLOT(echan->ch_num);
-       int j = (channel >> 5);
-       unsigned int mask = BIT(channel & 0x1f);
-
-       if (!echan->hw_triggered) {
-               /* EDMA channels without event association */
-               dev_dbg(ecc->dev, "ESR%d %08x\n", j,
-                       edma_shadow0_read_array(ecc, SH_ESR, j));
-               edma_shadow0_write_array(ecc, SH_ESR, j, mask);
-       } else {
-               /* EDMA channel with event association */
-               dev_dbg(ecc->dev, "ER%d %08x\n", j,
-                       edma_shadow0_read_array(ecc, SH_ER, j));
-               /* Clear any pending event or error */
-               edma_write_array(ecc, EDMA_ECR, j, mask);
-               edma_write_array(ecc, EDMA_EMCR, j, mask);
-               /* Clear any SER */
-               edma_shadow0_write_array(ecc, SH_SECR, j, mask);
-               edma_shadow0_write_array(ecc, SH_EESR, j, mask);
-               dev_dbg(ecc->dev, "EER%d %08x\n", j,
-                       edma_shadow0_read_array(ecc, SH_EER, j));
-       }
-}
-
-static void edma_stop(struct edma_chan *echan)
-{
-       struct edma_cc *ecc = echan->ecc;
-       int channel = EDMA_CHAN_SLOT(echan->ch_num);
-       int j = (channel >> 5);
-       unsigned int mask = BIT(channel & 0x1f);
-
-       edma_shadow0_write_array(ecc, SH_EECR, j, mask);
-       edma_shadow0_write_array(ecc, SH_ECR, j, mask);
-       edma_shadow0_write_array(ecc, SH_SECR, j, mask);
-       edma_write_array(ecc, EDMA_EMCR, j, mask);
-
-       /* clear possibly pending completion interrupt */
-       edma_shadow0_write_array(ecc, SH_ICR, j, mask);
-
-       dev_dbg(ecc->dev, "EER%d %08x\n", j,
-               edma_shadow0_read_array(ecc, SH_EER, j));
-
-       /* REVISIT:  consider guarding against inappropriate event
-        * chaining by overwriting with dummy_paramset.
-        */
-}
-
-/*
- * Temporarily disable EDMA hardware events on the specified channel,
- * preventing them from triggering new transfers
- */
-static void edma_pause(struct edma_chan *echan)
-{
-       int channel = EDMA_CHAN_SLOT(echan->ch_num);
-       unsigned int mask = BIT(channel & 0x1f);
-
-       edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
-}
-
-/* Re-enable EDMA hardware events on the specified channel.  */
-static void edma_resume(struct edma_chan *echan)
-{
-       int channel = EDMA_CHAN_SLOT(echan->ch_num);
-       unsigned int mask = BIT(channel & 0x1f);
-
-       edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
-}
-
-static void edma_trigger_channel(struct edma_chan *echan)
-{
-       struct edma_cc *ecc = echan->ecc;
-       int channel = EDMA_CHAN_SLOT(echan->ch_num);
-       unsigned int mask = BIT(channel & 0x1f);
-
-       edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
-
-       dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
-               edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
-}
-
-static void edma_clean_channel(struct edma_chan *echan)
-{
-       struct edma_cc *ecc = echan->ecc;
-       int channel = EDMA_CHAN_SLOT(echan->ch_num);
-       int j = (channel >> 5);
-       unsigned int mask = BIT(channel & 0x1f);
-
-       dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
-       edma_shadow0_write_array(ecc, SH_ECR, j, mask);
-       /* Clear the corresponding EMR bits */
-       edma_write_array(ecc, EDMA_EMCR, j, mask);
-       /* Clear any SER */
-       edma_shadow0_write_array(ecc, SH_SECR, j, mask);
-       edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
-}
-
-/* Move channel to a specific event queue */
-static void edma_assign_channel_eventq(struct edma_chan *echan,
-                                      enum dma_event_q eventq_no)
-{
-       struct edma_cc *ecc = echan->ecc;
-       int channel = EDMA_CHAN_SLOT(echan->ch_num);
-       int bit = (channel & 0x7) * 4;
-
-       /* default to low priority queue */
-       if (eventq_no == EVENTQ_DEFAULT)
-               eventq_no = ecc->default_queue;
-       if (eventq_no >= ecc->num_tc)
-               return;
-
-       eventq_no &= 7;
-       edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
-                         eventq_no << bit);
-}
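
The queue assignment above packs eight channels into each 32-bit DMAQNUM register, four bits per channel, of which the low three bits select the event queue. A small standalone illustration of that index arithmetic (not driver code; the channel number is a made-up example):

    #include <stdio.h>

    int main(void)
    {
            unsigned int channel = 13;                /* example channel */
            unsigned int reg = channel >> 3;          /* DMAQNUM register index: 1 */
            unsigned int shift = (channel & 0x7) * 4; /* bit offset in register: 20 */

            printf("EDMA_DMAQNUM[%u], bits %u-%u select the event queue\n",
                   reg, shift, shift + 2);
            return 0;
    }
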
-
-static int edma_alloc_channel(struct edma_chan *echan,
-                             enum dma_event_q eventq_no)
-{
-       struct edma_cc *ecc = echan->ecc;
-       int channel = EDMA_CHAN_SLOT(echan->ch_num);
-
-       /* ensure access through shadow region 0 */
-       edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
-
-       /* ensure no events are pending */
-       edma_stop(echan);
-
-       edma_setup_interrupt(echan, true);
-
-       edma_assign_channel_eventq(echan, eventq_no);
-
-       return 0;
-}
-
-static void edma_free_channel(struct edma_chan *echan)
-{
-       /* ensure no events are pending */
-       edma_stop(echan);
-       /* REVISIT should probably take out of shadow region 0 */
-       edma_setup_interrupt(echan, false);
-}
-
-static inline struct edma_cc *to_edma_cc(struct dma_device *d)
-{
-       return container_of(d, struct edma_cc, dma_slave);
-}
-
-static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
-{
-       return container_of(c, struct edma_chan, vchan.chan);
-}
-
-static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
-{
-       return container_of(tx, struct edma_desc, vdesc.tx);
-}
-
-static void edma_desc_free(struct virt_dma_desc *vdesc)
-{
-       kfree(container_of(vdesc, struct edma_desc, vdesc));
-}
-
-/* Dispatch a queued descriptor to the controller (caller holds lock) */
-static void edma_execute(struct edma_chan *echan)
-{
-       struct edma_cc *ecc = echan->ecc;
-       struct virt_dma_desc *vdesc;
-       struct edma_desc *edesc;
-       struct device *dev = echan->vchan.chan.device->dev;
-       int i, j, left, nslots;
-
-       if (!echan->edesc) {
-               /* Setup is needed for the first transfer */
-               vdesc = vchan_next_desc(&echan->vchan);
-               if (!vdesc)
-                       return;
-               list_del(&vdesc->node);
-               echan->edesc = to_edma_desc(&vdesc->tx);
-       }
-
-       edesc = echan->edesc;
-
-       /* Find out how many left */
-       left = edesc->pset_nr - edesc->processed;
-       nslots = min(MAX_NR_SG, left);
-       edesc->sg_len = 0;
-
-       /* Write descriptor PaRAM set(s) */
-       for (i = 0; i < nslots; i++) {
-               j = i + edesc->processed;
-               edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
-               edesc->sg_len += edesc->pset[j].len;
-               dev_vdbg(dev,
-                        "\n pset[%d]:\n"
-                        "  chnum\t%d\n"
-                        "  slot\t%d\n"
-                        "  opt\t%08x\n"
-                        "  src\t%08x\n"
-                        "  dst\t%08x\n"
-                        "  abcnt\t%08x\n"
-                        "  ccnt\t%08x\n"
-                        "  bidx\t%08x\n"
-                        "  cidx\t%08x\n"
-                        "  lkrld\t%08x\n",
-                        j, echan->ch_num, echan->slot[i],
-                        edesc->pset[j].param.opt,
-                        edesc->pset[j].param.src,
-                        edesc->pset[j].param.dst,
-                        edesc->pset[j].param.a_b_cnt,
-                        edesc->pset[j].param.ccnt,
-                        edesc->pset[j].param.src_dst_bidx,
-                        edesc->pset[j].param.src_dst_cidx,
-                        edesc->pset[j].param.link_bcntrld);
-               /* Link to the previous slot if not the last set */
-               if (i != (nslots - 1))
-                       edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
-       }
-
-       edesc->processed += nslots;
-
-       /*
-        * If this is the last set in the SG-list transaction, set up a
-        * link to the dummy slot; this results in all future events being
-        * absorbed, which is OK because we're done.
-        */
-       if (edesc->processed == edesc->pset_nr) {
-               if (edesc->cyclic)
-                       edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
-               else
-                       edma_link(ecc, echan->slot[nslots - 1],
-                                 echan->ecc->dummy_slot);
-       }
-
-       if (echan->missed) {
-               /*
-                * This happens due to setup times between intermediate
-                * transfers in long SG lists which have to be broken up into
-                * transfers of MAX_NR_SG
-                */
-               dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
-               edma_clean_channel(echan);
-               edma_stop(echan);
-               edma_start(echan);
-               edma_trigger_channel(echan);
-               echan->missed = 0;
-       } else if (edesc->processed <= MAX_NR_SG) {
-               dev_dbg(dev, "first transfer starting on channel %d\n",
-                       echan->ch_num);
-               edma_start(echan);
-       } else {
-               dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
-                       echan->ch_num, edesc->processed);
-               edma_resume(echan);
-       }
-}
-
-static int edma_terminate_all(struct dma_chan *chan)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-       unsigned long flags;
-       LIST_HEAD(head);
-
-       spin_lock_irqsave(&echan->vchan.lock, flags);
-
-       /*
-        * Stop DMA activity: we assume the callback will not be called
-        * after this function returns (even if it is, it will see that
-        * echan->edesc is NULL and exit).
-        */
-       if (echan->edesc) {
-               edma_stop(echan);
-               /* Move the cyclic channel back to default queue */
-               if (!echan->tc && echan->edesc->cyclic)
-                       edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
-
-               vchan_terminate_vdesc(&echan->edesc->vdesc);
-               echan->edesc = NULL;
-       }
-
-       vchan_get_all_descriptors(&echan->vchan, &head);
-       spin_unlock_irqrestore(&echan->vchan.lock, flags);
-       vchan_dma_desc_free_list(&echan->vchan, &head);
-
-       return 0;
-}
-
-static void edma_synchronize(struct dma_chan *chan)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-
-       vchan_synchronize(&echan->vchan);
-}
-
-static int edma_slave_config(struct dma_chan *chan,
-       struct dma_slave_config *cfg)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-
-       if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
-           cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
-               return -EINVAL;
-
-       if (cfg->src_maxburst > chan->device->max_burst ||
-           cfg->dst_maxburst > chan->device->max_burst)
-               return -EINVAL;
-
-       memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
-
-       return 0;
-}
-
-static int edma_dma_pause(struct dma_chan *chan)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-
-       if (!echan->edesc)
-               return -EINVAL;
-
-       edma_pause(echan);
-       return 0;
-}
-
-static int edma_dma_resume(struct dma_chan *chan)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-
-       edma_resume(echan);
-       return 0;
-}
-
-/*
- * A PaRAM set configuration abstraction used by other modes
- * @chan: Channel whose PaRAM set we're configuring
- * @pset: PaRAM set to initialize and set up
- * @src_addr: Source address of the DMA
- * @dst_addr: Destination address of the DMA
- * @burst: Number of acnt-sized units to send per event
- * @acnt: Width in bytes of one array element (the device FIFO width)
- * @dma_length: Total length of the DMA transfer
- * @direction: Direction of the transfer
- */
-static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
-                           dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
-                           unsigned int acnt, unsigned int dma_length,
-                           enum dma_transfer_direction direction)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-       struct device *dev = chan->device->dev;
-       struct edmacc_param *param = &epset->param;
-       int bcnt, ccnt, cidx;
-       int src_bidx, dst_bidx, src_cidx, dst_cidx;
-       int absync;
-
-       /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
-       if (!burst)
-               burst = 1;
-       /*
-        * If the maxburst is equal to the fifo width, use
-        * A-synced transfers. This allows for large contiguous
-        * buffer transfers using only one PaRAM set.
-        */
-       if (burst == 1) {
-               /*
-                * For the A-sync case, bcnt and ccnt are the remainder
-                * and quotient respectively of dividing
-                * (dma_length / acnt) by (SZ_64K - 1). This is so
-                * that in case bcnt overflows, we have ccnt to use.
-                * Note: bcntrld is used in A-sync transfers only, and it
-                * only applies when sg_dma_len(sg) >= SZ_64K.
-                * In that case, bcnt for the first frame is the
-                * remainder computed below, and for every successive
-                * frame bcnt is SZ_64K-1. This is assured by setting
-                * bcntrld = 0xffff at the end of this function.
-                */
-               absync = false;
-               ccnt = dma_length / acnt / (SZ_64K - 1);
-               bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
-               /*
-                * If bcnt is non-zero, we have a remainder and hence an
-                * extra frame to transfer, so increment ccnt.
-                */
-               if (bcnt)
-                       ccnt++;
-               else
-                       bcnt = SZ_64K - 1;
-               cidx = acnt;
-       } else {
-               /*
-                * If maxburst is greater than the fifo address_width,
-                * use AB-synced transfers where A count is the fifo
-                * address_width and B count is the maxburst. In this
-                * case, we are limited to transfers of C count frames
-                * of (address_width * maxburst) where C count is limited
-                * to SZ_64K-1. This places an upper bound on the length
-                * of an SG segment that can be handled.
-                */
-               absync = true;
-               bcnt = burst;
-               ccnt = dma_length / (acnt * bcnt);
-               if (ccnt > (SZ_64K - 1)) {
-                       dev_err(dev, "Exceeded max SG segment size\n");
-                       return -EINVAL;
-               }
-               cidx = acnt * bcnt;
-       }
-
-       epset->len = dma_length;
-
-       if (direction == DMA_MEM_TO_DEV) {
-               src_bidx = acnt;
-               src_cidx = cidx;
-               dst_bidx = 0;
-               dst_cidx = 0;
-               epset->addr = src_addr;
-       } else if (direction == DMA_DEV_TO_MEM)  {
-               src_bidx = 0;
-               src_cidx = 0;
-               dst_bidx = acnt;
-               dst_cidx = cidx;
-               epset->addr = dst_addr;
-       } else if (direction == DMA_MEM_TO_MEM)  {
-               src_bidx = acnt;
-               src_cidx = cidx;
-               dst_bidx = acnt;
-               dst_cidx = cidx;
-       } else {
-               dev_err(dev, "%s: direction not implemented yet\n", __func__);
-               return -EINVAL;
-       }
-
-       param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
-       /* Configure A or AB synchronized transfers */
-       if (absync)
-               param->opt |= SYNCDIM;
-
-       param->src = src_addr;
-       param->dst = dst_addr;
-
-       param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
-       param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
-
-       param->a_b_cnt = bcnt << 16 | acnt;
-       param->ccnt = ccnt;
-       /*
-        * The only time the (bcntrld) auto reload is required is in the
-        * A-sync case, and there the needed reload value is always
-        * SZ_64K-1. 'link' is initially set to no-link (0xffff) and is
-        * populated later by edma_execute.
-        */
-       param->link_bcntrld = 0xffffffff;
-       return absync;
-}
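
A standalone worked example of the counter arithmetic in edma_config_pset() (plain C, not driver code; the length, width and burst are made-up values): for burst == 1 the byte count is split into acnt-sized arrays with bcnt capped at SZ_64K - 1, while for larger bursts bcnt equals the burst and ccnt counts frames:

    #include <stdio.h>

    #define SZ_64K 0x10000

    int main(void)
    {
            /* A-sync: 200000 bytes, 4-byte FIFO width, burst of 1 */
            unsigned int acnt = 4, dma_length = 200000;
            unsigned int ccnt = dma_length / acnt / (SZ_64K - 1);        /* 0 */
            unsigned int bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); /* 50000 */

            if (bcnt)
                    ccnt++;               /* remainder needs one extra frame */
            else
                    bcnt = SZ_64K - 1;
            printf("A-sync:  acnt=%u bcnt=%u ccnt=%u\n", acnt, bcnt, ccnt);

            /* AB-sync: same length with a burst of 16 words */
            unsigned int burst = 16;
            bcnt = burst;
            ccnt = dma_length / (acnt * bcnt);    /* 3125 frames */
            printf("AB-sync: acnt=%u bcnt=%u ccnt=%u\n", acnt, bcnt, ccnt);
            return 0;
    }
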
-
-static struct dma_async_tx_descriptor *edma_prep_slave_sg(
-       struct dma_chan *chan, struct scatterlist *sgl,
-       unsigned int sg_len, enum dma_transfer_direction direction,
-       unsigned long tx_flags, void *context)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-       struct device *dev = chan->device->dev;
-       struct edma_desc *edesc;
-       dma_addr_t src_addr = 0, dst_addr = 0;
-       enum dma_slave_buswidth dev_width;
-       u32 burst;
-       struct scatterlist *sg;
-       int i, nslots, ret;
-
-       if (unlikely(!echan || !sgl || !sg_len))
-               return NULL;
-
-       if (direction == DMA_DEV_TO_MEM) {
-               src_addr = echan->cfg.src_addr;
-               dev_width = echan->cfg.src_addr_width;
-               burst = echan->cfg.src_maxburst;
-       } else if (direction == DMA_MEM_TO_DEV) {
-               dst_addr = echan->cfg.dst_addr;
-               dev_width = echan->cfg.dst_addr_width;
-               burst = echan->cfg.dst_maxburst;
-       } else {
-               dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
-               return NULL;
-       }
-
-       if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
-               dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
-               return NULL;
-       }
-
-       edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC);
-       if (!edesc)
-               return NULL;
-
-       edesc->pset_nr = sg_len;
-       edesc->residue = 0;
-       edesc->direction = direction;
-       edesc->echan = echan;
-
-       /* Allocate a PaRAM slot, if needed */
-       nslots = min_t(unsigned, MAX_NR_SG, sg_len);
-
-       for (i = 0; i < nslots; i++) {
-               if (echan->slot[i] < 0) {
-                       echan->slot[i] =
-                               edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
-                       if (echan->slot[i] < 0) {
-                               kfree(edesc);
-                               dev_err(dev, "%s: Failed to allocate slot\n",
-                                       __func__);
-                               return NULL;
-                       }
-               }
-       }
-
-       /* Configure PaRAM sets for each SG */
-       for_each_sg(sgl, sg, sg_len, i) {
-               /* Get address for each SG */
-               if (direction == DMA_DEV_TO_MEM)
-                       dst_addr = sg_dma_address(sg);
-               else
-                       src_addr = sg_dma_address(sg);
-
-               ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
-                                      dst_addr, burst, dev_width,
-                                      sg_dma_len(sg), direction);
-               if (ret < 0) {
-                       kfree(edesc);
-                       return NULL;
-               }
-
-               edesc->absync = ret;
-               edesc->residue += sg_dma_len(sg);
-
-               if (i == sg_len - 1)
-                       /* Enable completion interrupt */
-                       edesc->pset[i].param.opt |= TCINTEN;
-               else if (!((i+1) % MAX_NR_SG))
-                       /*
-                        * Enable early completion interrupt for the
-                        * intermediate set. In this case the driver will be
-                        * notified when the paRAM set is submitted to TC. This
-                        * will allow more time to set up the next set of slots.
-                        */
-                       edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
-       }
-       edesc->residue_stat = edesc->residue;
-
-       return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
-}
-
-static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
-       struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
-       size_t len, unsigned long tx_flags)
-{
-       int ret, nslots;
-       struct edma_desc *edesc;
-       struct device *dev = chan->device->dev;
-       struct edma_chan *echan = to_edma_chan(chan);
-       unsigned int width, pset_len, array_size;
-
-       if (unlikely(!echan || !len))
-               return NULL;
-
-       /* Align the array size (acnt block) with the transfer properties */
-       switch (__ffs((src | dest | len))) {
-       case 0:
-               array_size = SZ_32K - 1;
-               break;
-       case 1:
-               array_size = SZ_32K - 2;
-               break;
-       default:
-               array_size = SZ_32K - 4;
-               break;
-       }
-
-       if (len < SZ_64K) {
-               /*
-                * Transfer size less than 64K can be handled with one paRAM
-                * slot and with one burst.
-                * ACNT = length
-                */
-               width = len;
-               pset_len = len;
-               nslots = 1;
-       } else {
-               /*
-                * Transfer sizes bigger than 64K are handled with a maximum of
-                * two paRAM slots.
-                * slot1: (full_length / 32767) bursts of 32767 bytes.
-                *        ACNT = 32767, length1: (full_length / 32767) * 32767
-                * slot2: the remaining amount of data after slot1.
-                *        ACNT = full_length - length1, length2 = ACNT
-                *
-                * When the full_length is a multiple of 32767 one slot can be
-                * used to complete the transfer.
-                */
-               width = array_size;
-               pset_len = rounddown(len, width);
-               /* One slot is enough for lengths multiple of (SZ_32K -1) */
-               if (unlikely(pset_len == len))
-                       nslots = 1;
-               else
-                       nslots = 2;
-       }
-
-       edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
-       if (!edesc)
-               return NULL;
-
-       edesc->pset_nr = nslots;
-       edesc->residue = edesc->residue_stat = len;
-       edesc->direction = DMA_MEM_TO_MEM;
-       edesc->echan = echan;
-
-       ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
-                              width, pset_len, DMA_MEM_TO_MEM);
-       if (ret < 0) {
-               kfree(edesc);
-               return NULL;
-       }
-
-       edesc->absync = ret;
-
-       edesc->pset[0].param.opt |= ITCCHEN;
-       if (nslots == 1) {
-               /* Enable transfer complete interrupt */
-               edesc->pset[0].param.opt |= TCINTEN;
-       } else {
-               /* Enable transfer complete chaining for the first slot */
-               edesc->pset[0].param.opt |= TCCHEN;
-
-               if (echan->slot[1] < 0) {
-                       echan->slot[1] = edma_alloc_slot(echan->ecc,
-                                                        EDMA_SLOT_ANY);
-                       if (echan->slot[1] < 0) {
-                               kfree(edesc);
-                               dev_err(dev, "%s: Failed to allocate slot\n",
-                                       __func__);
-                               return NULL;
-                       }
-               }
-               dest += pset_len;
-               src += pset_len;
-               pset_len = width = len % array_size;
-
-               ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
-                                      width, pset_len, DMA_MEM_TO_MEM);
-               if (ret < 0) {
-                       kfree(edesc);
-                       return NULL;
-               }
-
-               edesc->pset[1].param.opt |= ITCCHEN;
-               edesc->pset[1].param.opt |= TCINTEN;
-       }
-
-       return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
-}
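
The slot split in edma_prep_dma_memcpy() can be illustrated with a standalone sketch (plain C, not driver code; the length and the assumption that src, dest and len are all 4-byte aligned are made up for the example): transfers below 64K fit in one slot with ACNT = len, while larger ones are rounded down to a multiple of array_size for the first slot and the remainder goes into the second:

    #include <stdio.h>

    #define SZ_32K 0x8000
    #define SZ_64K 0x10000

    int main(void)
    {
            unsigned int len = 100000;              /* example: > 64K */
            unsigned int array_size = SZ_32K - 4;   /* 4-byte aligned case */
            unsigned int pset0_len = (len / array_size) * array_size; /* 98292 */
            unsigned int pset1_len = len - pset0_len;                 /* 1708 */
            int nslots = pset1_len ? 2 : 1;

            if (len < SZ_64K) {
                    pset0_len = len;
                    pset1_len = 0;
                    nslots = 1;
            }
            printf("nslots=%d slot0=%u bytes slot1=%u bytes\n",
                   nslots, pset0_len, pset1_len);
            return 0;
    }
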
-
-static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
-       struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-       size_t period_len, enum dma_transfer_direction direction,
-       unsigned long tx_flags)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-       struct device *dev = chan->device->dev;
-       struct edma_desc *edesc;
-       dma_addr_t src_addr, dst_addr;
-       enum dma_slave_buswidth dev_width;
-       bool use_intermediate = false;
-       u32 burst;
-       int i, ret, nslots;
-
-       if (unlikely(!echan || !buf_len || !period_len))
-               return NULL;
-
-       if (direction == DMA_DEV_TO_MEM) {
-               src_addr = echan->cfg.src_addr;
-               dst_addr = buf_addr;
-               dev_width = echan->cfg.src_addr_width;
-               burst = echan->cfg.src_maxburst;
-       } else if (direction == DMA_MEM_TO_DEV) {
-               src_addr = buf_addr;
-               dst_addr = echan->cfg.dst_addr;
-               dev_width = echan->cfg.dst_addr_width;
-               burst = echan->cfg.dst_maxburst;
-       } else {
-               dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
-               return NULL;
-       }
-
-       if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
-               dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
-               return NULL;
-       }
-
-       if (unlikely(buf_len % period_len)) {
-               dev_err(dev, "Period should be multiple of Buffer length\n");
-               return NULL;
-       }
-
-       nslots = (buf_len / period_len) + 1;
-
-       /*
-        * Cyclic DMA users such as audio cannot tolerate delays introduced
-        * by cases where the number of periods is more than the maximum
-        * number of SGs the EDMA driver can handle at a time. For DMA types
-        * such as Slave SGs, such delays are tolerable and synchronized,
-        * but the synchronization is difficult to achieve with Cyclic and
-        * cannot be guaranteed, so we error out early.
-        */
-       if (nslots > MAX_NR_SG) {
-               /*
-                * If the burst and period sizes are the same, we can put
-                * the full buffer into a single period and activate
-                * intermediate interrupts. This will produce interrupts
-                * after each burst, which is also after each desired period.
-                */
-               if (burst == period_len) {
-                       period_len = buf_len;
-                       nslots = 2;
-                       use_intermediate = true;
-               } else {
-                       return NULL;
-               }
-       }
-
-       edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
-       if (!edesc)
-               return NULL;
-
-       edesc->cyclic = 1;
-       edesc->pset_nr = nslots;
-       edesc->residue = edesc->residue_stat = buf_len;
-       edesc->direction = direction;
-       edesc->echan = echan;
-
-       dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
-               __func__, echan->ch_num, nslots, period_len, buf_len);
-
-       for (i = 0; i < nslots; i++) {
-               /* Allocate a PaRAM slot, if needed */
-               if (echan->slot[i] < 0) {
-                       echan->slot[i] =
-                               edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
-                       if (echan->slot[i] < 0) {
-                               kfree(edesc);
-                               dev_err(dev, "%s: Failed to allocate slot\n",
-                                       __func__);
-                               return NULL;
-                       }
-               }
-
-               if (i == nslots - 1) {
-                       memcpy(&edesc->pset[i], &edesc->pset[0],
-                              sizeof(edesc->pset[0]));
-                       break;
-               }
-
-               ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
-                                      dst_addr, burst, dev_width, period_len,
-                                      direction);
-               if (ret < 0) {
-                       kfree(edesc);
-                       return NULL;
-               }
-
-               if (direction == DMA_DEV_TO_MEM)
-                       dst_addr += period_len;
-               else
-                       src_addr += period_len;
-
-               dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
-               dev_vdbg(dev,
-                       "\n pset[%d]:\n"
-                       "  chnum\t%d\n"
-                       "  slot\t%d\n"
-                       "  opt\t%08x\n"
-                       "  src\t%08x\n"
-                       "  dst\t%08x\n"
-                       "  abcnt\t%08x\n"
-                       "  ccnt\t%08x\n"
-                       "  bidx\t%08x\n"
-                       "  cidx\t%08x\n"
-                       "  lkrld\t%08x\n",
-                       i, echan->ch_num, echan->slot[i],
-                       edesc->pset[i].param.opt,
-                       edesc->pset[i].param.src,
-                       edesc->pset[i].param.dst,
-                       edesc->pset[i].param.a_b_cnt,
-                       edesc->pset[i].param.ccnt,
-                       edesc->pset[i].param.src_dst_bidx,
-                       edesc->pset[i].param.src_dst_cidx,
-                       edesc->pset[i].param.link_bcntrld);
-
-               edesc->absync = ret;
-
-               /*
-                * Enable period interrupt only if it is requested
-                */
-               if (tx_flags & DMA_PREP_INTERRUPT) {
-                       edesc->pset[i].param.opt |= TCINTEN;
-
-                       /* Also enable intermediate interrupts if necessary */
-                       if (use_intermediate)
-                               edesc->pset[i].param.opt |= ITCINTEN;
-               }
-       }
-
-       /* Place the cyclic channel to highest priority queue */
-       if (!echan->tc)
-               edma_assign_channel_eventq(echan, EVENTQ_0);
-
-       return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
-}
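
A standalone sketch of the period math in edma_prep_dma_cyclic() (plain C, not driver code; buffer, period and burst values are made up, and the burst/period comparison simply mirrors the code above): one slot per period plus one for reloading, collapsing to two slots with intermediate interrupts when there are too many periods and burst == period_len:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NR_SG 20

    int main(void)
    {
            unsigned int buf_len = 48000, period_len = 960, burst = 960;
            unsigned int nslots = buf_len / period_len + 1;  /* 51: too many */
            bool use_intermediate = false;

            if (nslots > MAX_NR_SG && burst == period_len) {
                    /* one big period; completion signalled per burst instead */
                    period_len = buf_len;
                    nslots = 2;
                    use_intermediate = true;
            }
            printf("period_len=%u nslots=%u intermediate=%d\n",
                   period_len, nslots, use_intermediate);
            return 0;
    }
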
-
-static void edma_completion_handler(struct edma_chan *echan)
-{
-       struct device *dev = echan->vchan.chan.device->dev;
-       struct edma_desc *edesc;
-
-       spin_lock(&echan->vchan.lock);
-       edesc = echan->edesc;
-       if (edesc) {
-               if (edesc->cyclic) {
-                       vchan_cyclic_callback(&edesc->vdesc);
-                       spin_unlock(&echan->vchan.lock);
-                       return;
-               } else if (edesc->processed == edesc->pset_nr) {
-                       edesc->residue = 0;
-                       edma_stop(echan);
-                       vchan_cookie_complete(&edesc->vdesc);
-                       echan->edesc = NULL;
-
-                       dev_dbg(dev, "Transfer completed on channel %d\n",
-                               echan->ch_num);
-               } else {
-                       dev_dbg(dev, "Sub transfer completed on channel %d\n",
-                               echan->ch_num);
-
-                       edma_pause(echan);
-
-                       /* Update statistics for tx_status */
-                       edesc->residue -= edesc->sg_len;
-                       edesc->residue_stat = edesc->residue;
-                       edesc->processed_stat = edesc->processed;
-               }
-               edma_execute(echan);
-       }
-
-       spin_unlock(&echan->vchan.lock);
-}
-
-/* eDMA interrupt handler */
-static irqreturn_t dma_irq_handler(int irq, void *data)
-{
-       struct edma_cc *ecc = data;
-       int ctlr;
-       u32 sh_ier;
-       u32 sh_ipr;
-       u32 bank;
-
-       ctlr = ecc->id;
-       if (ctlr < 0)
-               return IRQ_NONE;
-
-       dev_vdbg(ecc->dev, "dma_irq_handler\n");
-
-       sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
-       if (!sh_ipr) {
-               sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
-               if (!sh_ipr)
-                       return IRQ_NONE;
-               sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
-               bank = 1;
-       } else {
-               sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
-               bank = 0;
-       }
-
-       do {
-               u32 slot;
-               u32 channel;
-
-               slot = __ffs(sh_ipr);
-               sh_ipr &= ~(BIT(slot));
-
-               if (sh_ier & BIT(slot)) {
-                       channel = (bank << 5) | slot;
-                       /* Clear the corresponding IPR bits */
-                       edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
-                       edma_completion_handler(&ecc->slave_chans[channel]);
-               }
-       } while (sh_ipr);
-
-       edma_shadow0_write(ecc, SH_IEVAL, 1);
-       return IRQ_HANDLED;
-}
-
-static void edma_error_handler(struct edma_chan *echan)
-{
-       struct edma_cc *ecc = echan->ecc;
-       struct device *dev = echan->vchan.chan.device->dev;
-       struct edmacc_param p;
-       int err;
-
-       if (!echan->edesc)
-               return;
-
-       spin_lock(&echan->vchan.lock);
-
-       err = edma_read_slot(ecc, echan->slot[0], &p);
-
-       /*
-        * Defer the re-issue via the missed flag; it is guaranteed to be
-        * handled later, because either:
-        * (1) we finished transmitting an intermediate slot and
-        *     edma_execute is coming up, or
-        * (2) we finished the current transfer and issue_pending will
-        *     call edma_execute.
-        *
-        * Important note: issuing here can be dangerous and can lead
-        * to some nasty recursion when we are in a NULL slot. So we
-        * avoid doing so and set the missed flag instead.
-        */
-       if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
-               dev_dbg(dev, "Error on null slot, setting miss\n");
-               echan->missed = 1;
-       } else {
-               /*
-                * The slot is already programmed but the event got
-                * missed, so it's safe to issue it here.
-                */
-               dev_dbg(dev, "Missed event, TRIGGERING\n");
-               edma_clean_channel(echan);
-               edma_stop(echan);
-               edma_start(echan);
-               edma_trigger_channel(echan);
-       }
-       spin_unlock(&echan->vchan.lock);
-}
-
-static inline bool edma_error_pending(struct edma_cc *ecc)
-{
-       if (edma_read_array(ecc, EDMA_EMR, 0) ||
-           edma_read_array(ecc, EDMA_EMR, 1) ||
-           edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
-               return true;
-
-       return false;
-}
-
-/* eDMA error interrupt handler */
-static irqreturn_t dma_ccerr_handler(int irq, void *data)
-{
-       struct edma_cc *ecc = data;
-       int i, j;
-       int ctlr;
-       unsigned int cnt = 0;
-       unsigned int val;
-
-       ctlr = ecc->id;
-       if (ctlr < 0)
-               return IRQ_NONE;
-
-       dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
-
-       if (!edma_error_pending(ecc)) {
-               /*
-                * The registers indicate no pending error event but the irq
-                * handler has been called.
-                * Ask eDMA to re-evaluate the error registers.
-                */
-               dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
-                       __func__);
-               edma_write(ecc, EDMA_EEVAL, 1);
-               return IRQ_NONE;
-       }
-
-       while (1) {
-               /* Event missed register(s) */
-               for (j = 0; j < 2; j++) {
-                       unsigned long emr;
-
-                       val = edma_read_array(ecc, EDMA_EMR, j);
-                       if (!val)
-                               continue;
-
-                       dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
-                       emr = val;
-                       for (i = find_next_bit(&emr, 32, 0); i < 32;
-                            i = find_next_bit(&emr, 32, i + 1)) {
-                               int k = (j << 5) + i;
-
-                               /* Clear the corresponding EMR bits */
-                               edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
-                               /* Clear any SER */
-                               edma_shadow0_write_array(ecc, SH_SECR, j,
-                                                        BIT(i));
-                               edma_error_handler(&ecc->slave_chans[k]);
-                       }
-               }
-
-               val = edma_read(ecc, EDMA_QEMR);
-               if (val) {
-                       dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
-                       /* Not reported, just clear the interrupt reason. */
-                       edma_write(ecc, EDMA_QEMCR, val);
-                       edma_shadow0_write(ecc, SH_QSECR, val);
-               }
-
-               val = edma_read(ecc, EDMA_CCERR);
-               if (val) {
-                       dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
-                       /* Not reported, just clear the interrupt reason. */
-                       edma_write(ecc, EDMA_CCERRCLR, val);
-               }
-
-               if (!edma_error_pending(ecc))
-                       break;
-               cnt++;
-               if (cnt > 10)
-                       break;
-       }
-       edma_write(ecc, EDMA_EEVAL, 1);
-       return IRQ_HANDLED;
-}
-
-/* Alloc channel resources */
-static int edma_alloc_chan_resources(struct dma_chan *chan)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-       struct edma_cc *ecc = echan->ecc;
-       struct device *dev = ecc->dev;
-       enum dma_event_q eventq_no = EVENTQ_DEFAULT;
-       int ret;
-
-       if (echan->tc) {
-               eventq_no = echan->tc->id;
-       } else if (ecc->tc_list) {
-               /* memcpy channel */
-               echan->tc = &ecc->tc_list[ecc->info->default_queue];
-               eventq_no = echan->tc->id;
-       }
-
-       ret = edma_alloc_channel(echan, eventq_no);
-       if (ret)
-               return ret;
-
-       echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
-       if (echan->slot[0] < 0) {
-               dev_err(dev, "Entry slot allocation failed for channel %u\n",
-                       EDMA_CHAN_SLOT(echan->ch_num));
-               ret = echan->slot[0];
-               goto err_slot;
-       }
-
-       /* Set up channel -> slot mapping for the entry slot */
-       edma_set_chmap(echan, echan->slot[0]);
-       echan->alloced = true;
-
-       dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
-               EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
-               echan->hw_triggered ? "HW" : "SW");
-
-       return 0;
-
-err_slot:
-       edma_free_channel(echan);
-       return ret;
-}
-
-/* Free channel resources */
-static void edma_free_chan_resources(struct dma_chan *chan)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-       struct device *dev = echan->ecc->dev;
-       int i;
-
-       /* Terminate transfers */
-       edma_stop(echan);
-
-       vchan_free_chan_resources(&echan->vchan);
-
-       /* Free EDMA PaRAM slots */
-       for (i = 0; i < EDMA_MAX_SLOTS; i++) {
-               if (echan->slot[i] >= 0) {
-                       edma_free_slot(echan->ecc, echan->slot[i]);
-                       echan->slot[i] = -1;
-               }
-       }
-
-       /* Set entry slot to the dummy slot */
-       edma_set_chmap(echan, echan->ecc->dummy_slot);
-
-       /* Free EDMA channel */
-       if (echan->alloced) {
-               edma_free_channel(echan);
-               echan->alloced = false;
-       }
-
-       echan->tc = NULL;
-       echan->hw_triggered = false;
-
-       dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
-               EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
-}
-
-/* Send pending descriptor to hardware */
-static void edma_issue_pending(struct dma_chan *chan)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-       unsigned long flags;
-
-       spin_lock_irqsave(&echan->vchan.lock, flags);
-       if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
-               edma_execute(echan);
-       spin_unlock_irqrestore(&echan->vchan.lock, flags);
-}
-
-/*
- * This limit exists to avoid a possible infinite loop when waiting for proof
- * that a particular transfer is completed. This limit can be hit if there
- * are large bursts to/from slow devices or the CPU is never able to catch
- * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
- * RX-FIFO, as many as 55 loops have been seen.
- */
-#define EDMA_MAX_TR_WAIT_LOOPS 1000
-
-static u32 edma_residue(struct edma_desc *edesc)
-{
-       bool dst = edesc->direction == DMA_DEV_TO_MEM;
-       int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
-       struct edma_chan *echan = edesc->echan;
-       struct edma_pset *pset = edesc->pset;
-       dma_addr_t done, pos;
-       int i;
-
-       /*
-        * We always read the dst/src position from the first PaRAM
-        * pset. That's the one which is active now.
-        */
-       pos = edma_get_position(echan->ecc, echan->slot[0], dst);
-
-       /*
-        * "pos" may represent a transfer request that is still being
-        * processed by the EDMACC or EDMATC. We will busy wait until
-        * any one of the situations occurs:
-        *   1. the DMA hardware is idle
-        *   2. a new transfer request is set up
-        *   3. we hit the loop limit
-        */
-       while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
-               /* check if a new transfer request is set up */
-               if (edma_get_position(echan->ecc,
-                                     echan->slot[0], dst) != pos) {
-                       break;
-               }
-
-               if (!--loop_count) {
-                       dev_dbg_ratelimited(echan->vchan.chan.device->dev,
-                               "%s: timeout waiting for PaRAM update\n",
-                               __func__);
-                       break;
-               }
-
-               cpu_relax();
-       }
-
-       /*
-        * Cyclic is simple. Just subtract pset[0].addr from pos.
-        *
-        * We never update edesc->residue in the cyclic case, so this
-        * reports the remaining room up to the end of the circular
-        * buffer.
-        */
-       if (edesc->cyclic) {
-               done = pos - pset->addr;
-               edesc->residue_stat = edesc->residue - done;
-               return edesc->residue_stat;
-       }
-
-       /*
-        * For SG operation we resume scanning from the last
-        * processed pset.
-        */
-       pset += edesc->processed_stat;
-
-       for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
-               /*
-                * If we are inside this pset address range, we know
-                * this is the active one. Get the current delta and
-                * stop walking the psets.
-                */
-               if (pos >= pset->addr && pos < pset->addr + pset->len)
-                       return edesc->residue_stat - (pos - pset->addr);
-
-               /* Otherwise mark it done and update residue_stat. */
-               edesc->processed_stat++;
-               edesc->residue_stat -= pset->len;
-       }
-       return edesc->residue_stat;
-}
-
-/* Check request completion status */
-static enum dma_status edma_tx_status(struct dma_chan *chan,
-                                     dma_cookie_t cookie,
-                                     struct dma_tx_state *txstate)
-{
-       struct edma_chan *echan = to_edma_chan(chan);
-       struct virt_dma_desc *vdesc;
-       enum dma_status ret;
-       unsigned long flags;
-
-       ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_COMPLETE || !txstate)
-               return ret;
-
-       spin_lock_irqsave(&echan->vchan.lock, flags);
-       if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
-               txstate->residue = edma_residue(echan->edesc);
-       else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
-               txstate->residue = to_edma_desc(&vdesc->tx)->residue;
-       spin_unlock_irqrestore(&echan->vchan.lock, flags);
-
-       return ret;
-}
-
-static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
-{
-       if (!memcpy_channels)
-               return false;
-       while (*memcpy_channels != -1) {
-               if (*memcpy_channels == ch_num)
-                       return true;
-               memcpy_channels++;
-       }
-       return false;
-}
-
-#define EDMA_DMA_BUSWIDTHS     (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
-                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
-                                BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
-                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
-
-static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
-{
-       struct dma_device *s_ddev = &ecc->dma_slave;
-       struct dma_device *m_ddev = NULL;
-       s32 *memcpy_channels = ecc->info->memcpy_channels;
-       int i, j;
-
-       dma_cap_zero(s_ddev->cap_mask);
-       dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
-       dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
-       if (ecc->legacy_mode && !memcpy_channels) {
-               dev_warn(ecc->dev,
-                        "Legacy memcpy is enabled, things might not work\n");
-
-               dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
-               s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
-               s_ddev->directions = BIT(DMA_MEM_TO_MEM);
-       }
-
-       s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
-       s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
-       s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
-       s_ddev->device_free_chan_resources = edma_free_chan_resources;
-       s_ddev->device_issue_pending = edma_issue_pending;
-       s_ddev->device_tx_status = edma_tx_status;
-       s_ddev->device_config = edma_slave_config;
-       s_ddev->device_pause = edma_dma_pause;
-       s_ddev->device_resume = edma_dma_resume;
-       s_ddev->device_terminate_all = edma_terminate_all;
-       s_ddev->device_synchronize = edma_synchronize;
-
-       s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
-       s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
-       s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
-       s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-       s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */
-
-       s_ddev->dev = ecc->dev;
-       INIT_LIST_HEAD(&s_ddev->channels);
-
-       if (memcpy_channels) {
-               m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
-               if (!m_ddev) {
-                       dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
-                       memcpy_channels = NULL;
-                       goto ch_setup;
-               }
-               ecc->dma_memcpy = m_ddev;
-
-               dma_cap_zero(m_ddev->cap_mask);
-               dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
-
-               m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
-               m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
-               m_ddev->device_free_chan_resources = edma_free_chan_resources;
-               m_ddev->device_issue_pending = edma_issue_pending;
-               m_ddev->device_tx_status = edma_tx_status;
-               m_ddev->device_config = edma_slave_config;
-               m_ddev->device_pause = edma_dma_pause;
-               m_ddev->device_resume = edma_dma_resume;
-               m_ddev->device_terminate_all = edma_terminate_all;
-               m_ddev->device_synchronize = edma_synchronize;
-
-               m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
-               m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
-               m_ddev->directions = BIT(DMA_MEM_TO_MEM);
-               m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-
-               m_ddev->dev = ecc->dev;
-               INIT_LIST_HEAD(&m_ddev->channels);
-       } else if (!ecc->legacy_mode) {
-               dev_info(ecc->dev, "memcpy is disabled\n");
-       }
-
-ch_setup:
-       for (i = 0; i < ecc->num_channels; i++) {
-               struct edma_chan *echan = &ecc->slave_chans[i];
-               echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
-               echan->ecc = ecc;
-               echan->vchan.desc_free = edma_desc_free;
-
-               if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
-                       vchan_init(&echan->vchan, m_ddev);
-               else
-                       vchan_init(&echan->vchan, s_ddev);
-
-               INIT_LIST_HEAD(&echan->node);
-               for (j = 0; j < EDMA_MAX_SLOTS; j++)
-                       echan->slot[j] = -1;
-       }
-}
-
-static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
-                             struct edma_cc *ecc)
-{
-       int i;
-       u32 value, cccfg;
-       s8 (*queue_priority_map)[2];
-
-       /* Decode the eDMA3 configuration from CCCFG register */
-       cccfg = edma_read(ecc, EDMA_CCCFG);
-
-       value = GET_NUM_REGN(cccfg);
-       ecc->num_region = BIT(value);
-
-       value = GET_NUM_DMACH(cccfg);
-       ecc->num_channels = BIT(value + 1);
-
-       value = GET_NUM_QDMACH(cccfg);
-       ecc->num_qchannels = value * 2;
-
-       value = GET_NUM_PAENTRY(cccfg);
-       ecc->num_slots = BIT(value + 4);
-
-       value = GET_NUM_EVQUE(cccfg);
-       ecc->num_tc = value + 1;
-
-       ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;
-
-       dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
-       dev_dbg(dev, "num_region: %u\n", ecc->num_region);
-       dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
-       dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
-       dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
-       dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
-       dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
-
-       /* Nothing needs to be done if the queue priority mapping is provided */
-       if (pdata->queue_priority_mapping)
-               return 0;
-
-       /*
-        * Configure TC/queue priority as follows:
-        * Q0 - priority 0
-        * Q1 - priority 1
-        * Q2 - priority 2
-        * ...
-        * The meaning of priority numbers: 0 highest priority, 7 lowest
-        * priority. So Q0 is the highest priority queue and the last queue has
-        * the lowest priority.
-        */
-       queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
-                                         GFP_KERNEL);
-       if (!queue_priority_map)
-               return -ENOMEM;
-
-       for (i = 0; i < ecc->num_tc; i++) {
-               queue_priority_map[i][0] = i;
-               queue_priority_map[i][1] = i;
-       }
-       queue_priority_map[i][0] = -1;
-       queue_priority_map[i][1] = -1;
-
-       pdata->queue_priority_mapping = queue_priority_map;
-       /* Default queue has the lowest priority */
-       pdata->default_queue = i - 1;
-
-       return 0;
-}
-
-#if IS_ENABLED(CONFIG_OF)
-static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
-                              size_t sz)
-{
-       const char pname[] = "ti,edma-xbar-event-map";
-       struct resource res;
-       void __iomem *xbar;
-       s16 (*xbar_chans)[2];
-       size_t nelm = sz / sizeof(s16);
-       u32 shift, offset, mux;
-       int ret, i;
-
-       xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
-       if (!xbar_chans)
-               return -ENOMEM;
-
-       ret = of_address_to_resource(dev->of_node, 1, &res);
-       if (ret)
-               return -ENOMEM;
-
-       xbar = devm_ioremap(dev, res.start, resource_size(&res));
-       if (!xbar)
-               return -ENOMEM;
-
-       ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
-                                        nelm);
-       if (ret)
-               return -EIO;
-
-       /* Invalidate last entry for the other user of this mess */
-       nelm >>= 1;
-       xbar_chans[nelm][0] = -1;
-       xbar_chans[nelm][1] = -1;
-
-       for (i = 0; i < nelm; i++) {
-               shift = (xbar_chans[i][1] & 0x03) << 3;
-               offset = xbar_chans[i][1] & 0xfffffffc;
-               mux = readl(xbar + offset);
-               mux &= ~(0xff << shift);
-               mux |= xbar_chans[i][0] << shift;
-               writel(mux, (xbar + offset));
-       }
-
-       pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
-       return 0;
-}
-
-static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
-                                                    bool legacy_mode)
-{
-       struct edma_soc_info *info;
-       struct property *prop;
-       int sz, ret;
-
-       info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
-       if (!info)
-               return ERR_PTR(-ENOMEM);
-
-       if (legacy_mode) {
-               prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
-                                       &sz);
-               if (prop) {
-                       ret = edma_xbar_event_map(dev, info, sz);
-                       if (ret)
-                               return ERR_PTR(ret);
-               }
-               return info;
-       }
-
-       /* Get the list of channels allocated to be used for memcpy */
-       prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
-       if (prop) {
-               const char pname[] = "ti,edma-memcpy-channels";
-               size_t nelm = sz / sizeof(s32);
-               s32 *memcpy_ch;
-
-               memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
-                                        GFP_KERNEL);
-               if (!memcpy_ch)
-                       return ERR_PTR(-ENOMEM);
-
-               ret = of_property_read_u32_array(dev->of_node, pname,
-                                                (u32 *)memcpy_ch, nelm);
-               if (ret)
-                       return ERR_PTR(ret);
-
-               memcpy_ch[nelm] = -1;
-               info->memcpy_channels = memcpy_ch;
-       }
-
-       prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
-                               &sz);
-       if (prop) {
-               const char pname[] = "ti,edma-reserved-slot-ranges";
-               u32 (*tmp)[2];
-               s16 (*rsv_slots)[2];
-               size_t nelm = sz / sizeof(*tmp);
-               struct edma_rsv_info *rsv_info;
-               int i;
-
-               if (!nelm)
-                       return info;
-
-               tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
-               if (!tmp)
-                       return ERR_PTR(-ENOMEM);
-
-               rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
-               if (!rsv_info) {
-                       kfree(tmp);
-                       return ERR_PTR(-ENOMEM);
-               }
-
-               rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
-                                        GFP_KERNEL);
-               if (!rsv_slots) {
-                       kfree(tmp);
-                       return ERR_PTR(-ENOMEM);
-               }
-
-               ret = of_property_read_u32_array(dev->of_node, pname,
-                                                (u32 *)tmp, nelm * 2);
-               if (ret) {
-                       kfree(tmp);
-                       return ERR_PTR(ret);
-               }
-
-               for (i = 0; i < nelm; i++) {
-                       rsv_slots[i][0] = tmp[i][0];
-                       rsv_slots[i][1] = tmp[i][1];
-               }
-               rsv_slots[nelm][0] = -1;
-               rsv_slots[nelm][1] = -1;
-
-               info->rsv = rsv_info;
-               info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
-
-               kfree(tmp);
-       }
-
-       return info;
-}
-
-static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
-                                     struct of_dma *ofdma)
-{
-       struct edma_cc *ecc = ofdma->of_dma_data;
-       struct dma_chan *chan = NULL;
-       struct edma_chan *echan;
-       int i;
-
-       if (!ecc || dma_spec->args_count < 1)
-               return NULL;
-
-       for (i = 0; i < ecc->num_channels; i++) {
-               echan = &ecc->slave_chans[i];
-               if (echan->ch_num == dma_spec->args[0]) {
-                       chan = &echan->vchan.chan;
-                       break;
-               }
-       }
-
-       if (!chan)
-               return NULL;
-
-       if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
-               goto out;
-
-       if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
-           dma_spec->args[1] < echan->ecc->num_tc) {
-               echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
-               goto out;
-       }
-
-       return NULL;
-out:
-       /* The channel is going to be used as HW synchronized */
-       echan->hw_triggered = true;
-       return dma_get_slave_channel(chan);
-}
-#else
-static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
-                                                    bool legacy_mode)
-{
-       return ERR_PTR(-EINVAL);
-}
-
-static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
-                                     struct of_dma *ofdma)
-{
-       return NULL;
-}
-#endif
-
-static int edma_probe(struct platform_device *pdev)
-{
-       struct edma_soc_info    *info = pdev->dev.platform_data;
-       s8                      (*queue_priority_mapping)[2];
-       int                     i, off, ln;
-       const s16               (*rsv_slots)[2];
-       const s16               (*xbar_chans)[2];
-       int                     irq;
-       char                    *irq_name;
-       struct resource         *mem;
-       struct device_node      *node = pdev->dev.of_node;
-       struct device           *dev = &pdev->dev;
-       struct edma_cc          *ecc;
-       bool                    legacy_mode = true;
-       int ret;
-
-       if (node) {
-               const struct of_device_id *match;
-
-               match = of_match_node(edma_of_ids, node);
-               if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
-                       legacy_mode = false;
-
-               info = edma_setup_info_from_dt(dev, legacy_mode);
-               if (IS_ERR(info)) {
-                       dev_err(dev, "failed to get DT data\n");
-                       return PTR_ERR(info);
-               }
-       }
-
-       if (!info)
-               return -ENODEV;
-
-       pm_runtime_enable(dev);
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0) {
-               dev_err(dev, "pm_runtime_get_sync() failed\n");
-               return ret;
-       }
-
-       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-       if (ret)
-               return ret;
-
-       ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
-       if (!ecc)
-               return -ENOMEM;
-
-       ecc->dev = dev;
-       ecc->id = pdev->id;
-       ecc->legacy_mode = legacy_mode;
-       /* When booting with DT the pdev->id is -1 */
-       if (ecc->id < 0)
-               ecc->id = 0;
-
-       mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
-       if (!mem) {
-               dev_dbg(dev, "mem resource not found, using index 0\n");
-               mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-               if (!mem) {
-                       dev_err(dev, "no mem resource?\n");
-                       return -ENODEV;
-               }
-       }
-       ecc->base = devm_ioremap_resource(dev, mem);
-       if (IS_ERR(ecc->base))
-               return PTR_ERR(ecc->base);
-
-       platform_set_drvdata(pdev, ecc);
-
-       /* Get eDMA3 configuration from IP */
-       ret = edma_setup_from_hw(dev, info, ecc);
-       if (ret)
-               return ret;
-
-       /* Allocate memory based on the information we got from the IP */
-       ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
-                                       sizeof(*ecc->slave_chans), GFP_KERNEL);
-       if (!ecc->slave_chans)
-               return -ENOMEM;
-
-       ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
-                                      sizeof(unsigned long), GFP_KERNEL);
-       if (!ecc->slot_inuse)
-               return -ENOMEM;
-
-       ecc->default_queue = info->default_queue;
-
-       for (i = 0; i < ecc->num_slots; i++)
-               edma_write_slot(ecc, i, &dummy_paramset);
-
-       if (info->rsv) {
-               /* Set the reserved slots in inuse list */
-               rsv_slots = info->rsv->rsv_slots;
-               if (rsv_slots) {
-                       for (i = 0; rsv_slots[i][0] != -1; i++) {
-                               off = rsv_slots[i][0];
-                               ln = rsv_slots[i][1];
-                               edma_set_bits(off, ln, ecc->slot_inuse);
-                       }
-               }
-       }
-
-       /* Clear the xbar mapped channels in unused list */
-       xbar_chans = info->xbar_chans;
-       if (xbar_chans) {
-               for (i = 0; xbar_chans[i][1] != -1; i++) {
-                       off = xbar_chans[i][1];
-               }
-       }
-
-       irq = platform_get_irq_byname(pdev, "edma3_ccint");
-       if (irq < 0 && node)
-               irq = irq_of_parse_and_map(node, 0);
-
-       if (irq >= 0) {
-               irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
-                                         dev_name(dev));
-               ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
-                                      ecc);
-               if (ret) {
-                       dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
-                       return ret;
-               }
-               ecc->ccint = irq;
-       }
-
-       irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
-       if (irq < 0 && node)
-               irq = irq_of_parse_and_map(node, 2);
-
-       if (irq >= 0) {
-               irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
-                                         dev_name(dev));
-               ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
-                                      ecc);
-               if (ret) {
-                       dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
-                       return ret;
-               }
-               ecc->ccerrint = irq;
-       }
-
-       ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
-       if (ecc->dummy_slot < 0) {
-               dev_err(dev, "Can't allocate PaRAM dummy slot\n");
-               return ecc->dummy_slot;
-       }
-
-       queue_priority_mapping = info->queue_priority_mapping;
-
-       if (!ecc->legacy_mode) {
-               int lowest_priority = 0;
-               struct of_phandle_args tc_args;
-
-               ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
-                                           sizeof(*ecc->tc_list), GFP_KERNEL);
-               if (!ecc->tc_list)
-                       return -ENOMEM;
-
-               for (i = 0;; i++) {
-                       ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
-                                                              1, i, &tc_args);
-                       if (ret || i == ecc->num_tc)
-                               break;
-
-                       ecc->tc_list[i].node = tc_args.np;
-                       ecc->tc_list[i].id = i;
-                       queue_priority_mapping[i][1] = tc_args.args[0];
-                       if (queue_priority_mapping[i][1] > lowest_priority) {
-                               lowest_priority = queue_priority_mapping[i][1];
-                               info->default_queue = i;
-                       }
-               }
-       }
-
-       /* Event queue priority mapping */
-       for (i = 0; queue_priority_mapping[i][0] != -1; i++)
-               edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
-                                             queue_priority_mapping[i][1]);
-
-       for (i = 0; i < ecc->num_region; i++) {
-               edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
-               edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
-               edma_write_array(ecc, EDMA_QRAE, i, 0x0);
-       }
-       ecc->info = info;
-
-       /* Init the dma device and channels */
-       edma_dma_init(ecc, legacy_mode);
-
-       for (i = 0; i < ecc->num_channels; i++) {
-               /* Assign all channels to the default queue */
-               edma_assign_channel_eventq(&ecc->slave_chans[i],
-                                          info->default_queue);
-               /* Set entry slot to the dummy slot */
-               edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
-       }
-
-       ecc->dma_slave.filter.map = info->slave_map;
-       ecc->dma_slave.filter.mapcnt = info->slavecnt;
-       ecc->dma_slave.filter.fn = edma_filter_fn;
-
-       ret = dma_async_device_register(&ecc->dma_slave);
-       if (ret) {
-               dev_err(dev, "slave ddev registration failed (%d)\n", ret);
-               goto err_reg1;
-       }
-
-       if (ecc->dma_memcpy) {
-               ret = dma_async_device_register(ecc->dma_memcpy);
-               if (ret) {
-                       dev_err(dev, "memcpy ddev registration failed (%d)\n",
-                               ret);
-                       dma_async_device_unregister(&ecc->dma_slave);
-                       goto err_reg1;
-               }
-       }
-
-       if (node)
-               of_dma_controller_register(node, of_edma_xlate, ecc);
-
-       dev_info(dev, "TI EDMA DMA engine driver\n");
-
-       return 0;
-
-err_reg1:
-       edma_free_slot(ecc, ecc->dummy_slot);
-       return ret;
-}
-
-static void edma_cleanup_vchan(struct dma_device *dmadev)
-{
-       struct edma_chan *echan, *_echan;
-
-       list_for_each_entry_safe(echan, _echan,
-                       &dmadev->channels, vchan.chan.device_node) {
-               list_del(&echan->vchan.chan.device_node);
-               tasklet_kill(&echan->vchan.task);
-       }
-}
-
-static int edma_remove(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct edma_cc *ecc = dev_get_drvdata(dev);
-
-       devm_free_irq(dev, ecc->ccint, ecc);
-       devm_free_irq(dev, ecc->ccerrint, ecc);
-
-       edma_cleanup_vchan(&ecc->dma_slave);
-
-       if (dev->of_node)
-               of_dma_controller_free(dev->of_node);
-       dma_async_device_unregister(&ecc->dma_slave);
-       if (ecc->dma_memcpy)
-               dma_async_device_unregister(ecc->dma_memcpy);
-       edma_free_slot(ecc, ecc->dummy_slot);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int edma_pm_suspend(struct device *dev)
-{
-       struct edma_cc *ecc = dev_get_drvdata(dev);
-       struct edma_chan *echan = ecc->slave_chans;
-       int i;
-
-       for (i = 0; i < ecc->num_channels; i++) {
-               if (echan[i].alloced)
-                       edma_setup_interrupt(&echan[i], false);
-       }
-
-       return 0;
-}
-
-static int edma_pm_resume(struct device *dev)
-{
-       struct edma_cc *ecc = dev_get_drvdata(dev);
-       struct edma_chan *echan = ecc->slave_chans;
-       int i;
-       s8 (*queue_priority_mapping)[2];
-
-       /* Re-initialize the dummy slot to the dummy PaRAM set */
-       edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
-
-       queue_priority_mapping = ecc->info->queue_priority_mapping;
-
-       /* Event queue priority mapping */
-       for (i = 0; queue_priority_mapping[i][0] != -1; i++)
-               edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
-                                             queue_priority_mapping[i][1]);
-
-       for (i = 0; i < ecc->num_channels; i++) {
-               if (echan[i].alloced) {
-                       /* ensure access through shadow region 0 */
-                       edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
-                                      BIT(i & 0x1f));
-
-                       edma_setup_interrupt(&echan[i], true);
-
-                       /* Set up channel -> slot mapping for the entry slot */
-                       edma_set_chmap(&echan[i], echan[i].slot[0]);
-               }
-       }
-
-       return 0;
-}
-#endif
-
-static const struct dev_pm_ops edma_pm_ops = {
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
-};
-
-static struct platform_driver edma_driver = {
-       .probe          = edma_probe,
-       .remove         = edma_remove,
-       .driver = {
-               .name   = "edma",
-               .pm     = &edma_pm_ops,
-               .of_match_table = edma_of_ids,
-       },
-};
-
-static int edma_tptc_probe(struct platform_device *pdev)
-{
-       pm_runtime_enable(&pdev->dev);
-       return pm_runtime_get_sync(&pdev->dev);
-}
-
-static struct platform_driver edma_tptc_driver = {
-       .probe          = edma_tptc_probe,
-       .driver = {
-               .name   = "edma3-tptc",
-               .of_match_table = edma_tptc_of_ids,
-       },
-};
-
-bool edma_filter_fn(struct dma_chan *chan, void *param)
-{
-       bool match = false;
-
-       if (chan->device->dev->driver == &edma_driver.driver) {
-               struct edma_chan *echan = to_edma_chan(chan);
-               unsigned ch_req = *(unsigned *)param;
-               if (ch_req == echan->ch_num) {
-                       /* The channel is going to be used as HW synchronized */
-                       echan->hw_triggered = true;
-                       match = true;
-               }
-       }
-       return match;
-}
-EXPORT_SYMBOL(edma_filter_fn);
-
-static int edma_init(void)
-{
-       int ret;
-
-       ret = platform_driver_register(&edma_tptc_driver);
-       if (ret)
-               return ret;
-
-       return platform_driver_register(&edma_driver);
-}
-subsys_initcall(edma_init);
-
-static void __exit edma_exit(void)
-{
-       platform_driver_unregister(&edma_driver);
-       platform_driver_unregister(&edma_tptc_driver);
-}
-module_exit(edma_exit);
-
-MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
-MODULE_DESCRIPTION("TI EDMA DMA engine driver");
-MODULE_LICENSE("GPL v2");
index 3eaece888e751a9d7723211613c068e133c2530c..1117b5123a6fc786f28d68868deef2fa10951d42 100644 (file)
@@ -1328,8 +1328,7 @@ static int fsldma_of_remove(struct platform_device *op)
 #ifdef CONFIG_PM
 static int fsldma_suspend_late(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct fsldma_device *fdev = platform_get_drvdata(pdev);
+       struct fsldma_device *fdev = dev_get_drvdata(dev);
        struct fsldma_chan *chan;
        int i;
 
@@ -1360,8 +1359,7 @@ out:
 
 static int fsldma_resume_early(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct fsldma_device *fdev = platform_get_drvdata(pdev);
+       struct fsldma_device *fdev = dev_get_drvdata(dev);
        struct fsldma_chan *chan;
        u32 mode;
        int i;
index 1953e57505f4b4493d3d55bbfec102fdb4ede615..e5c911200bdb4a15920181c85e13fe47705214e6 100644 (file)
@@ -670,8 +670,7 @@ static int idma64_platform_remove(struct platform_device *pdev)
 
 static int idma64_pm_suspend(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct idma64_chip *chip = platform_get_drvdata(pdev);
+       struct idma64_chip *chip = dev_get_drvdata(dev);
 
        idma64_off(chip->idma64);
        return 0;
@@ -679,8 +678,7 @@ static int idma64_pm_suspend(struct device *dev)
 
 static int idma64_pm_resume(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct idma64_chip *chip = platform_get_drvdata(pdev);
+       struct idma64_chip *chip = dev_get_drvdata(dev);
 
        idma64_on(chip->idma64);
        return 0;
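
The fsldma and idma64 hunks above are instances of the same mechanical
cleanup: the PM callbacks already receive the struct device, and the pointer
stored with platform_set_drvdata() lives in exactly that struct device, so the
detour through to_platform_device()/platform_get_drvdata() can be dropped. A
small illustrative sketch of why the two forms are interchangeable (the
function and variable names here are made up for the example):

/* Illustration only: platform_get_drvdata(pdev) is defined as
 * dev_get_drvdata(&pdev->dev), and &to_platform_device(dev)->dev is the very
 * dev we were handed, so both reads below return the same pointer.
 */
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/platform_device.h>

static void *example_get_priv(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        void *old_style = platform_get_drvdata(pdev);   /* pattern before */
        void *new_style = dev_get_drvdata(dev);         /* pattern after  */

        WARN_ON(old_style != new_style);                /* always equal */
        return new_style;
}
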
index 715b39ae5a46b70ef9ca700eb7932064bd34409c..75b6ff0415ee0206c8d2ef70399153eae169a6c5 100644 (file)
@@ -1,19 +1,13 @@
-/*
- * drivers/dma/imx-dma.c
- *
- * This file contains a driver for the Freescale i.MX DMA engine
- * found on i.MX1/21/27
- *
- * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
- * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// drivers/dma/imx-dma.c
+//
+// This file contains a driver for the Freescale i.MX DMA engine
+// found on i.MX1/21/27
+//
+// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
+
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/types.h>
index ccd03c3cedfeda7c3508677e85017178d8ab2f63..f077992635c2ff8e3b03b16803495a9909b0b8db 100644 (file)
@@ -1,21 +1,14 @@
-/*
- * drivers/dma/imx-sdma.c
- *
- * This file contains a driver for the Freescale Smart DMA engine
- *
- * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
- *
- * Based on code from Freescale:
- *
- * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// drivers/dma/imx-sdma.c
+//
+// This file contains a driver for the Freescale Smart DMA engine
+//
+// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+//
+// Based on code from Freescale:
+//
+// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 
 #include <linux/init.h>
 #include <linux/iopoll.h>
index 41d167921fab176246459eda0fc5c104effe5d45..ae5182ff01282a1adf2df4c8abd343883770f6c2 100644 (file)
@@ -1,12 +1,8 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * Refer to drivers/dma/imx-sdma.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+//
+// Refer to drivers/dma/imx-sdma.c
 
 #include <linux/init.h>
 #include <linux/types.h>
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
deleted file mode 100644 (file)
index 9483000..0000000
+++ /dev/null
@@ -1,1668 +0,0 @@
-/*
- * OMAP DMAengine support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/delay.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/omap-dma.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/of_dma.h>
-#include <linux/of_device.h>
-
-#include "virt-dma.h"
-
-#define OMAP_SDMA_REQUESTS     127
-#define OMAP_SDMA_CHANNELS     32
-
-struct omap_dmadev {
-       struct dma_device ddev;
-       spinlock_t lock;
-       void __iomem *base;
-       const struct omap_dma_reg *reg_map;
-       struct omap_system_dma_plat_info *plat;
-       bool legacy;
-       bool ll123_supported;
-       struct dma_pool *desc_pool;
-       unsigned dma_requests;
-       spinlock_t irq_lock;
-       uint32_t irq_enable_mask;
-       struct omap_chan **lch_map;
-};
-
-struct omap_chan {
-       struct virt_dma_chan vc;
-       void __iomem *channel_base;
-       const struct omap_dma_reg *reg_map;
-       uint32_t ccr;
-
-       struct dma_slave_config cfg;
-       unsigned dma_sig;
-       bool cyclic;
-       bool paused;
-       bool running;
-
-       int dma_ch;
-       struct omap_desc *desc;
-       unsigned sgidx;
-};
-
-#define DESC_NXT_SV_REFRESH    (0x1 << 24)
-#define DESC_NXT_SV_REUSE      (0x2 << 24)
-#define DESC_NXT_DV_REFRESH    (0x1 << 26)
-#define DESC_NXT_DV_REUSE      (0x2 << 26)
-#define DESC_NTYPE_TYPE2       (0x2 << 29)
-
-/* Type 2 descriptor with Source or Destination address update */
-struct omap_type2_desc {
-       uint32_t next_desc;
-       uint32_t en;
-       uint32_t addr; /* src or dst */
-       uint16_t fn;
-       uint16_t cicr;
-       int16_t cdei;
-       int16_t csei;
-       int32_t cdfi;
-       int32_t csfi;
-} __packed;
-
-struct omap_sg {
-       dma_addr_t addr;
-       uint32_t en;            /* number of elements (24-bit) */
-       uint32_t fn;            /* number of frames (16-bit) */
-       int32_t fi;             /* for double indexing */
-       int16_t ei;             /* for double indexing */
-
-       /* Linked list */
-       struct omap_type2_desc *t2_desc;
-       dma_addr_t t2_desc_paddr;
-};
-
-struct omap_desc {
-       struct virt_dma_desc vd;
-       bool using_ll;
-       enum dma_transfer_direction dir;
-       dma_addr_t dev_addr;
-
-       int32_t fi;             /* for OMAP_DMA_SYNC_PACKET / double indexing */
-       int16_t ei;             /* for double indexing */
-       uint8_t es;             /* CSDP_DATA_TYPE_xxx */
-       uint32_t ccr;           /* CCR value */
-       uint16_t clnk_ctrl;     /* CLNK_CTRL value */
-       uint16_t cicr;          /* CICR value */
-       uint32_t csdp;          /* CSDP value */
-
-       unsigned sglen;
-       struct omap_sg sg[0];
-};
-
-enum {
-       CAPS_0_SUPPORT_LL123    = BIT(20),      /* Linked List type1/2/3 */
-       CAPS_0_SUPPORT_LL4      = BIT(21),      /* Linked List type4 */
-
-       CCR_FS                  = BIT(5),
-       CCR_READ_PRIORITY       = BIT(6),
-       CCR_ENABLE              = BIT(7),
-       CCR_AUTO_INIT           = BIT(8),       /* OMAP1 only */
-       CCR_REPEAT              = BIT(9),       /* OMAP1 only */
-       CCR_OMAP31_DISABLE      = BIT(10),      /* OMAP1 only */
-       CCR_SUSPEND_SENSITIVE   = BIT(8),       /* OMAP2+ only */
-       CCR_RD_ACTIVE           = BIT(9),       /* OMAP2+ only */
-       CCR_WR_ACTIVE           = BIT(10),      /* OMAP2+ only */
-       CCR_SRC_AMODE_CONSTANT  = 0 << 12,
-       CCR_SRC_AMODE_POSTINC   = 1 << 12,
-       CCR_SRC_AMODE_SGLIDX    = 2 << 12,
-       CCR_SRC_AMODE_DBLIDX    = 3 << 12,
-       CCR_DST_AMODE_CONSTANT  = 0 << 14,
-       CCR_DST_AMODE_POSTINC   = 1 << 14,
-       CCR_DST_AMODE_SGLIDX    = 2 << 14,
-       CCR_DST_AMODE_DBLIDX    = 3 << 14,
-       CCR_CONSTANT_FILL       = BIT(16),
-       CCR_TRANSPARENT_COPY    = BIT(17),
-       CCR_BS                  = BIT(18),
-       CCR_SUPERVISOR          = BIT(22),
-       CCR_PREFETCH            = BIT(23),
-       CCR_TRIGGER_SRC         = BIT(24),
-       CCR_BUFFERING_DISABLE   = BIT(25),
-       CCR_WRITE_PRIORITY      = BIT(26),
-       CCR_SYNC_ELEMENT        = 0,
-       CCR_SYNC_FRAME          = CCR_FS,
-       CCR_SYNC_BLOCK          = CCR_BS,
-       CCR_SYNC_PACKET         = CCR_BS | CCR_FS,
-
-       CSDP_DATA_TYPE_8        = 0,
-       CSDP_DATA_TYPE_16       = 1,
-       CSDP_DATA_TYPE_32       = 2,
-       CSDP_SRC_PORT_EMIFF     = 0 << 2, /* OMAP1 only */
-       CSDP_SRC_PORT_EMIFS     = 1 << 2, /* OMAP1 only */
-       CSDP_SRC_PORT_OCP_T1    = 2 << 2, /* OMAP1 only */
-       CSDP_SRC_PORT_TIPB      = 3 << 2, /* OMAP1 only */
-       CSDP_SRC_PORT_OCP_T2    = 4 << 2, /* OMAP1 only */
-       CSDP_SRC_PORT_MPUI      = 5 << 2, /* OMAP1 only */
-       CSDP_SRC_PACKED         = BIT(6),
-       CSDP_SRC_BURST_1        = 0 << 7,
-       CSDP_SRC_BURST_16       = 1 << 7,
-       CSDP_SRC_BURST_32       = 2 << 7,
-       CSDP_SRC_BURST_64       = 3 << 7,
-       CSDP_DST_PORT_EMIFF     = 0 << 9, /* OMAP1 only */
-       CSDP_DST_PORT_EMIFS     = 1 << 9, /* OMAP1 only */
-       CSDP_DST_PORT_OCP_T1    = 2 << 9, /* OMAP1 only */
-       CSDP_DST_PORT_TIPB      = 3 << 9, /* OMAP1 only */
-       CSDP_DST_PORT_OCP_T2    = 4 << 9, /* OMAP1 only */
-       CSDP_DST_PORT_MPUI      = 5 << 9, /* OMAP1 only */
-       CSDP_DST_PACKED         = BIT(13),
-       CSDP_DST_BURST_1        = 0 << 14,
-       CSDP_DST_BURST_16       = 1 << 14,
-       CSDP_DST_BURST_32       = 2 << 14,
-       CSDP_DST_BURST_64       = 3 << 14,
-       CSDP_WRITE_NON_POSTED   = 0 << 16,
-       CSDP_WRITE_POSTED       = 1 << 16,
-       CSDP_WRITE_LAST_NON_POSTED = 2 << 16,
-
-       CICR_TOUT_IE            = BIT(0),       /* OMAP1 only */
-       CICR_DROP_IE            = BIT(1),
-       CICR_HALF_IE            = BIT(2),
-       CICR_FRAME_IE           = BIT(3),
-       CICR_LAST_IE            = BIT(4),
-       CICR_BLOCK_IE           = BIT(5),
-       CICR_PKT_IE             = BIT(7),       /* OMAP2+ only */
-       CICR_TRANS_ERR_IE       = BIT(8),       /* OMAP2+ only */
-       CICR_SUPERVISOR_ERR_IE  = BIT(10),      /* OMAP2+ only */
-       CICR_MISALIGNED_ERR_IE  = BIT(11),      /* OMAP2+ only */
-       CICR_DRAIN_IE           = BIT(12),      /* OMAP2+ only */
-       CICR_SUPER_BLOCK_IE     = BIT(14),      /* OMAP2+ only */
-
-       CLNK_CTRL_ENABLE_LNK    = BIT(15),
-
-       CDP_DST_VALID_INC       = 0 << 0,
-       CDP_DST_VALID_RELOAD    = 1 << 0,
-       CDP_DST_VALID_REUSE     = 2 << 0,
-       CDP_SRC_VALID_INC       = 0 << 2,
-       CDP_SRC_VALID_RELOAD    = 1 << 2,
-       CDP_SRC_VALID_REUSE     = 2 << 2,
-       CDP_NTYPE_TYPE1         = 1 << 4,
-       CDP_NTYPE_TYPE2         = 2 << 4,
-       CDP_NTYPE_TYPE3         = 3 << 4,
-       CDP_TMODE_NORMAL        = 0 << 8,
-       CDP_TMODE_LLIST         = 1 << 8,
-       CDP_FAST                = BIT(10),
-};
-
-static const unsigned es_bytes[] = {
-       [CSDP_DATA_TYPE_8] = 1,
-       [CSDP_DATA_TYPE_16] = 2,
-       [CSDP_DATA_TYPE_32] = 4,
-};
-
-static struct of_dma_filter_info omap_dma_info = {
-       .filter_fn = omap_dma_filter_fn,
-};
-
-static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
-{
-       return container_of(d, struct omap_dmadev, ddev);
-}
-
-static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
-{
-       return container_of(c, struct omap_chan, vc.chan);
-}
-
-static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
-{
-       return container_of(t, struct omap_desc, vd.tx);
-}
-
-static void omap_dma_desc_free(struct virt_dma_desc *vd)
-{
-       struct omap_desc *d = to_omap_dma_desc(&vd->tx);
-
-       if (d->using_ll) {
-               struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
-               int i;
-
-               for (i = 0; i < d->sglen; i++) {
-                       if (d->sg[i].t2_desc)
-                               dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
-                                             d->sg[i].t2_desc_paddr);
-               }
-       }
-
-       kfree(d);
-}
-
-static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
-                                    enum dma_transfer_direction dir, bool last)
-{
-       struct omap_sg *sg = &d->sg[idx];
-       struct omap_type2_desc *t2_desc = sg->t2_desc;
-
-       if (idx)
-               d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
-       if (last)
-               t2_desc->next_desc = 0xfffffffc;
-
-       t2_desc->en = sg->en;
-       t2_desc->addr = sg->addr;
-       t2_desc->fn = sg->fn & 0xffff;
-       t2_desc->cicr = d->cicr;
-       if (!last)
-               t2_desc->cicr &= ~CICR_BLOCK_IE;
-
-       switch (dir) {
-       case DMA_DEV_TO_MEM:
-               t2_desc->cdei = sg->ei;
-               t2_desc->csei = d->ei;
-               t2_desc->cdfi = sg->fi;
-               t2_desc->csfi = d->fi;
-
-               t2_desc->en |= DESC_NXT_DV_REFRESH;
-               t2_desc->en |= DESC_NXT_SV_REUSE;
-               break;
-       case DMA_MEM_TO_DEV:
-               t2_desc->cdei = d->ei;
-               t2_desc->csei = sg->ei;
-               t2_desc->cdfi = d->fi;
-               t2_desc->csfi = sg->fi;
-
-               t2_desc->en |= DESC_NXT_SV_REFRESH;
-               t2_desc->en |= DESC_NXT_DV_REUSE;
-               break;
-       default:
-               return;
-       }
-
-       t2_desc->en |= DESC_NTYPE_TYPE2;
-}
-
-static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
-{
-       switch (type) {
-       case OMAP_DMA_REG_16BIT:
-               writew_relaxed(val, addr);
-               break;
-       case OMAP_DMA_REG_2X16BIT:
-               writew_relaxed(val, addr);
-               writew_relaxed(val >> 16, addr + 2);
-               break;
-       case OMAP_DMA_REG_32BIT:
-               writel_relaxed(val, addr);
-               break;
-       default:
-               WARN_ON(1);
-       }
-}
-
-static unsigned omap_dma_read(unsigned type, void __iomem *addr)
-{
-       unsigned val;
-
-       switch (type) {
-       case OMAP_DMA_REG_16BIT:
-               val = readw_relaxed(addr);
-               break;
-       case OMAP_DMA_REG_2X16BIT:
-               val = readw_relaxed(addr);
-               val |= readw_relaxed(addr + 2) << 16;
-               break;
-       case OMAP_DMA_REG_32BIT:
-               val = readl_relaxed(addr);
-               break;
-       default:
-               WARN_ON(1);
-               val = 0;
-       }
-
-       return val;
-}
-
-static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
-{
-       const struct omap_dma_reg *r = od->reg_map + reg;
-
-       WARN_ON(r->stride);
-
-       omap_dma_write(val, r->type, od->base + r->offset);
-}
-
-static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
-{
-       const struct omap_dma_reg *r = od->reg_map + reg;
-
-       WARN_ON(r->stride);
-
-       return omap_dma_read(r->type, od->base + r->offset);
-}
-
-static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
-{
-       const struct omap_dma_reg *r = c->reg_map + reg;
-
-       omap_dma_write(val, r->type, c->channel_base + r->offset);
-}
-
-static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
-{
-       const struct omap_dma_reg *r = c->reg_map + reg;
-
-       return omap_dma_read(r->type, c->channel_base + r->offset);
-}
-
-static void omap_dma_clear_csr(struct omap_chan *c)
-{
-       if (dma_omap1())
-               omap_dma_chan_read(c, CSR);
-       else
-               omap_dma_chan_write(c, CSR, ~0);
-}
-
-static unsigned omap_dma_get_csr(struct omap_chan *c)
-{
-       unsigned val = omap_dma_chan_read(c, CSR);
-
-       if (!dma_omap1())
-               omap_dma_chan_write(c, CSR, val);
-
-       return val;
-}
-
-static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
-       unsigned lch)
-{
-       c->channel_base = od->base + od->plat->channel_stride * lch;
-
-       od->lch_map[lch] = c;
-}
-
-static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
-{
-       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
-       uint16_t cicr = d->cicr;
-
-       if (__dma_omap15xx(od->plat->dma_attr))
-               omap_dma_chan_write(c, CPC, 0);
-       else
-               omap_dma_chan_write(c, CDAC, 0);
-
-       omap_dma_clear_csr(c);
-
-       if (d->using_ll) {
-               uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;
-
-               if (d->dir == DMA_DEV_TO_MEM)
-                       cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
-               else
-                       cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
-               omap_dma_chan_write(c, CDP, cdp);
-
-               omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
-               omap_dma_chan_write(c, CCDN, 0);
-               omap_dma_chan_write(c, CCFN, 0xffff);
-               omap_dma_chan_write(c, CCEN, 0xffffff);
-
-               cicr &= ~CICR_BLOCK_IE;
-       } else if (od->ll123_supported) {
-               omap_dma_chan_write(c, CDP, 0);
-       }
-
-       /* Enable interrupts */
-       omap_dma_chan_write(c, CICR, cicr);
-
-       /* Enable channel */
-       omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
-
-       c->running = true;
-}
-
-static void omap_dma_drain_chan(struct omap_chan *c)
-{
-       int i;
-       u32 val;
-
-       /* Wait for sDMA FIFO to drain */
-       for (i = 0; ; i++) {
-               val = omap_dma_chan_read(c, CCR);
-               if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
-                       break;
-
-               if (i > 100)
-                       break;
-
-               udelay(5);
-       }
-
-       if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
-               dev_err(c->vc.chan.device->dev,
-                       "DMA drain did not complete on lch %d\n",
-                       c->dma_ch);
-}
-
-static int omap_dma_stop(struct omap_chan *c)
-{
-       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
-       uint32_t val;
-
-       /* disable irq */
-       omap_dma_chan_write(c, CICR, 0);
-
-       omap_dma_clear_csr(c);
-
-       val = omap_dma_chan_read(c, CCR);
-       if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
-               uint32_t sysconfig;
-
-               sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
-               val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
-               val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
-               omap_dma_glbl_write(od, OCP_SYSCONFIG, val);
-
-               val = omap_dma_chan_read(c, CCR);
-               val &= ~CCR_ENABLE;
-               omap_dma_chan_write(c, CCR, val);
-
-               if (!(c->ccr & CCR_BUFFERING_DISABLE))
-                       omap_dma_drain_chan(c);
-
-               omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
-       } else {
-               if (!(val & CCR_ENABLE))
-                       return -EINVAL;
-
-               val &= ~CCR_ENABLE;
-               omap_dma_chan_write(c, CCR, val);
-
-               if (!(c->ccr & CCR_BUFFERING_DISABLE))
-                       omap_dma_drain_chan(c);
-       }
-
-       mb();
-
-       if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
-               val = omap_dma_chan_read(c, CLNK_CTRL);
-
-               if (dma_omap1())
-                       val |= 1 << 14; /* set the STOP_LNK bit */
-               else
-                       val &= ~CLNK_CTRL_ENABLE_LNK;
-
-               omap_dma_chan_write(c, CLNK_CTRL, val);
-       }
-       c->running = false;
-       return 0;
-}
-
-static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
-{
-       struct omap_sg *sg = d->sg + c->sgidx;
-       unsigned cxsa, cxei, cxfi;
-
-       if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
-               cxsa = CDSA;
-               cxei = CDEI;
-               cxfi = CDFI;
-       } else {
-               cxsa = CSSA;
-               cxei = CSEI;
-               cxfi = CSFI;
-       }
-
-       omap_dma_chan_write(c, cxsa, sg->addr);
-       omap_dma_chan_write(c, cxei, sg->ei);
-       omap_dma_chan_write(c, cxfi, sg->fi);
-       omap_dma_chan_write(c, CEN, sg->en);
-       omap_dma_chan_write(c, CFN, sg->fn);
-
-       omap_dma_start(c, d);
-       c->sgidx++;
-}
-
-static void omap_dma_start_desc(struct omap_chan *c)
-{
-       struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
-       struct omap_desc *d;
-       unsigned cxsa, cxei, cxfi;
-
-       if (!vd) {
-               c->desc = NULL;
-               return;
-       }
-
-       list_del(&vd->node);
-
-       c->desc = d = to_omap_dma_desc(&vd->tx);
-       c->sgidx = 0;
-
-       /*
-        * This provides the necessary barrier to ensure data held in
-        * DMA coherent memory is visible to the DMA engine prior to
-        * the transfer starting.
-        */
-       mb();
-
-       omap_dma_chan_write(c, CCR, d->ccr);
-       if (dma_omap1())
-               omap_dma_chan_write(c, CCR2, d->ccr >> 16);
-
-       if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
-               cxsa = CSSA;
-               cxei = CSEI;
-               cxfi = CSFI;
-       } else {
-               cxsa = CDSA;
-               cxei = CDEI;
-               cxfi = CDFI;
-       }
-
-       omap_dma_chan_write(c, cxsa, d->dev_addr);
-       omap_dma_chan_write(c, cxei, d->ei);
-       omap_dma_chan_write(c, cxfi, d->fi);
-       omap_dma_chan_write(c, CSDP, d->csdp);
-       omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
-
-       omap_dma_start_sg(c, d);
-}
-
-static void omap_dma_callback(int ch, u16 status, void *data)
-{
-       struct omap_chan *c = data;
-       struct omap_desc *d;
-       unsigned long flags;
-
-       spin_lock_irqsave(&c->vc.lock, flags);
-       d = c->desc;
-       if (d) {
-               if (c->cyclic) {
-                       vchan_cyclic_callback(&d->vd);
-               } else if (d->using_ll || c->sgidx == d->sglen) {
-                       omap_dma_start_desc(c);
-                       vchan_cookie_complete(&d->vd);
-               } else {
-                       omap_dma_start_sg(c, d);
-               }
-       }
-       spin_unlock_irqrestore(&c->vc.lock, flags);
-}
-
-static irqreturn_t omap_dma_irq(int irq, void *devid)
-{
-       struct omap_dmadev *od = devid;
-       unsigned status, channel;
-
-       spin_lock(&od->irq_lock);
-
-       status = omap_dma_glbl_read(od, IRQSTATUS_L1);
-       status &= od->irq_enable_mask;
-       if (status == 0) {
-               spin_unlock(&od->irq_lock);
-               return IRQ_NONE;
-       }
-
-       while ((channel = ffs(status)) != 0) {
-               unsigned mask, csr;
-               struct omap_chan *c;
-
-               channel -= 1;
-               mask = BIT(channel);
-               status &= ~mask;
-
-               c = od->lch_map[channel];
-               if (c == NULL) {
-                       /* This should never happen */
-                       dev_err(od->ddev.dev, "invalid channel %u\n", channel);
-                       continue;
-               }
-
-               csr = omap_dma_get_csr(c);
-               omap_dma_glbl_write(od, IRQSTATUS_L1, mask);
-
-               omap_dma_callback(channel, csr, c);
-       }
-
-       spin_unlock(&od->irq_lock);
-
-       return IRQ_HANDLED;
-}
-
-static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
-{
-       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       struct device *dev = od->ddev.dev;
-       int ret;
-
-       if (od->legacy) {
-               ret = omap_request_dma(c->dma_sig, "DMA engine",
-                                      omap_dma_callback, c, &c->dma_ch);
-       } else {
-               ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
-                                      &c->dma_ch);
-       }
-
-       dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);
-
-       if (ret >= 0) {
-               omap_dma_assign(od, c, c->dma_ch);
-
-               if (!od->legacy) {
-                       unsigned val;
-
-                       spin_lock_irq(&od->irq_lock);
-                       val = BIT(c->dma_ch);
-                       omap_dma_glbl_write(od, IRQSTATUS_L1, val);
-                       od->irq_enable_mask |= val;
-                       omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
-
-                       val = omap_dma_glbl_read(od, IRQENABLE_L0);
-                       val &= ~BIT(c->dma_ch);
-                       omap_dma_glbl_write(od, IRQENABLE_L0, val);
-                       spin_unlock_irq(&od->irq_lock);
-               }
-       }
-
-       if (dma_omap1()) {
-               if (__dma_omap16xx(od->plat->dma_attr)) {
-                       c->ccr = CCR_OMAP31_DISABLE;
-                       /* Duplicate what plat-omap/dma.c does */
-                       c->ccr |= c->dma_ch + 1;
-               } else {
-                       c->ccr = c->dma_sig & 0x1f;
-               }
-       } else {
-               c->ccr = c->dma_sig & 0x1f;
-               c->ccr |= (c->dma_sig & ~0x1f) << 14;
-       }
-       if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
-               c->ccr |= CCR_BUFFERING_DISABLE;
-
-       return ret;
-}
-
-static void omap_dma_free_chan_resources(struct dma_chan *chan)
-{
-       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
-       struct omap_chan *c = to_omap_dma_chan(chan);
-
-       if (!od->legacy) {
-               spin_lock_irq(&od->irq_lock);
-               od->irq_enable_mask &= ~BIT(c->dma_ch);
-               omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
-               spin_unlock_irq(&od->irq_lock);
-       }
-
-       c->channel_base = NULL;
-       od->lch_map[c->dma_ch] = NULL;
-       vchan_free_chan_resources(&c->vc);
-       omap_free_dma(c->dma_ch);
-
-       dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
-               c->dma_sig);
-       c->dma_sig = 0;
-}
-
-static size_t omap_dma_sg_size(struct omap_sg *sg)
-{
-       return sg->en * sg->fn;
-}
-
-static size_t omap_dma_desc_size(struct omap_desc *d)
-{
-       unsigned i;
-       size_t size;
-
-       for (size = i = 0; i < d->sglen; i++)
-               size += omap_dma_sg_size(&d->sg[i]);
-
-       return size * es_bytes[d->es];
-}
-
-static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
-{
-       unsigned i;
-       size_t size, es_size = es_bytes[d->es];
-
-       for (size = i = 0; i < d->sglen; i++) {
-               size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
-
-               if (size)
-                       size += this_size;
-               else if (addr >= d->sg[i].addr &&
-                        addr < d->sg[i].addr + this_size)
-                       size += d->sg[i].addr + this_size - addr;
-       }
-       return size;
-}
-
-/*
- * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
- * read before the DMA controller finished disabling the channel.
- */
-static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
-{
-       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
-       uint32_t val;
-
-       val = omap_dma_chan_read(c, reg);
-       if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
-               val = omap_dma_chan_read(c, reg);
-
-       return val;
-}
-
-static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
-{
-       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
-       dma_addr_t addr, cdac;
-
-       if (__dma_omap15xx(od->plat->dma_attr)) {
-               addr = omap_dma_chan_read(c, CPC);
-       } else {
-               addr = omap_dma_chan_read_3_3(c, CSAC);
-               cdac = omap_dma_chan_read_3_3(c, CDAC);
-
-               /*
-                * CDAC == 0 indicates that the DMA transfer on the channel has
-                * not been started (no data has been transferred so far).
-                * Return the programmed source start address in this case.
-                */
-               if (cdac == 0)
-                       addr = omap_dma_chan_read(c, CSSA);
-       }
-
-       if (dma_omap1())
-               addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;
-
-       return addr;
-}
-
-static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
-{
-       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
-       dma_addr_t addr;
-
-       if (__dma_omap15xx(od->plat->dma_attr)) {
-               addr = omap_dma_chan_read(c, CPC);
-       } else {
-               addr = omap_dma_chan_read_3_3(c, CDAC);
-
-               /*
-                * CDAC == 0 indicates that the DMA transfer on the channel
-                * has not been started (no data has been transferred so
-                * far).  Return the programmed destination start address in
-                * this case.
-                */
-               if (addr == 0)
-                       addr = omap_dma_chan_read(c, CDSA);
-       }
-
-       if (dma_omap1())
-               addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;
-
-       return addr;
-}
-
-static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
-       dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       struct virt_dma_desc *vd;
-       enum dma_status ret;
-       unsigned long flags;
-
-       ret = dma_cookie_status(chan, cookie, txstate);
-
-       if (!c->paused && c->running) {
-               uint32_t ccr = omap_dma_chan_read(c, CCR);
-               /*
-                * The channel is no longer active, set the return value
-                * accordingly
-                */
-               if (!(ccr & CCR_ENABLE))
-                       ret = DMA_COMPLETE;
-       }
-
-       if (ret == DMA_COMPLETE || !txstate)
-               return ret;
-
-       spin_lock_irqsave(&c->vc.lock, flags);
-       vd = vchan_find_desc(&c->vc, cookie);
-       if (vd) {
-               txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
-       } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
-               struct omap_desc *d = c->desc;
-               dma_addr_t pos;
-
-               if (d->dir == DMA_MEM_TO_DEV)
-                       pos = omap_dma_get_src_pos(c);
-               else if (d->dir == DMA_DEV_TO_MEM  || d->dir == DMA_MEM_TO_MEM)
-                       pos = omap_dma_get_dst_pos(c);
-               else
-                       pos = 0;
-
-               txstate->residue = omap_dma_desc_size_pos(d, pos);
-       } else {
-               txstate->residue = 0;
-       }
-       if (ret == DMA_IN_PROGRESS && c->paused)
-               ret = DMA_PAUSED;
-       spin_unlock_irqrestore(&c->vc.lock, flags);
-
-       return ret;
-}
-
-static void omap_dma_issue_pending(struct dma_chan *chan)
-{
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       unsigned long flags;
-
-       spin_lock_irqsave(&c->vc.lock, flags);
-       if (vchan_issue_pending(&c->vc) && !c->desc)
-               omap_dma_start_desc(c);
-       spin_unlock_irqrestore(&c->vc.lock, flags);
-}
-
-static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
-       struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
-       enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
-{
-       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       enum dma_slave_buswidth dev_width;
-       struct scatterlist *sgent;
-       struct omap_desc *d;
-       dma_addr_t dev_addr;
-       unsigned i, es, en, frame_bytes;
-       bool ll_failed = false;
-       u32 burst;
-       u32 port_window, port_window_bytes;
-
-       if (dir == DMA_DEV_TO_MEM) {
-               dev_addr = c->cfg.src_addr;
-               dev_width = c->cfg.src_addr_width;
-               burst = c->cfg.src_maxburst;
-               port_window = c->cfg.src_port_window_size;
-       } else if (dir == DMA_MEM_TO_DEV) {
-               dev_addr = c->cfg.dst_addr;
-               dev_width = c->cfg.dst_addr_width;
-               burst = c->cfg.dst_maxburst;
-               port_window = c->cfg.dst_port_window_size;
-       } else {
-               dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
-               return NULL;
-       }
-
-       /* Bus width translates to the element size (ES) */
-       switch (dev_width) {
-       case DMA_SLAVE_BUSWIDTH_1_BYTE:
-               es = CSDP_DATA_TYPE_8;
-               break;
-       case DMA_SLAVE_BUSWIDTH_2_BYTES:
-               es = CSDP_DATA_TYPE_16;
-               break;
-       case DMA_SLAVE_BUSWIDTH_4_BYTES:
-               es = CSDP_DATA_TYPE_32;
-               break;
-       default: /* not reached */
-               return NULL;
-       }
-
-       /* Now allocate and setup the descriptor. */
-       d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
-       if (!d)
-               return NULL;
-
-       d->dir = dir;
-       d->dev_addr = dev_addr;
-       d->es = es;
-
-       /* When the port_window is used, one frame must cover the window */
-       if (port_window) {
-               burst = port_window;
-               port_window_bytes = port_window * es_bytes[es];
-
-               d->ei = 1;
-               /*
-                * One frame covers the port_window and, by configuring
-                * the source frame index to be -1 * (port_window - 1),
-                * we instruct the sDMA to move back to the start of the
-                * window after each frame is processed.
-                */
-               d->fi = -(port_window_bytes - 1);
-       }
-
-       d->ccr = c->ccr | CCR_SYNC_FRAME;
-       if (dir == DMA_DEV_TO_MEM) {
-               d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
-
-               d->ccr |= CCR_DST_AMODE_POSTINC;
-               if (port_window) {
-                       d->ccr |= CCR_SRC_AMODE_DBLIDX;
-
-                       if (port_window_bytes >= 64)
-                               d->csdp |= CSDP_SRC_BURST_64;
-                       else if (port_window_bytes >= 32)
-                               d->csdp |= CSDP_SRC_BURST_32;
-                       else if (port_window_bytes >= 16)
-                               d->csdp |= CSDP_SRC_BURST_16;
-
-               } else {
-                       d->ccr |= CCR_SRC_AMODE_CONSTANT;
-               }
-       } else {
-               d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
-
-               d->ccr |= CCR_SRC_AMODE_POSTINC;
-               if (port_window) {
-                       d->ccr |= CCR_DST_AMODE_DBLIDX;
-
-                       if (port_window_bytes >= 64)
-                               d->csdp |= CSDP_DST_BURST_64;
-                       else if (port_window_bytes >= 32)
-                               d->csdp |= CSDP_DST_BURST_32;
-                       else if (port_window_bytes >= 16)
-                               d->csdp |= CSDP_DST_BURST_16;
-               } else {
-                       d->ccr |= CCR_DST_AMODE_CONSTANT;
-               }
-       }
-
-       d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
-       d->csdp |= es;
-
-       if (dma_omap1()) {
-               d->cicr |= CICR_TOUT_IE;
-
-               if (dir == DMA_DEV_TO_MEM)
-                       d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
-               else
-                       d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
-       } else {
-               if (dir == DMA_DEV_TO_MEM)
-                       d->ccr |= CCR_TRIGGER_SRC;
-
-               d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
-
-               if (port_window)
-                       d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
-       }
-       if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
-               d->clnk_ctrl = c->dma_ch;
-
-       /*
-        * Build our scatterlist entries: each contains the address,
-        * the number of elements (EN) in each frame, and the number of
-        * frames (FN).  Number of bytes for this entry = ES * EN * FN.
-        *
-        * Burst size translates to number of elements with frame sync.
-        * Note: DMA engine defines burst to be the number of dev-width
-        * transfers.
-        */
-       en = burst;
-       frame_bytes = es_bytes[es] * en;
-
-       if (sglen >= 2)
-               d->using_ll = od->ll123_supported;
-
-       for_each_sg(sgl, sgent, sglen, i) {
-               struct omap_sg *osg = &d->sg[i];
-
-               osg->addr = sg_dma_address(sgent);
-               osg->en = en;
-               osg->fn = sg_dma_len(sgent) / frame_bytes;
-
-               if (d->using_ll) {
-                       osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
-                                                     &osg->t2_desc_paddr);
-                       if (!osg->t2_desc) {
-                               dev_err(chan->device->dev,
-                                       "t2_desc[%d] allocation failed\n", i);
-                               ll_failed = true;
-                               d->using_ll = false;
-                               continue;
-                       }
-
-                       omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
-               }
-       }
-
-       d->sglen = sglen;
-
-       /* Release the dma_pool entries if one allocation failed */
-       if (ll_failed) {
-               for (i = 0; i < d->sglen; i++) {
-                       struct omap_sg *osg = &d->sg[i];
-
-                       if (osg->t2_desc) {
-                               dma_pool_free(od->desc_pool, osg->t2_desc,
-                                             osg->t2_desc_paddr);
-                               osg->t2_desc = NULL;
-                       }
-               }
-       }
-
-       return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
-}
-
-static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
-       struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-       size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
-{
-       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       enum dma_slave_buswidth dev_width;
-       struct omap_desc *d;
-       dma_addr_t dev_addr;
-       unsigned es;
-       u32 burst;
-
-       if (dir == DMA_DEV_TO_MEM) {
-               dev_addr = c->cfg.src_addr;
-               dev_width = c->cfg.src_addr_width;
-               burst = c->cfg.src_maxburst;
-       } else if (dir == DMA_MEM_TO_DEV) {
-               dev_addr = c->cfg.dst_addr;
-               dev_width = c->cfg.dst_addr_width;
-               burst = c->cfg.dst_maxburst;
-       } else {
-               dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
-               return NULL;
-       }
-
-       /* Bus width translates to the element size (ES) */
-       switch (dev_width) {
-       case DMA_SLAVE_BUSWIDTH_1_BYTE:
-               es = CSDP_DATA_TYPE_8;
-               break;
-       case DMA_SLAVE_BUSWIDTH_2_BYTES:
-               es = CSDP_DATA_TYPE_16;
-               break;
-       case DMA_SLAVE_BUSWIDTH_4_BYTES:
-               es = CSDP_DATA_TYPE_32;
-               break;
-       default: /* not reached */
-               return NULL;
-       }
-
-       /* Now allocate and setup the descriptor. */
-       d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
-       if (!d)
-               return NULL;
-
-       d->dir = dir;
-       d->dev_addr = dev_addr;
-       d->fi = burst;
-       d->es = es;
-       d->sg[0].addr = buf_addr;
-       d->sg[0].en = period_len / es_bytes[es];
-       d->sg[0].fn = buf_len / period_len;
-       d->sglen = 1;
-
-       d->ccr = c->ccr;
-       if (dir == DMA_DEV_TO_MEM)
-               d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
-       else
-               d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
-
-       d->cicr = CICR_DROP_IE;
-       if (flags & DMA_PREP_INTERRUPT)
-               d->cicr |= CICR_FRAME_IE;
-
-       d->csdp = es;
-
-       if (dma_omap1()) {
-               d->cicr |= CICR_TOUT_IE;
-
-               if (dir == DMA_DEV_TO_MEM)
-                       d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
-               else
-                       d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
-       } else {
-               if (burst)
-                       d->ccr |= CCR_SYNC_PACKET;
-               else
-                       d->ccr |= CCR_SYNC_ELEMENT;
-
-               if (dir == DMA_DEV_TO_MEM) {
-                       d->ccr |= CCR_TRIGGER_SRC;
-                       d->csdp |= CSDP_DST_PACKED;
-               } else {
-                       d->csdp |= CSDP_SRC_PACKED;
-               }
-
-               d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
-
-               d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
-       }
-
-       if (__dma_omap15xx(od->plat->dma_attr))
-               d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
-       else
-               d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;
-
-       c->cyclic = true;
-
-       return vchan_tx_prep(&c->vc, &d->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
-       struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
-       size_t len, unsigned long tx_flags)
-{
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       struct omap_desc *d;
-       uint8_t data_type;
-
-       d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
-       if (!d)
-               return NULL;
-
-       data_type = __ffs((src | dest | len));
-       if (data_type > CSDP_DATA_TYPE_32)
-               data_type = CSDP_DATA_TYPE_32;
-
-       d->dir = DMA_MEM_TO_MEM;
-       d->dev_addr = src;
-       d->fi = 0;
-       d->es = data_type;
-       d->sg[0].en = len / BIT(data_type);
-       d->sg[0].fn = 1;
-       d->sg[0].addr = dest;
-       d->sglen = 1;
-       d->ccr = c->ccr;
-       d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
-
-       d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
-
-       d->csdp = data_type;
-
-       if (dma_omap1()) {
-               d->cicr |= CICR_TOUT_IE;
-               d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
-       } else {
-               d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
-               d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
-               d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
-       }
-
-       return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
-}
-
-static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
-       struct dma_chan *chan, struct dma_interleaved_template *xt,
-       unsigned long flags)
-{
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       struct omap_desc *d;
-       struct omap_sg *sg;
-       uint8_t data_type;
-       size_t src_icg, dst_icg;
-
-       /* Slave mode is not supported */
-       if (is_slave_direction(xt->dir))
-               return NULL;
-
-       if (xt->frame_size != 1 || xt->numf == 0)
-               return NULL;
-
-       d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
-       if (!d)
-               return NULL;
-
-       data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
-       if (data_type > CSDP_DATA_TYPE_32)
-               data_type = CSDP_DATA_TYPE_32;
-
-       sg = &d->sg[0];
-       d->dir = DMA_MEM_TO_MEM;
-       d->dev_addr = xt->src_start;
-       d->es = data_type;
-       sg->en = xt->sgl[0].size / BIT(data_type);
-       sg->fn = xt->numf;
-       sg->addr = xt->dst_start;
-       d->sglen = 1;
-       d->ccr = c->ccr;
-
-       src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
-       dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
-       if (src_icg) {
-               d->ccr |= CCR_SRC_AMODE_DBLIDX;
-               d->ei = 1;
-               d->fi = src_icg;
-       } else if (xt->src_inc) {
-               d->ccr |= CCR_SRC_AMODE_POSTINC;
-               d->fi = 0;
-       } else {
-               dev_err(chan->device->dev,
-                       "%s: SRC constant addressing is not supported\n",
-                       __func__);
-               kfree(d);
-               return NULL;
-       }
-
-       if (dst_icg) {
-               d->ccr |= CCR_DST_AMODE_DBLIDX;
-               sg->ei = 1;
-               sg->fi = dst_icg;
-       } else if (xt->dst_inc) {
-               d->ccr |= CCR_DST_AMODE_POSTINC;
-               sg->fi = 0;
-       } else {
-               dev_err(chan->device->dev,
-                       "%s: DST constant addressing is not supported\n",
-                       __func__);
-               kfree(d);
-               return NULL;
-       }
-
-       d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
-
-       d->csdp = data_type;
-
-       if (dma_omap1()) {
-               d->cicr |= CICR_TOUT_IE;
-               d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
-       } else {
-               d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
-               d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
-               d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
-       }
-
-       return vchan_tx_prep(&c->vc, &d->vd, flags);
-}
-
-static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
-{
-       struct omap_chan *c = to_omap_dma_chan(chan);
-
-       if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
-           cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
-               return -EINVAL;
-
-       if (cfg->src_maxburst > chan->device->max_burst ||
-           cfg->dst_maxburst > chan->device->max_burst)
-               return -EINVAL;
-
-       memcpy(&c->cfg, cfg, sizeof(c->cfg));
-
-       return 0;
-}
-
-static int omap_dma_terminate_all(struct dma_chan *chan)
-{
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       unsigned long flags;
-       LIST_HEAD(head);
-
-       spin_lock_irqsave(&c->vc.lock, flags);
-
-       /*
-        * Stop DMA activity: we assume the callback will not be called
-        * after omap_dma_stop() returns (even if it does, it will see
-        * c->desc is NULL and exit.)
-        */
-       if (c->desc) {
-               vchan_terminate_vdesc(&c->desc->vd);
-               c->desc = NULL;
-               /* Avoid stopping the dma twice */
-               if (!c->paused)
-                       omap_dma_stop(c);
-       }
-
-       c->cyclic = false;
-       c->paused = false;
-
-       vchan_get_all_descriptors(&c->vc, &head);
-       spin_unlock_irqrestore(&c->vc.lock, flags);
-       vchan_dma_desc_free_list(&c->vc, &head);
-
-       return 0;
-}
-
-static void omap_dma_synchronize(struct dma_chan *chan)
-{
-       struct omap_chan *c = to_omap_dma_chan(chan);
-
-       vchan_synchronize(&c->vc);
-}
-
-static int omap_dma_pause(struct dma_chan *chan)
-{
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
-       unsigned long flags;
-       int ret = -EINVAL;
-       bool can_pause = false;
-
-       spin_lock_irqsave(&od->irq_lock, flags);
-
-       if (!c->desc)
-               goto out;
-
-       if (c->cyclic)
-               can_pause = true;
-
-       /*
-        * We do not allow DMA_MEM_TO_DEV transfers to be paused.
-        * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
-        * "When a channel is disabled during a transfer, the channel undergoes
-        * an abort, unless it is hardware-source-synchronized …".
-        * A source-synchronised channel is one where the fetching of data is
-        * under control of the device. In other words, a device-to-memory
-        * transfer. So, a destination-synchronised channel (which would be a
-        * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
-        * bit is cleared.
-        * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
-        * aborts immediately after completion of current read/write
-        * transactions and then the FIFO is cleaned up." The term "cleaned up"
-        * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
-        * are both clear _before_ disabling the channel, otherwise data loss
-        * will occur.
-        * The problem is that if the channel is active, then device activity
-        * can result in DMA activity starting between reading those as both
-        * clear and the write to DMA_CCR to clear the enable bit hitting the
-        * hardware. If the DMA hardware can't drain the data in its FIFO to the
-        * destination, then data loss "might" occur (say if we write to a UART
-        * and the UART is not accepting any further data).
-        */
-       else if (c->desc->dir == DMA_DEV_TO_MEM)
-               can_pause = true;
-
-       if (can_pause && !c->paused) {
-               ret = omap_dma_stop(c);
-               if (!ret)
-                       c->paused = true;
-       }
-out:
-       spin_unlock_irqrestore(&od->irq_lock, flags);
-
-       return ret;
-}
-
-static int omap_dma_resume(struct dma_chan *chan)
-{
-       struct omap_chan *c = to_omap_dma_chan(chan);
-       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
-       unsigned long flags;
-       int ret = -EINVAL;
-
-       spin_lock_irqsave(&od->irq_lock, flags);
-
-       if (c->paused && c->desc) {
-               mb();
-
-               /* Restore channel link register */
-               omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);
-
-               omap_dma_start(c, c->desc);
-               c->paused = false;
-               ret = 0;
-       }
-       spin_unlock_irqrestore(&od->irq_lock, flags);
-
-       return ret;
-}
-
-static int omap_dma_chan_init(struct omap_dmadev *od)
-{
-       struct omap_chan *c;
-
-       c = kzalloc(sizeof(*c), GFP_KERNEL);
-       if (!c)
-               return -ENOMEM;
-
-       c->reg_map = od->reg_map;
-       c->vc.desc_free = omap_dma_desc_free;
-       vchan_init(&c->vc, &od->ddev);
-
-       return 0;
-}
-
-static void omap_dma_free(struct omap_dmadev *od)
-{
-       while (!list_empty(&od->ddev.channels)) {
-               struct omap_chan *c = list_first_entry(&od->ddev.channels,
-                       struct omap_chan, vc.chan.device_node);
-
-               list_del(&c->vc.chan.device_node);
-               tasklet_kill(&c->vc.task);
-               kfree(c);
-       }
-}
-
-#define OMAP_DMA_BUSWIDTHS     (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
-                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
-                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
-
-static int omap_dma_probe(struct platform_device *pdev)
-{
-       struct omap_dmadev *od;
-       struct resource *res;
-       int rc, i, irq;
-       u32 lch_count;
-
-       od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
-       if (!od)
-               return -ENOMEM;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       od->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(od->base))
-               return PTR_ERR(od->base);
-
-       od->plat = omap_get_plat_info();
-       if (!od->plat)
-               return -EPROBE_DEFER;
-
-       od->reg_map = od->plat->reg_map;
-
-       dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
-       dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
-       dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
-       dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
-       od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
-       od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
-       od->ddev.device_tx_status = omap_dma_tx_status;
-       od->ddev.device_issue_pending = omap_dma_issue_pending;
-       od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
-       od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
-       od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
-       od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
-       od->ddev.device_config = omap_dma_slave_config;
-       od->ddev.device_pause = omap_dma_pause;
-       od->ddev.device_resume = omap_dma_resume;
-       od->ddev.device_terminate_all = omap_dma_terminate_all;
-       od->ddev.device_synchronize = omap_dma_synchronize;
-       od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
-       od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
-       od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-       od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
-       od->ddev.dev = &pdev->dev;
-       INIT_LIST_HEAD(&od->ddev.channels);
-       spin_lock_init(&od->lock);
-       spin_lock_init(&od->irq_lock);
-
-       /* Number of DMA requests */
-       od->dma_requests = OMAP_SDMA_REQUESTS;
-       if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
-                                                     "dma-requests",
-                                                     &od->dma_requests)) {
-               dev_info(&pdev->dev,
-                        "Missing dma-requests property, using %u.\n",
-                        OMAP_SDMA_REQUESTS);
-       }
-
-       /* Number of available logical channels */
-       if (!pdev->dev.of_node) {
-               lch_count = od->plat->dma_attr->lch_count;
-               if (unlikely(!lch_count))
-                       lch_count = OMAP_SDMA_CHANNELS;
-       } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
-                                       &lch_count)) {
-               dev_info(&pdev->dev,
-                        "Missing dma-channels property, using %u.\n",
-                        OMAP_SDMA_CHANNELS);
-               lch_count = OMAP_SDMA_CHANNELS;
-       }
-
-       od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
-                                  GFP_KERNEL);
-       if (!od->lch_map)
-               return -ENOMEM;
-
-       for (i = 0; i < od->dma_requests; i++) {
-               rc = omap_dma_chan_init(od);
-               if (rc) {
-                       omap_dma_free(od);
-                       return rc;
-               }
-       }
-
-       irq = platform_get_irq(pdev, 1);
-       if (irq <= 0) {
-               dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
-               od->legacy = true;
-       } else {
-               /* Disable all interrupts */
-               od->irq_enable_mask = 0;
-               omap_dma_glbl_write(od, IRQENABLE_L1, 0);
-
-               rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
-                                     IRQF_SHARED, "omap-dma-engine", od);
-               if (rc)
-                       return rc;
-       }
-
-       if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
-               od->ll123_supported = true;
-
-       od->ddev.filter.map = od->plat->slave_map;
-       od->ddev.filter.mapcnt = od->plat->slavecnt;
-       od->ddev.filter.fn = omap_dma_filter_fn;
-
-       if (od->ll123_supported) {
-               od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
-                                               &pdev->dev,
-                                               sizeof(struct omap_type2_desc),
-                                               4, 0);
-               if (!od->desc_pool) {
-                       dev_err(&pdev->dev,
-                               "unable to allocate descriptor pool\n");
-                       od->ll123_supported = false;
-               }
-       }
-
-       rc = dma_async_device_register(&od->ddev);
-       if (rc) {
-               pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
-                       rc);
-               omap_dma_free(od);
-               return rc;
-       }
-
-       platform_set_drvdata(pdev, od);
-
-       if (pdev->dev.of_node) {
-               omap_dma_info.dma_cap = od->ddev.cap_mask;
-
-               /* Device-tree DMA controller registration */
-               rc = of_dma_controller_register(pdev->dev.of_node,
-                               of_dma_simple_xlate, &omap_dma_info);
-               if (rc) {
-                       pr_warn("OMAP-DMA: failed to register DMA controller\n");
-                       dma_async_device_unregister(&od->ddev);
-                       omap_dma_free(od);
-               }
-       }
-
-       dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
-                od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
-
-       return rc;
-}
-
-static int omap_dma_remove(struct platform_device *pdev)
-{
-       struct omap_dmadev *od = platform_get_drvdata(pdev);
-       int irq;
-
-       if (pdev->dev.of_node)
-               of_dma_controller_free(pdev->dev.of_node);
-
-       irq = platform_get_irq(pdev, 1);
-       devm_free_irq(&pdev->dev, irq, od);
-
-       dma_async_device_unregister(&od->ddev);
-
-       if (!od->legacy) {
-               /* Disable all interrupts */
-               omap_dma_glbl_write(od, IRQENABLE_L0, 0);
-       }
-
-       if (od->ll123_supported)
-               dma_pool_destroy(od->desc_pool);
-
-       omap_dma_free(od);
-
-       return 0;
-}
-
-static const struct of_device_id omap_dma_match[] = {
-       { .compatible = "ti,omap2420-sdma", },
-       { .compatible = "ti,omap2430-sdma", },
-       { .compatible = "ti,omap3430-sdma", },
-       { .compatible = "ti,omap3630-sdma", },
-       { .compatible = "ti,omap4430-sdma", },
-       {},
-};
-MODULE_DEVICE_TABLE(of, omap_dma_match);
-
-static struct platform_driver omap_dma_driver = {
-       .probe  = omap_dma_probe,
-       .remove = omap_dma_remove,
-       .driver = {
-               .name = "omap-dma-engine",
-               .of_match_table = of_match_ptr(omap_dma_match),
-       },
-};
-
-bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
-{
-       if (chan->device->dev->driver == &omap_dma_driver.driver) {
-               struct omap_dmadev *od = to_omap_dma_dev(chan->device);
-               struct omap_chan *c = to_omap_dma_chan(chan);
-               unsigned req = *(unsigned *)param;
-
-               if (req <= od->dma_requests) {
-                       c->dma_sig = req;
-                       return true;
-               }
-       }
-       return false;
-}
-EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
-
-static int omap_dma_init(void)
-{
-       return platform_driver_register(&omap_dma_driver);
-}
-subsys_initcall(omap_dma_init);
-
-static void __exit omap_dma_exit(void)
-{
-       platform_driver_unregister(&omap_dma_driver);
-}
-module_exit(omap_dma_exit);
-
-MODULE_AUTHOR("Russell King");
-MODULE_LICENSE("GPL");
index de1fd59fe13699b5efb84500be5aff699fe1a1d9..6237069001c4fea463d73f943d18b22ed5f9fba3 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/of_dma.h>
 #include <linux/err.h>
 #include <linux/pm_runtime.h>
+#include <linux/bug.h>
 
 #include "dmaengine.h"
 #define PL330_MAX_CHAN         8
@@ -1094,51 +1095,96 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
        return off;
 }
 
-static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
-                                u8 buf[], const struct _xfer_spec *pxs,
-                                int cyc)
+static u32 _emit_load(unsigned int dry_run, u8 buf[],
+       enum pl330_cond cond, enum dma_transfer_direction direction,
+       u8 peri)
 {
        int off = 0;
-       enum pl330_cond cond;
 
-       if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
-               cond = BURST;
-       else
-               cond = SINGLE;
+       switch (direction) {
+       case DMA_MEM_TO_MEM:
+               /* fall through */
+       case DMA_MEM_TO_DEV:
+               off += _emit_LD(dry_run, &buf[off], cond);
+               break;
 
-       while (cyc--) {
-               off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
-               off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
-               off += _emit_ST(dry_run, &buf[off], ALWAYS);
+       case DMA_DEV_TO_MEM:
+               if (cond == ALWAYS) {
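+                       /*
+                        * Presumably both request flavours must be covered
+                        * when no specific condition is given, so emit both
+                        * a SINGLE and a BURST load; _emit_store() mirrors
+                        * this for the store direction.
+                        */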
+                       off += _emit_LDP(dry_run, &buf[off], SINGLE,
+                               peri);
+                       off += _emit_LDP(dry_run, &buf[off], BURST,
+                               peri);
+               } else {
+                       off += _emit_LDP(dry_run, &buf[off], cond,
+                               peri);
+               }
+               break;
 
-               if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
-                       off += _emit_FLUSHP(dry_run, &buf[off],
-                                           pxs->desc->peri);
+       default:
+               /* this code should be unreachable */
+               WARN_ON(1);
+               break;
        }
 
        return off;
 }
 
-static inline int _ldst_memtodev(struct pl330_dmac *pl330,
+static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
+       enum pl330_cond cond, enum dma_transfer_direction direction,
+       u8 peri)
+{
+       int off = 0;
+
+       switch (direction) {
+       case DMA_MEM_TO_MEM:
+               /* fall through */
+       case DMA_DEV_TO_MEM:
+               off += _emit_ST(dry_run, &buf[off], cond);
+               break;
+
+       case DMA_MEM_TO_DEV:
+               if (cond == ALWAYS) {
+                       off += _emit_STP(dry_run, &buf[off], SINGLE,
+                               peri);
+                       off += _emit_STP(dry_run, &buf[off], BURST,
+                               peri);
+               } else {
+                       off += _emit_STP(dry_run, &buf[off], cond,
+                               peri);
+               }
+               break;
+
+       default:
+               /* this code should be unreachable */
+               WARN_ON(1);
+               break;
+       }
+
+       return off;
+}
+
+static inline int _ldst_peripheral(struct pl330_dmac *pl330,
                                 unsigned dry_run, u8 buf[],
-                                const struct _xfer_spec *pxs, int cyc)
+                                const struct _xfer_spec *pxs, int cyc,
+                                enum pl330_cond cond)
 {
        int off = 0;
-       enum pl330_cond cond;
 
        if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
                cond = BURST;
-       else
-               cond = SINGLE;
 
+       /*
+        * Do FLUSHP at the beginning to clear any stale DMA requests before
+        * the first WFP.
+        */
+       if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+               off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
        while (cyc--) {
                off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
-               off += _emit_LD(dry_run, &buf[off], ALWAYS);
-               off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);
-
-               if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
-                       off += _emit_FLUSHP(dry_run, &buf[off],
-                                           pxs->desc->peri);
+               off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
+                       pxs->desc->peri);
+               off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
+                       pxs->desc->peri);
        }
 
        return off;
@@ -1148,19 +1194,65 @@ static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
                const struct _xfer_spec *pxs, int cyc)
 {
        int off = 0;
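+       /*
+        * Use burst requests when the programmed burst length is greater
+        * than one; otherwise fall back to single requests.
+        */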
+       enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE;
 
        switch (pxs->desc->rqtype) {
        case DMA_MEM_TO_DEV:
-               off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
-               break;
+               /* fall through */
        case DMA_DEV_TO_MEM:
-               off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
+               off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
+                       cond);
                break;
+
        case DMA_MEM_TO_MEM:
                off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
                break;
+
+       default:
+               /* this code should be unreachable */
+               WARN_ON(1);
+               break;
+       }
+
+       return off;
+}
+
+/*
+ * Transfer the dregs with single transfers to the peripheral, or with a
+ * reduced-size burst for mem-to-mem.
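+ *
+ * For example (values chosen only for illustration, following the
+ * arithmetic in _setup_loops()): with a 4-byte burst size and a burst
+ * length of 4, each full burst moves 16 bytes, so a 100-byte transfer
+ * runs 6 full bursts and leaves 4 bytes, i.e. one dreg of burst-size
+ * width to be drained here.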
+ */
+static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
+               const struct _xfer_spec *pxs, int transfer_length)
+{
+       int off = 0;
+       int dregs_ccr;
+
+       if (transfer_length == 0)
+               return off;
+
+       switch (pxs->desc->rqtype) {
+       case DMA_MEM_TO_DEV:
+               /* fall through */
+       case DMA_DEV_TO_MEM:
+               off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs,
+                       transfer_length, SINGLE);
+               break;
+
+       case DMA_MEM_TO_MEM:
+               dregs_ccr = pxs->ccr;
+               dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
+                       (0xf << CC_DSTBRSTLEN_SHFT));
+               dregs_ccr |= (((transfer_length - 1) & 0xf) <<
+                       CC_SRCBRSTLEN_SHFT);
+               dregs_ccr |= (((transfer_length - 1) & 0xf) <<
+                       CC_DSTBRSTLEN_SHFT);
+               off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
+               off += _ldst_memtomem(dry_run, &buf[off], pxs, 1);
+               break;
+
        default:
-               off += 0x40000000; /* Scare off the Client */
+               /* this code should be unreachable */
+               WARN_ON(1);
                break;
        }
 
@@ -1256,6 +1348,8 @@ static inline int _setup_loops(struct pl330_dmac *pl330,
        struct pl330_xfer *x = &pxs->desc->px;
        u32 ccr = pxs->ccr;
        unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
+       int num_dregs = (x->bytes - BURST_TO_BYTE(bursts, ccr)) /
+               BRST_SIZE(ccr);
        int off = 0;
 
        while (bursts) {
@@ -1263,6 +1357,7 @@ static inline int _setup_loops(struct pl330_dmac *pl330,
                off += _loop(pl330, dry_run, &buf[off], &c, pxs);
                bursts -= c;
        }
+       off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs);
 
        return off;
 }
@@ -1294,7 +1389,6 @@ static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
                      struct _xfer_spec *pxs)
 {
        struct _pl330_req *req = &thrd->req[index];
-       struct pl330_xfer *x;
        u8 *buf = req->mc_cpu;
        int off = 0;
 
@@ -1303,11 +1397,6 @@ static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
        /* DMAMOV CCR, ccr */
        off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
 
-       x = &pxs->desc->px;
-       /* Error if xfer length is not aligned at burst size */
-       if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
-               return -EINVAL;
-
        off += _setup_xfer(pl330, dry_run, &buf[off], pxs);
 
        /* DMASEV peripheral/event */
@@ -1365,6 +1454,20 @@ static int pl330_submit_req(struct pl330_thread *thrd,
        u32 ccr;
        int ret = 0;
 
+       switch (desc->rqtype) {
+       case DMA_MEM_TO_DEV:
+               break;
+
+       case DMA_DEV_TO_MEM:
+               break;
+
+       case DMA_MEM_TO_MEM:
+               break;
+
+       default:
+               return -ENOTSUPP;
+       }
+
        if (pl330->state == DYING
                || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
                dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
@@ -2106,6 +2209,18 @@ static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
        return true;
 }
 
+static int fixup_burst_len(int max_burst_len, int quirks)
+{
+       if (quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+               return 1;
+       else if (max_burst_len > PL330_MAX_BURST)
+               return PL330_MAX_BURST;
+       else if (max_burst_len < 1)
+               return 1;
+       else
+               return max_burst_len;
+}
+
 static int pl330_config(struct dma_chan *chan,
                        struct dma_slave_config *slave_config)
 {
@@ -2117,15 +2232,15 @@ static int pl330_config(struct dma_chan *chan,
                        pch->fifo_addr = slave_config->dst_addr;
                if (slave_config->dst_addr_width)
                        pch->burst_sz = __ffs(slave_config->dst_addr_width);
-               if (slave_config->dst_maxburst)
-                       pch->burst_len = slave_config->dst_maxburst;
+               pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
+                       pch->dmac->quirks);
        } else if (slave_config->direction == DMA_DEV_TO_MEM) {
                if (slave_config->src_addr)
                        pch->fifo_addr = slave_config->src_addr;
                if (slave_config->src_addr_width)
                        pch->burst_sz = __ffs(slave_config->src_addr_width);
-               if (slave_config->src_maxburst)
-                       pch->burst_len = slave_config->src_maxburst;
+               pch->burst_len = fixup_burst_len(slave_config->src_maxburst,
+                       pch->dmac->quirks);
        }
 
        return 0;
@@ -2519,14 +2634,8 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
        burst_len >>= desc->rqcfg.brst_size;
 
        /* src/dst_burst_len can't be more than 16 */
-       if (burst_len > 16)
-               burst_len = 16;
-
-       while (burst_len > 1) {
-               if (!(len % (burst_len << desc->rqcfg.brst_size)))
-                       break;
-               burst_len--;
-       }
+       if (burst_len > PL330_MAX_BURST)
+               burst_len = PL330_MAX_BURST;
 
        return burst_len;
 }
@@ -2598,7 +2707,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 
                desc->rqtype = direction;
                desc->rqcfg.brst_size = pch->burst_sz;
-               desc->rqcfg.brst_len = 1;
+               desc->rqcfg.brst_len = pch->burst_len;
                desc->bytes_requested = period_len;
                fill_px(&desc->px, dst, src, period_len);
 
@@ -2743,7 +2852,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                }
 
                desc->rqcfg.brst_size = pch->burst_sz;
-               desc->rqcfg.brst_len = 1;
+               desc->rqcfg.brst_len = pch->burst_len;
                desc->rqtype = direction;
                desc->bytes_requested = sg_dma_len(sg);
        }
index 4a828c18099a6e411ff928256992c836f93ebdcb..1617715aa6e072616374bfbb45089f4b119ad9c6 100644 (file)
@@ -451,6 +451,7 @@ static void bam_reset_channel(struct bam_chan *bchan)
 /**
  * bam_chan_init_hw - Initialize channel hardware
  * @bchan: bam channel
+ * @dir: DMA transfer direction
  *
  * This function resets and initializes the BAM channel
  */
@@ -673,7 +674,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
                                remainder = 0;
                        }
 
-                       async_desc->length += desc->size;
+                       async_desc->length += le16_to_cpu(desc->size);
                        desc++;
                } while (remainder > 0);
        }
@@ -687,7 +688,7 @@ err_out:
 
 /**
  * bam_dma_terminate_all - terminate all transactions on a channel
- * @bchan: bam dma channel
+ * @chan: bam dma channel
  *
  * Dequeues and frees all transactions
  * No callbacks are done
@@ -918,7 +919,8 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                continue;
 
                        for (i = 0; i < async_desc->num_desc; i++)
-                               residue += async_desc->curr_desc[i].size;
+                               residue += le16_to_cpu(
+                                               async_desc->curr_desc[i].size);
                }
        }
 
@@ -958,7 +960,7 @@ static void bam_apply_new_config(struct bam_chan *bchan,
 
 /**
  * bam_start_dma - start next transaction
- * @bchan - bam dma channel
+ * @bchan: bam dma channel
  */
 static void bam_start_dma(struct bam_chan *bchan)
 {
index 963cc5228d05a5259214f2ba825c3ea735fae49d..43d4b00b81388e061b8b9bc2f251617509030821 100644 (file)
@@ -616,8 +616,7 @@ static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
 static ssize_t hidma_show_values(struct device *dev,
                                 struct device_attribute *attr, char *buf)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct hidma_dev *mdev = platform_get_drvdata(pdev);
+       struct hidma_dev *mdev = dev_get_drvdata(dev);
 
        buf[0] = 0;
 
index d61f1068a34b31087c1ad3978aebcb3d79bd3af1..cbb89eafd8442622a5fb3753a7f0c7a29a7d3abf 100644 (file)
@@ -107,8 +107,7 @@ static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = {
 static ssize_t show_values(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+       struct hidma_mgmt_dev *mdev = dev_get_drvdata(dev);
        unsigned int i;
 
        buf[0] = 0;
@@ -125,8 +124,7 @@ static ssize_t show_values(struct device *dev, struct device_attribute *attr,
 static ssize_t set_values(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+       struct hidma_mgmt_dev *mdev = dev_get_drvdata(dev);
        unsigned long tmp;
        unsigned int i;
        int rc;
index c94ffab0d25c756609b1d0c01733a4abc322a8a1..04a74e0a95b799d0b25171907093403ffcd8ee2d 100644 (file)
@@ -443,7 +443,6 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
        return ret;
 }
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 static irqreturn_t sh_dmae_err(int irq, void *data)
 {
        struct sh_dmae_device *shdev = data;
@@ -454,7 +453,6 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
        sh_dmae_reset(shdev);
        return IRQ_HANDLED;
 }
-#endif
 
 static bool sh_dmae_desc_completed(struct shdma_chan *schan,
                                   struct shdma_desc *sdesc)
@@ -686,11 +684,8 @@ static int sh_dmae_probe(struct platform_device *pdev)
        const struct sh_dmae_pdata *pdata;
        unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
        int chan_irq[SH_DMAE_MAX_CHANNELS];
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
        unsigned long irqflags = 0;
-       int errirq;
-#endif
-       int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
+       int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
        struct sh_dmae_device *shdev;
        struct dma_device *dma_dev;
        struct resource *chan, *dmars, *errirq_res, *chanirq_res;
@@ -792,33 +787,32 @@ static int sh_dmae_probe(struct platform_device *pdev)
        if (err)
                goto rst_err;
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-       chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+       if (IS_ENABLED(CONFIG_CPU_SH4) || IS_ENABLED(CONFIG_ARCH_RENESAS)) {
+               chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
 
-       if (!chanirq_res)
-               chanirq_res = errirq_res;
-       else
-               irqres++;
+               if (!chanirq_res)
+                       chanirq_res = errirq_res;
+               else
+                       irqres++;
 
-       if (chanirq_res == errirq_res ||
-           (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
-               irqflags = IRQF_SHARED;
+               if (chanirq_res == errirq_res ||
+                   (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
+                       irqflags = IRQF_SHARED;
 
-       errirq = errirq_res->start;
+               errirq = errirq_res->start;
 
-       err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
-                              "DMAC Address Error", shdev);
-       if (err) {
-               dev_err(&pdev->dev,
-                       "DMA failed requesting irq #%d, error %d\n",
-                       errirq, err);
-               goto eirq_err;
+               err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err,
+                                      irqflags, "DMAC Address Error", shdev);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "DMA failed requesting irq #%d, error %d\n",
+                               errirq, err);
+                       goto eirq_err;
+               }
+       } else {
+               chanirq_res = errirq_res;
        }
 
-#else
-       chanirq_res = errirq_res;
-#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
-
        if (chanirq_res->start == chanirq_res->end &&
            !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
                /* Special case - all multiplexed */
@@ -884,9 +878,7 @@ edmadevreg:
 chan_probe_err:
        sh_dmae_chan_remove(shdev);
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 eirq_err:
-#endif
 rst_err:
        spin_lock_irq(&sh_dmae_lock);
        list_del_rcu(&shdev->node);
index 52ebccb483bed15c480134b5e176f4e0b22acca9..55df0d41355b0622f4fb9ede60fd6bddbfe5a4e6 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma/sprd-dma.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #define SPRD_DMA_SRC_TRSF_STEP_OFFSET  0
 #define SPRD_DMA_TRSF_STEP_MASK                GENMASK(15, 0)
 
+/* define the DMA transfer step type */
+#define SPRD_DMA_NONE_STEP             0
+#define SPRD_DMA_BYTE_STEP             1
+#define SPRD_DMA_SHORT_STEP            2
+#define SPRD_DMA_WORD_STEP             4
+#define SPRD_DMA_DWORD_STEP            8
+
 #define SPRD_DMA_SOFTWARE_UID          0
 
-/*
- * enum sprd_dma_req_mode: define the DMA request mode
- * @SPRD_DMA_FRAG_REQ: fragment request mode
- * @SPRD_DMA_BLK_REQ: block request mode
- * @SPRD_DMA_TRANS_REQ: transaction request mode
- * @SPRD_DMA_LIST_REQ: link-list request mode
- *
- * We have 4 types of request mode: fragment mode, block mode, transaction mode
- * and link-list mode. One transaction can contain several blocks, and one block
- * can contain several fragments. Link-list mode means we can save several DMA
- * configurations in one reserved memory area, and the DMA can then fetch each
- * configuration automatically to start the transfer.
- */
-enum sprd_dma_req_mode {
-       SPRD_DMA_FRAG_REQ,
-       SPRD_DMA_BLK_REQ,
-       SPRD_DMA_TRANS_REQ,
-       SPRD_DMA_LIST_REQ,
-};
-
-/*
- * enum sprd_dma_int_type: define the DMA interrupt type
- * @SPRD_DMA_NO_INT: no need to generate DMA interrupts.
- * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
- * is done.
- * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
- * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
- * or one block request is done.
- * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
- * request is done.
- * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
- * transaction request or fragment request is done.
- * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
- * transaction request or block request is done.
- * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
- * is done.
- * @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is
- * incorrect.
- */
-enum sprd_dma_int_type {
-       SPRD_DMA_NO_INT,
-       SPRD_DMA_FRAG_INT,
-       SPRD_DMA_BLK_INT,
-       SPRD_DMA_BLK_FRAG_INT,
-       SPRD_DMA_TRANS_INT,
-       SPRD_DMA_TRANS_FRAG_INT,
-       SPRD_DMA_TRANS_BLK_INT,
-       SPRD_DMA_LIST_INT,
-       SPRD_DMA_CFGERR_INT,
+/* dma data width values */
+enum sprd_dma_datawidth {
+       SPRD_DMA_DATAWIDTH_1_BYTE,
+       SPRD_DMA_DATAWIDTH_2_BYTES,
+       SPRD_DMA_DATAWIDTH_4_BYTES,
+       SPRD_DMA_DATAWIDTH_8_BYTES,
 };
 
 /* dma channel hardware configuration */
@@ -199,6 +164,7 @@ struct sprd_dma_desc {
 struct sprd_dma_chn {
        struct virt_dma_chan    vc;
        void __iomem            *chn_base;
+       struct dma_slave_config slave_cfg;
        u32                     chn_num;
        u32                     dev_id;
        struct sprd_dma_desc    *cur_desc;
@@ -587,52 +553,97 @@ static void sprd_dma_issue_pending(struct dma_chan *chan)
        spin_unlock_irqrestore(&schan->vc.lock, flags);
 }
 
-static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
-                          dma_addr_t dest, dma_addr_t src, size_t len)
+static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
+{
+       switch (buswidth) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+       case DMA_SLAVE_BUSWIDTH_8_BYTES:
+               return ffs(buswidth) - 1;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
+{
+       switch (buswidth) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+       case DMA_SLAVE_BUSWIDTH_8_BYTES:
+               return buswidth;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static int sprd_dma_fill_desc(struct dma_chan *chan,
+                             struct sprd_dma_desc *sdesc,
+                             dma_addr_t src, dma_addr_t dst, u32 len,
+                             enum dma_transfer_direction dir,
+                             unsigned long flags,
+                             struct dma_slave_config *slave_cfg)
 {
        struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
+       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
        struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
-       u32 datawidth, src_step, des_step, fragment_len;
-       u32 block_len, req_mode, irq_mode, transcation_len;
-       u32 fix_mode = 0, fix_en = 0;
-
-       if (IS_ALIGNED(len, 4)) {
-               datawidth = 2;
-               src_step = 4;
-               des_step = 4;
-       } else if (IS_ALIGNED(len, 2)) {
-               datawidth = 1;
-               src_step = 2;
-               des_step = 2;
+       u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
+       u32 int_mode = flags & SPRD_DMA_INT_MASK;
+       int src_datawidth, dst_datawidth, src_step, dst_step;
+       u32 temp, fix_mode = 0, fix_en = 0;
+
+       if (dir == DMA_MEM_TO_DEV) {
+               src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
+               if (src_step < 0) {
+                       dev_err(sdev->dma_dev.dev, "invalid source step\n");
+                       return src_step;
+               }
+               dst_step = SPRD_DMA_NONE_STEP;
        } else {
-               datawidth = 0;
-               src_step = 1;
-               des_step = 1;
+               dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
+               if (dst_step < 0) {
+                       dev_err(sdev->dma_dev.dev, "invalid destination step\n");
+                       return dst_step;
+               }
+               src_step = SPRD_DMA_NONE_STEP;
        }
 
-       fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
-       if (len <= SPRD_DMA_BLK_LEN_MASK) {
-               block_len = len;
-               transcation_len = 0;
-               req_mode = SPRD_DMA_BLK_REQ;
-               irq_mode = SPRD_DMA_BLK_INT;
-       } else {
-               block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
-               transcation_len = len;
-               req_mode = SPRD_DMA_TRANS_REQ;
-               irq_mode = SPRD_DMA_TRANS_INT;
+       src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
+       if (src_datawidth < 0) {
+               dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
+               return src_datawidth;
        }
 
+       dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
+       if (dst_datawidth < 0) {
+               dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
+               return dst_datawidth;
+       }
+
+       if (slave_cfg->slave_id)
+               schan->dev_id = slave_cfg->slave_id;
+
        hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
-       hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
-                            SPRD_DMA_HIGH_ADDR_MASK);
-       hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
-                           SPRD_DMA_HIGH_ADDR_MASK);
 
-       hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
-       hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
+       /*
+        * wrap_ptr and wrap_to save the high 4 bits of the source and
+        * destination addresses.
+        */
+       hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
+       hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
+       hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
+       hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;
 
-       if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
+       /*
+        * If the src step and dst step are both 0 or both non-zero, we cannot
+        * enable the fix mode. If one is 0 and the other is not, we can
+        * enable the fix mode.
+        */
+       if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
                fix_en = 0;
        } else {
                fix_en = 1;
@@ -642,87 +653,119 @@ static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
                        fix_mode = 0;
        }
 
-       hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
-               datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
-               req_mode << SPRD_DMA_REQ_MODE_OFFSET |
-               fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
-               fix_en << SPRD_DMA_FIX_EN_OFFSET |
-               (fragment_len & SPRD_DMA_FRG_LEN_MASK);
-       hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;
-
-       hw->intc = SPRD_DMA_CFG_ERR_INT_EN;
-
-       switch (irq_mode) {
-       case SPRD_DMA_NO_INT:
-               break;
-
-       case SPRD_DMA_FRAG_INT:
-               hw->intc |= SPRD_DMA_FRAG_INT_EN;
-               break;
+       hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;
 
-       case SPRD_DMA_BLK_INT:
-               hw->intc |= SPRD_DMA_BLK_INT_EN;
-               break;
+       temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
+       temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
+       temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
+       temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
+       temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
+       temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
+       hw->frg_len = temp;
 
-       case SPRD_DMA_BLK_FRAG_INT:
-               hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
-               break;
+       hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+       hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
 
-       case SPRD_DMA_TRANS_INT:
-               hw->intc |= SPRD_DMA_TRANS_INT_EN;
-               break;
+       temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
+       temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+       hw->trsf_step = temp;
 
-       case SPRD_DMA_TRANS_FRAG_INT:
-               hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
-               break;
+       hw->frg_step = 0;
+       hw->src_blk_step = 0;
+       hw->des_blk_step = 0;
+       return 0;
+}
 
-       case SPRD_DMA_TRANS_BLK_INT:
-               hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
-               break;
+static struct dma_async_tx_descriptor *
+sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+                        size_t len, unsigned long flags)
+{
+       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+       struct sprd_dma_desc *sdesc;
+       struct sprd_dma_chn_hw *hw;
+       enum sprd_dma_datawidth datawidth;
+       u32 step, temp;
 
-       case SPRD_DMA_LIST_INT:
-               hw->intc |= SPRD_DMA_LIST_INT_EN;
-               break;
+       sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
+       if (!sdesc)
+               return NULL;
 
-       case SPRD_DMA_CFGERR_INT:
-               hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
-               break;
+       hw = &sdesc->chn_hw;
 
-       default:
-               dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
-               return -EINVAL;
+       hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
+       hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
+       hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
+       hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
+       hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+               SPRD_DMA_HIGH_ADDR_MASK;
+       hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+               SPRD_DMA_HIGH_ADDR_MASK;
+
+       if (IS_ALIGNED(len, 8)) {
+               datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
+               step = SPRD_DMA_DWORD_STEP;
+       } else if (IS_ALIGNED(len, 4)) {
+               datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
+               step = SPRD_DMA_WORD_STEP;
+       } else if (IS_ALIGNED(len, 2)) {
+               datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
+               step = SPRD_DMA_SHORT_STEP;
+       } else {
+               datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
+               step = SPRD_DMA_BYTE_STEP;
        }
 
-       if (transcation_len == 0)
-               hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
-       else
-               hw->trsc_len = transcation_len & SPRD_DMA_TRSC_LEN_MASK;
+       temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
+       temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
+       temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
+       temp |= len & SPRD_DMA_FRG_LEN_MASK;
+       hw->frg_len = temp;
 
-       hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
-                       SPRD_DMA_DEST_TRSF_STEP_OFFSET |
-                       (src_step & SPRD_DMA_TRSF_STEP_MASK) <<
-                       SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+       hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+       hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
 
-       hw->frg_step = 0;
-       hw->src_blk_step = 0;
-       hw->des_blk_step = 0;
-       hw->src_blk_step = 0;
-       return 0;
+       temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
+       temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+       hw->trsf_step = temp;
+
+       return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
 }
 
 static struct dma_async_tx_descriptor *
-sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
-                        size_t len, unsigned long flags)
+sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+                      unsigned int sglen, enum dma_transfer_direction dir,
+                      unsigned long flags, void *context)
 {
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+       struct dma_slave_config *slave_cfg = &schan->slave_cfg;
+       dma_addr_t src = 0, dst = 0;
        struct sprd_dma_desc *sdesc;
-       int ret;
+       struct scatterlist *sg;
+       u32 len = 0;
+       int ret, i;
+
+       /* TODO: for now we only support one sg for each DMA configuration. */
+       if (!is_slave_direction(dir) || sglen > 1)
+               return NULL;
 
        sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
        if (!sdesc)
                return NULL;
 
-       ret = sprd_dma_config(chan, sdesc, dest, src, len);
+       for_each_sg(sgl, sg, sglen, i) {
+               len = sg_dma_len(sg);
+
+               if (dir == DMA_MEM_TO_DEV) {
+                       src = sg_dma_address(sg);
+                       dst = slave_cfg->dst_addr;
+               } else {
+                       src = slave_cfg->src_addr;
+                       dst = sg_dma_address(sg);
+               }
+       }
+
+       ret = sprd_dma_fill_desc(chan, sdesc, src, dst, len, dir, flags,
+                                slave_cfg);
        if (ret) {
                kfree(sdesc);
                return NULL;
@@ -731,6 +774,19 @@ sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
 }
 
+static int sprd_dma_slave_config(struct dma_chan *chan,
+                                struct dma_slave_config *config)
+{
+       struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+       struct dma_slave_config *slave_cfg = &schan->slave_cfg;
+
+       if (!is_slave_direction(config->direction))
+               return -EINVAL;
+
+       memcpy(slave_cfg, config, sizeof(*config));
+       return 0;
+}
+
 static int sprd_dma_pause(struct dma_chan *chan)
 {
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
@@ -842,10 +898,9 @@ static int sprd_dma_probe(struct platform_device *pdev)
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       sdev->glb_base = devm_ioremap_nocache(&pdev->dev, res->start,
-                                             resource_size(res));
-       if (!sdev->glb_base)
-               return -ENOMEM;
+       sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(sdev->glb_base))
+               return PTR_ERR(sdev->glb_base);
 
        dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
        sdev->total_chns = chn_count;
@@ -858,6 +913,8 @@ static int sprd_dma_probe(struct platform_device *pdev)
        sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
        sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
        sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
+       sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
+       sdev->dma_dev.device_config = sprd_dma_slave_config;
        sdev->dma_dev.device_pause = sprd_dma_pause;
        sdev->dma_dev.device_resume = sprd_dma_resume;
        sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;
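The sprd-dma changes above wire up device_config and device_prep_slave_sg, so a peripheral driver can now drive this controller through the generic dmaengine slave API. Below is a minimal client-side sketch under assumed names: chan, fifo_phys, buf_phys and len are hypothetical and would come from the caller (the channel via dma_request_chan()), and the Spreadtrum-specific request/interrupt mode bits that the new include/linux/dma/sprd-dma.h header encodes in the prep flags are omitted.

	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_phys,	/* hypothetical device FIFO address */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst   = 4,
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* lands in sprd_dma_slave_config(), which just caches the config */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* single-segment slave transfer; prep uses the cached slave_cfg */
	desc = dmaengine_prep_slave_single(chan, buf_phys, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

This is only a sketch of the generic client flow, not the driver's own test code.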
index c2b089af04208c0b0077823d7b19efb344409888..1bc149af990e98ed2c08f7858e7953de04862abb 100644 (file)
@@ -2889,8 +2889,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 #ifdef CONFIG_PM_SLEEP
 static int dma40_suspend(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct d40_base *base = platform_get_drvdata(pdev);
+       struct d40_base *base = dev_get_drvdata(dev);
        int ret;
 
        ret = pm_runtime_force_suspend(dev);
@@ -2904,8 +2903,7 @@ static int dma40_suspend(struct device *dev)
 
 static int dma40_resume(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct d40_base *base = platform_get_drvdata(pdev);
+       struct d40_base *base = dev_get_drvdata(dev);
        int ret = 0;
 
        if (base->lcpa_regulator) {
@@ -2970,8 +2968,7 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
 
 static int dma40_runtime_suspend(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct d40_base *base = platform_get_drvdata(pdev);
+       struct d40_base *base = dev_get_drvdata(dev);
 
        d40_save_restore_registers(base, true);
 
@@ -2985,8 +2982,7 @@ static int dma40_runtime_suspend(struct device *dev)
 
 static int dma40_runtime_resume(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct d40_base *base = platform_get_drvdata(pdev);
+       struct d40_base *base = dev_get_drvdata(dev);
 
        d40_save_restore_registers(base, false);
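The ste_dma40 hunks above replace the to_platform_device()/platform_get_drvdata() detour with dev_get_drvdata() in the PM callbacks. Both return the pointer that probe stored with platform_set_drvdata(), so the shorter form is equivalent. A generic sketch of the pattern, with hypothetical foo_priv/foo_save_state names:

	static int foo_suspend(struct device *dev)
	{
		/* same pointer as platform_get_drvdata(to_platform_device(dev)) */
		struct foo_priv *priv = dev_get_drvdata(dev);

		return foo_save_state(priv);	/* hypothetical helper */
	}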
 
index daa1602eb9f55af734256408974a8935bfbbe948..9dc450b7ace6c1c6291064dc6d37255860d74a14 100644 (file)
@@ -252,13 +252,17 @@ struct stm32_mdma_hwdesc {
        u32 cmdr;
 } __aligned(64);
 
+struct stm32_mdma_desc_node {
+       struct stm32_mdma_hwdesc *hwdesc;
+       dma_addr_t hwdesc_phys;
+};
+
 struct stm32_mdma_desc {
        struct virt_dma_desc vdesc;
        u32 ccr;
-       struct stm32_mdma_hwdesc *hwdesc;
-       dma_addr_t hwdesc_phys;
        bool cyclic;
        u32 count;
+       struct stm32_mdma_desc_node node[];
 };
 
 struct stm32_mdma_chan {
@@ -344,30 +348,42 @@ static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
                struct stm32_mdma_chan *chan, u32 count)
 {
        struct stm32_mdma_desc *desc;
+       int i;
 
-       desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+       desc = kzalloc(offsetof(typeof(*desc), node[count]), GFP_NOWAIT);
        if (!desc)
                return NULL;
 
-       desc->hwdesc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
-                                     &desc->hwdesc_phys);
-       if (!desc->hwdesc) {
-               dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
-               kfree(desc);
-               return NULL;
+       for (i = 0; i < count; i++) {
+               desc->node[i].hwdesc =
+                       dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
+                                      &desc->node[i].hwdesc_phys);
+               if (!desc->node[i].hwdesc)
+                       goto err;
        }
 
        desc->count = count;
 
        return desc;
+
+err:
+       dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
+       while (--i >= 0)
+               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+                             desc->node[i].hwdesc_phys);
+       kfree(desc);
+       return NULL;
 }
 
 static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
 {
        struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
        struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
+       int i;
 
-       dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys);
+       for (i = 0; i < desc->count; i++)
+               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+                             desc->node[i].hwdesc_phys);
        kfree(desc);
 }
 
@@ -410,13 +426,10 @@ static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
 static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
                                     enum dma_slave_buswidth width)
 {
-       u32 best_burst = max_burst;
-       u32 burst_len = best_burst * width;
+       u32 best_burst;
 
-       while ((burst_len > 0) && (tlen % burst_len)) {
-               best_burst = best_burst >> 1;
-               burst_len = best_burst * width;
-       }
+       best_burst = min((u32)1 << __ffs(tlen | buf_len),
+                        max_burst * width) / width;
 
        return (best_burst > 0) ? best_burst : 1;
 }
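/*
 * Worked example for the rewritten best-burst computation above, with
 * illustrative values: tlen = 128, buf_len = 96, width = 4 bytes and
 * max_burst = 16 beats:
 *   tlen | buf_len       = 224, so 1 << __ffs(224) = 32 bytes
 *   max_burst * width    = 64 bytes
 *   min(32, 64) / width  = 8 beats
 * Unlike the removed loop, buf_len is now taken into account, so the burst
 * is capped by the 32-byte alignment of the lengths rather than by the
 * 64-byte hardware maximum.
 */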
@@ -669,18 +682,18 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
 }
 
 static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
-                                  struct stm32_mdma_hwdesc *hwdesc)
+                                  struct stm32_mdma_desc_node *node)
 {
-       dev_dbg(chan2dev(chan), "hwdesc:  0x%p\n", hwdesc);
-       dev_dbg(chan2dev(chan), "CTCR:    0x%08x\n", hwdesc->ctcr);
-       dev_dbg(chan2dev(chan), "CBNDTR:  0x%08x\n", hwdesc->cbndtr);
-       dev_dbg(chan2dev(chan), "CSAR:    0x%08x\n", hwdesc->csar);
-       dev_dbg(chan2dev(chan), "CDAR:    0x%08x\n", hwdesc->cdar);
-       dev_dbg(chan2dev(chan), "CBRUR:   0x%08x\n", hwdesc->cbrur);
-       dev_dbg(chan2dev(chan), "CLAR:    0x%08x\n", hwdesc->clar);
-       dev_dbg(chan2dev(chan), "CTBR:    0x%08x\n", hwdesc->ctbr);
-       dev_dbg(chan2dev(chan), "CMAR:    0x%08x\n", hwdesc->cmar);
-       dev_dbg(chan2dev(chan), "CMDR:    0x%08x\n\n", hwdesc->cmdr);
+       dev_dbg(chan2dev(chan), "hwdesc:  %pad\n", &node->hwdesc_phys);
+       dev_dbg(chan2dev(chan), "CTCR:    0x%08x\n", node->hwdesc->ctcr);
+       dev_dbg(chan2dev(chan), "CBNDTR:  0x%08x\n", node->hwdesc->cbndtr);
+       dev_dbg(chan2dev(chan), "CSAR:    0x%08x\n", node->hwdesc->csar);
+       dev_dbg(chan2dev(chan), "CDAR:    0x%08x\n", node->hwdesc->cdar);
+       dev_dbg(chan2dev(chan), "CBRUR:   0x%08x\n", node->hwdesc->cbrur);
+       dev_dbg(chan2dev(chan), "CLAR:    0x%08x\n", node->hwdesc->clar);
+       dev_dbg(chan2dev(chan), "CTBR:    0x%08x\n", node->hwdesc->ctbr);
+       dev_dbg(chan2dev(chan), "CMAR:    0x%08x\n", node->hwdesc->cmar);
+       dev_dbg(chan2dev(chan), "CMDR:    0x%08x\n\n", node->hwdesc->cmdr);
 }
 
 static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
@@ -694,7 +707,7 @@ static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
        struct stm32_mdma_hwdesc *hwdesc;
        u32 next = count + 1;
 
-       hwdesc = &desc->hwdesc[count];
+       hwdesc = desc->node[count].hwdesc;
        hwdesc->ctcr = ctcr;
        hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
                        STM32_MDMA_CBNDTR_BRDUM |
@@ -704,19 +717,20 @@ static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
        hwdesc->csar = src_addr;
        hwdesc->cdar = dst_addr;
        hwdesc->cbrur = 0;
-       hwdesc->clar = desc->hwdesc_phys + next * sizeof(*hwdesc);
        hwdesc->ctbr = ctbr;
        hwdesc->cmar = config->mask_addr;
        hwdesc->cmdr = config->mask_data;
 
        if (is_last) {
                if (is_cyclic)
-                       hwdesc->clar = desc->hwdesc_phys;
+                       hwdesc->clar = desc->node[0].hwdesc_phys;
                else
                        hwdesc->clar = 0;
+       } else {
+               hwdesc->clar = desc->node[next].hwdesc_phys;
        }
 
-       stm32_mdma_dump_hwdesc(chan, hwdesc);
+       stm32_mdma_dump_hwdesc(chan, &desc->node[count]);
 }
 
 static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
@@ -780,7 +794,7 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
 {
        struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
        struct stm32_mdma_desc *desc;
-       int ret;
+       int i, ret;
 
        /*
         * Once DMA is in setup cyclic mode the channel we cannot assign this
@@ -806,7 +820,9 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
 
 xfer_setup_err:
-       dma_pool_free(chan->desc_pool, &desc->hwdesc, desc->hwdesc_phys);
+       for (i = 0; i < desc->count; i++)
+               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+                             desc->node[i].hwdesc_phys);
        kfree(desc);
        return NULL;
 }
@@ -895,7 +911,9 @@ stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
 
 xfer_setup_err:
-       dma_pool_free(chan->desc_pool, &desc->hwdesc, desc->hwdesc_phys);
+       for (i = 0; i < desc->count; i++)
+               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+                             desc->node[i].hwdesc_phys);
        kfree(desc);
        return NULL;
 }
@@ -1009,7 +1027,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
                        ctcr |= STM32_MDMA_CTCR_PKE;
 
                /* Prepare hardware descriptor */
-               hwdesc = desc->hwdesc;
+               hwdesc = desc->node[0].hwdesc;
                hwdesc->ctcr = ctcr;
                hwdesc->cbndtr = cbndtr;
                hwdesc->csar = src;
@@ -1020,7 +1038,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
                hwdesc->cmar = 0;
                hwdesc->cmdr = 0;
 
-               stm32_mdma_dump_hwdesc(chan, hwdesc);
+               stm32_mdma_dump_hwdesc(chan, &desc->node[0]);
        } else {
                /* Setup a LLI transfer */
                ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
@@ -1120,7 +1138,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
        }
 
        chan->desc = to_stm32_mdma_desc(vdesc);
-       hwdesc = chan->desc->hwdesc;
+       hwdesc = chan->desc->node[0].hwdesc;
        chan->curr_hwdesc = 0;
 
        stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
@@ -1198,7 +1216,7 @@ static int stm32_mdma_resume(struct dma_chan *c)
        unsigned long flags;
        u32 status, reg;
 
-       hwdesc = &chan->desc->hwdesc[chan->curr_hwdesc];
+       hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
 
        spin_lock_irqsave(&chan->vchan.lock, flags);
 
@@ -1268,13 +1286,13 @@ static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
                                      u32 curr_hwdesc)
 {
        struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       struct stm32_mdma_hwdesc *hwdesc = desc->node[0].hwdesc;
        u32 cbndtr, residue, modulo, burst_size;
        int i;
 
        residue = 0;
        for (i = curr_hwdesc + 1; i < desc->count; i++) {
-               struct stm32_mdma_hwdesc *hwdesc = &desc->hwdesc[i];
-
+               hwdesc = desc->node[i].hwdesc;
                residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
        }
        cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
@@ -1503,7 +1521,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
 
        c = dma_get_any_slave_channel(&dmadev->ddev);
        if (!c) {
-               dev_err(mdma2dev(dmadev), "No more channel avalaible\n");
+               dev_err(mdma2dev(dmadev), "No more channels available\n");
                return NULL;
        }
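The stm32-mdma rework above moves from one contiguous hwdesc allocation to one dma_pool element per link-list node, tracked in a flexible array sized with offsetof(typeof(*desc), node[count]). A condensed sketch of the same allocate-and-unwind pattern, using hypothetical my_desc/my_node names (assumes <linux/dmapool.h> and <linux/slab.h>; the real driver's fields differ):

	struct my_node {
		void		*va;
		dma_addr_t	pa;
	};

	struct my_desc {
		unsigned int	count;
		struct my_node	node[];		/* one entry per hardware descriptor */
	};

	static struct my_desc *my_desc_alloc(struct dma_pool *pool, unsigned int count)
	{
		struct my_desc *desc;
		int i;

		/* trailing array sized at allocation time */
		desc = kzalloc(offsetof(typeof(*desc), node[count]), GFP_NOWAIT);
		if (!desc)
			return NULL;

		for (i = 0; i < count; i++) {
			desc->node[i].va = dma_pool_alloc(pool, GFP_NOWAIT,
							  &desc->node[i].pa);
			if (!desc->node[i].va)
				goto err;
		}
		desc->count = count;
		return desc;

	err:	/* free only the nodes that were successfully allocated */
		while (--i >= 0)
			dma_pool_free(pool, desc->node[i].va, desc->node[i].pa);
		kfree(desc);
		return NULL;
	}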
 
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
deleted file mode 100644 (file)
index 9272b17..0000000
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- *  Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
- *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_dma.h>
-
-#define TI_XBAR_DRA7           0
-#define TI_XBAR_AM335X         1
-static const u32 ti_xbar_type[] = {
-       [TI_XBAR_DRA7] = TI_XBAR_DRA7,
-       [TI_XBAR_AM335X] = TI_XBAR_AM335X,
-};
-
-static const struct of_device_id ti_dma_xbar_match[] = {
-       {
-               .compatible = "ti,dra7-dma-crossbar",
-               .data = &ti_xbar_type[TI_XBAR_DRA7],
-       },
-       {
-               .compatible = "ti,am335x-edma-crossbar",
-               .data = &ti_xbar_type[TI_XBAR_AM335X],
-       },
-       {},
-};
-
-/* Crossbar on AM335x/AM437x family */
-#define TI_AM335X_XBAR_LINES   64
-
-struct ti_am335x_xbar_data {
-       void __iomem *iomem;
-
-       struct dma_router dmarouter;
-
-       u32 xbar_events; /* maximum number of events to select in xbar */
-       u32 dma_requests; /* number of DMA requests on eDMA */
-};
-
-struct ti_am335x_xbar_map {
-       u16 dma_line;
-       u8 mux_val;
-};
-
-static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
-{
-       /*
-        * TPCC_EVT_MUX_60_63 register layout is different than the
-        * rest, in the sense, that event 63 is mapped to lowest byte
-        * and event 60 is mapped to highest, handle it separately.
-        */
-       if (event >= 60 && event <= 63)
-               writeb_relaxed(val, iomem + (63 - event % 4));
-       else
-               writeb_relaxed(val, iomem + event);
-}
-
-static void ti_am335x_xbar_free(struct device *dev, void *route_data)
-{
-       struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
-       struct ti_am335x_xbar_map *map = route_data;
-
-       dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
-               map->mux_val, map->dma_line);
-
-       ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
-       kfree(map);
-}
-
-static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
-                                          struct of_dma *ofdma)
-{
-       struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
-       struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
-       struct ti_am335x_xbar_map *map;
-
-       if (dma_spec->args_count != 3)
-               return ERR_PTR(-EINVAL);
-
-       if (dma_spec->args[2] >= xbar->xbar_events) {
-               dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
-                       dma_spec->args[2]);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (dma_spec->args[0] >= xbar->dma_requests) {
-               dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
-                       dma_spec->args[0]);
-               return ERR_PTR(-EINVAL);
-       }
-
-       /* The of_node_put() will be done in the core for the node */
-       dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
-       if (!dma_spec->np) {
-               dev_err(&pdev->dev, "Can't get DMA master\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       map = kzalloc(sizeof(*map), GFP_KERNEL);
-       if (!map) {
-               of_node_put(dma_spec->np);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       map->dma_line = (u16)dma_spec->args[0];
-       map->mux_val = (u8)dma_spec->args[2];
-
-       dma_spec->args[2] = 0;
-       dma_spec->args_count = 2;
-
-       dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
-               map->mux_val, map->dma_line);
-
-       ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
-
-       return map;
-}
-
-static const struct of_device_id ti_am335x_master_match[] = {
-       { .compatible = "ti,edma3-tpcc", },
-       {},
-};
-
-static int ti_am335x_xbar_probe(struct platform_device *pdev)
-{
-       struct device_node *node = pdev->dev.of_node;
-       const struct of_device_id *match;
-       struct device_node *dma_node;
-       struct ti_am335x_xbar_data *xbar;
-       struct resource *res;
-       void __iomem *iomem;
-       int i, ret;
-
-       if (!node)
-               return -ENODEV;
-
-       xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
-       if (!xbar)
-               return -ENOMEM;
-
-       dma_node = of_parse_phandle(node, "dma-masters", 0);
-       if (!dma_node) {
-               dev_err(&pdev->dev, "Can't get DMA master node\n");
-               return -ENODEV;
-       }
-
-       match = of_match_node(ti_am335x_master_match, dma_node);
-       if (!match) {
-               dev_err(&pdev->dev, "DMA master is not supported\n");
-               of_node_put(dma_node);
-               return -EINVAL;
-       }
-
-       if (of_property_read_u32(dma_node, "dma-requests",
-                                &xbar->dma_requests)) {
-               dev_info(&pdev->dev,
-                        "Missing XBAR output information, using %u.\n",
-                        TI_AM335X_XBAR_LINES);
-               xbar->dma_requests = TI_AM335X_XBAR_LINES;
-       }
-       of_node_put(dma_node);
-
-       if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
-               dev_info(&pdev->dev,
-                        "Missing XBAR input information, using %u.\n",
-                        TI_AM335X_XBAR_LINES);
-               xbar->xbar_events = TI_AM335X_XBAR_LINES;
-       }
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       iomem = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(iomem))
-               return PTR_ERR(iomem);
-
-       xbar->iomem = iomem;
-
-       xbar->dmarouter.dev = &pdev->dev;
-       xbar->dmarouter.route_free = ti_am335x_xbar_free;
-
-       platform_set_drvdata(pdev, xbar);
-
-       /* Reset the crossbar */
-       for (i = 0; i < xbar->dma_requests; i++)
-               ti_am335x_xbar_write(xbar->iomem, i, 0);
-
-       ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
-                                    &xbar->dmarouter);
-
-       return ret;
-}
-
-/* Crossbar on DRA7xx family */
-#define TI_DRA7_XBAR_OUTPUTS   127
-#define TI_DRA7_XBAR_INPUTS    256
-
-struct ti_dra7_xbar_data {
-       void __iomem *iomem;
-
-       struct dma_router dmarouter;
-       struct mutex mutex;
-       unsigned long *dma_inuse;
-
-       u16 safe_val; /* Value to rest the crossbar lines */
-       u32 xbar_requests; /* number of DMA requests connected to XBAR */
-       u32 dma_requests; /* number of DMA requests forwarded to DMA */
-       u32 dma_offset;
-};
-
-struct ti_dra7_xbar_map {
-       u16 xbar_in;
-       int xbar_out;
-};
-
-static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
-{
-       writew_relaxed(val, iomem + (xbar * 2));
-}
-
-static void ti_dra7_xbar_free(struct device *dev, void *route_data)
-{
-       struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
-       struct ti_dra7_xbar_map *map = route_data;
-
-       dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
-               map->xbar_in, map->xbar_out);
-
-       ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
-       mutex_lock(&xbar->mutex);
-       clear_bit(map->xbar_out, xbar->dma_inuse);
-       mutex_unlock(&xbar->mutex);
-       kfree(map);
-}
-
-static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
-                                        struct of_dma *ofdma)
-{
-       struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
-       struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
-       struct ti_dra7_xbar_map *map;
-
-       if (dma_spec->args[0] >= xbar->xbar_requests) {
-               dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
-                       dma_spec->args[0]);
-               return ERR_PTR(-EINVAL);
-       }
-
-       /* The of_node_put() will be done in the core for the node */
-       dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
-       if (!dma_spec->np) {
-               dev_err(&pdev->dev, "Can't get DMA master\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       map = kzalloc(sizeof(*map), GFP_KERNEL);
-       if (!map) {
-               of_node_put(dma_spec->np);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       mutex_lock(&xbar->mutex);
-       map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
-                                           xbar->dma_requests);
-       if (map->xbar_out == xbar->dma_requests) {
-               mutex_unlock(&xbar->mutex);
-               dev_err(&pdev->dev, "Run out of free DMA requests\n");
-               kfree(map);
-               return ERR_PTR(-ENOMEM);
-       }
-       set_bit(map->xbar_out, xbar->dma_inuse);
-       mutex_unlock(&xbar->mutex);
-
-       map->xbar_in = (u16)dma_spec->args[0];
-
-       dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
-
-       dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
-               map->xbar_in, map->xbar_out);
-
-       ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
-
-       return map;
-}
-
-#define TI_XBAR_EDMA_OFFSET    0
-#define TI_XBAR_SDMA_OFFSET    1
-static const u32 ti_dma_offset[] = {
-       [TI_XBAR_EDMA_OFFSET] = 0,
-       [TI_XBAR_SDMA_OFFSET] = 1,
-};
-
-static const struct of_device_id ti_dra7_master_match[] = {
-       {
-               .compatible = "ti,omap4430-sdma",
-               .data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
-       },
-       {
-               .compatible = "ti,edma3",
-               .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
-       },
-       {
-               .compatible = "ti,edma3-tpcc",
-               .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
-       },
-       {},
-};
-
-static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
-{
-       for (; len > 0; len--)
-               set_bit(offset + (len - 1), p);
-}
-
-static int ti_dra7_xbar_probe(struct platform_device *pdev)
-{
-       struct device_node *node = pdev->dev.of_node;
-       const struct of_device_id *match;
-       struct device_node *dma_node;
-       struct ti_dra7_xbar_data *xbar;
-       struct property *prop;
-       struct resource *res;
-       u32 safe_val;
-       int sz;
-       void __iomem *iomem;
-       int i, ret;
-
-       if (!node)
-               return -ENODEV;
-
-       xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
-       if (!xbar)
-               return -ENOMEM;
-
-       dma_node = of_parse_phandle(node, "dma-masters", 0);
-       if (!dma_node) {
-               dev_err(&pdev->dev, "Can't get DMA master node\n");
-               return -ENODEV;
-       }
-
-       match = of_match_node(ti_dra7_master_match, dma_node);
-       if (!match) {
-               dev_err(&pdev->dev, "DMA master is not supported\n");
-               of_node_put(dma_node);
-               return -EINVAL;
-       }
-
-       if (of_property_read_u32(dma_node, "dma-requests",
-                                &xbar->dma_requests)) {
-               dev_info(&pdev->dev,
-                        "Missing XBAR output information, using %u.\n",
-                        TI_DRA7_XBAR_OUTPUTS);
-               xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
-       }
-       of_node_put(dma_node);
-
-       xbar->dma_inuse = devm_kcalloc(&pdev->dev,
-                                      BITS_TO_LONGS(xbar->dma_requests),
-                                      sizeof(unsigned long), GFP_KERNEL);
-       if (!xbar->dma_inuse)
-               return -ENOMEM;
-
-       if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
-               dev_info(&pdev->dev,
-                        "Missing XBAR input information, using %u.\n",
-                        TI_DRA7_XBAR_INPUTS);
-               xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
-       }
-
-       if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
-               xbar->safe_val = (u16)safe_val;
-
-
-       prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
-       if (prop) {
-               const char pname[] = "ti,reserved-dma-request-ranges";
-               u32 (*rsv_events)[2];
-               size_t nelm = sz / sizeof(*rsv_events);
-               int i;
-
-               if (!nelm)
-                       return -EINVAL;
-
-               rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
-               if (!rsv_events)
-                       return -ENOMEM;
-
-               ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
-                                                nelm * 2);
-               if (ret)
-                       return ret;
-
-               for (i = 0; i < nelm; i++) {
-                       ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
-                                            xbar->dma_inuse);
-               }
-               kfree(rsv_events);
-       }
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       iomem = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(iomem))
-               return PTR_ERR(iomem);
-
-       xbar->iomem = iomem;
-
-       xbar->dmarouter.dev = &pdev->dev;
-       xbar->dmarouter.route_free = ti_dra7_xbar_free;
-       xbar->dma_offset = *(u32 *)match->data;
-
-       mutex_init(&xbar->mutex);
-       platform_set_drvdata(pdev, xbar);
-
-       /* Reset the crossbar */
-       for (i = 0; i < xbar->dma_requests; i++) {
-               if (!test_bit(i, xbar->dma_inuse))
-                       ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
-       }
-
-       ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
-                                    &xbar->dmarouter);
-       if (ret) {
-               /* Restore the defaults for the crossbar */
-               for (i = 0; i < xbar->dma_requests; i++) {
-                       if (!test_bit(i, xbar->dma_inuse))
-                               ti_dra7_xbar_write(xbar->iomem, i, i);
-               }
-       }
-
-       return ret;
-}
-
-static int ti_dma_xbar_probe(struct platform_device *pdev)
-{
-       const struct of_device_id *match;
-       int ret;
-
-       match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
-       if (unlikely(!match))
-               return -EINVAL;
-
-       switch (*(u32 *)match->data) {
-       case TI_XBAR_DRA7:
-               ret = ti_dra7_xbar_probe(pdev);
-               break;
-       case TI_XBAR_AM335X:
-               ret = ti_am335x_xbar_probe(pdev);
-               break;
-       default:
-               dev_err(&pdev->dev, "Unsupported crossbar\n");
-               ret = -ENODEV;
-               break;
-       }
-
-       return ret;
-}
-
-static struct platform_driver ti_dma_xbar_driver = {
-       .driver = {
-               .name = "ti-dma-crossbar",
-               .of_match_table = of_match_ptr(ti_dma_xbar_match),
-       },
-       .probe  = ti_dma_xbar_probe,
-};
-
-static int omap_dmaxbar_init(void)
-{
-       return platform_driver_register(&ti_dma_xbar_driver);
-}
-arch_initcall(omap_dmaxbar_init);
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
new file mode 100644 (file)
index 0000000..e5e74e1
--- /dev/null
@@ -0,0 +1,37 @@
+#
+# Texas Instruments DMA drivers
+#
+
+config TI_CPPI41
+       tristate "Texas Instruments CPPI 4.1 DMA support"
+       depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX)
+       select DMA_ENGINE
+       help
+         The Communications Port Programming Interface (CPPI) 4.1 DMA engine
+         is currently used by the USB driver on AM335x and DA8xx platforms.
+
+config TI_EDMA
+       tristate "Texas Instruments EDMA support"
+       depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE || COMPILE_TEST
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
+       default y
+       help
+         Enable support for the TI EDMA (Enhanced DMA) controller. This DMA
+         engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2
+         parts.
+
+config DMA_OMAP
+       tristate "Texas Instruments sDMA (omap-dma) support"
+       depends on ARCH_OMAP || COMPILE_TEST
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
+       default y
+       help
+         Enable support for the TI sDMA (System DMA or DMA4) controller. This
+         DMA engine is found on OMAP and DRA7xx parts.
+
+config TI_DMA_CROSSBAR
+       bool
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
new file mode 100644 (file)
index 0000000..113e59e
--- /dev/null
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_TI_EDMA) += edma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
new file mode 100644 (file)
index 0000000..1497da3
--- /dev/null
@@ -0,0 +1,1223 @@
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include "../dmaengine.h"
+
+#define DESC_TYPE      27
+#define DESC_TYPE_HOST 0x10
+#define DESC_TYPE_TEARD        0x13
+
+#define TD_DESC_IS_RX  (1 << 16)
+#define TD_DESC_DMA_NUM        10
+
+#define DESC_LENGTH_BITS_NUM   21
+
+#define DESC_TYPE_USB  (5 << 26)
+#define DESC_PD_COMPLETE       (1 << 31)
+
+/* DMA engine */
+#define DMA_TDFDQ      4
+#define DMA_TXGCR(x)   (0x800 + (x) * 0x20)
+#define DMA_RXGCR(x)   (0x808 + (x) * 0x20)
+#define RXHPCRA0               4
+
+#define GCR_CHAN_ENABLE                (1 << 31)
+#define GCR_TEARDOWN           (1 << 30)
+#define GCR_STARV_RETRY                (1 << 24)
+#define GCR_DESC_TYPE_HOST     (1 << 14)
+
+/* DMA scheduler */
+#define DMA_SCHED_CTRL         0
+#define DMA_SCHED_CTRL_EN      (1 << 31)
+#define DMA_SCHED_WORD(x)      ((x) * 4 + 0x800)
+
+#define SCHED_ENTRY0_CHAN(x)   ((x) << 0)
+#define SCHED_ENTRY0_IS_RX     (1 << 7)
+
+#define SCHED_ENTRY1_CHAN(x)   ((x) << 8)
+#define SCHED_ENTRY1_IS_RX     (1 << 15)
+
+#define SCHED_ENTRY2_CHAN(x)   ((x) << 16)
+#define SCHED_ENTRY2_IS_RX     (1 << 23)
+
+#define SCHED_ENTRY3_CHAN(x)   ((x) << 24)
+#define SCHED_ENTRY3_IS_RX     (1 << 31)
+
+/* Queue manager */
+/* 4 KiB of memory for descriptors, 2 for each endpoint */
+#define ALLOC_DECS_NUM         128
+#define DESCS_AREAS            1
+#define TOTAL_DESCS_NUM                (ALLOC_DECS_NUM * DESCS_AREAS)
+#define QMGR_SCRATCH_SIZE      (TOTAL_DESCS_NUM * 4)
+
+#define QMGR_LRAM0_BASE                0x80
+#define QMGR_LRAM_SIZE         0x84
+#define QMGR_LRAM1_BASE                0x88
+#define QMGR_MEMBASE(x)                (0x1000 + (x) * 0x10)
+#define QMGR_MEMCTRL(x)                (0x1004 + (x) * 0x10)
+#define QMGR_MEMCTRL_IDX_SH    16
+#define QMGR_MEMCTRL_DESC_SH   8
+
+#define QMGR_PEND(x)   (0x90 + (x) * 4)
+
+#define QMGR_PENDING_SLOT_Q(x) (x / 32)
+#define QMGR_PENDING_BIT_Q(x)  (x % 32)
+
+#define QMGR_QUEUE_A(n)        (0x2000 + (n) * 0x10)
+#define QMGR_QUEUE_B(n)        (0x2004 + (n) * 0x10)
+#define QMGR_QUEUE_C(n)        (0x2008 + (n) * 0x10)
+#define QMGR_QUEUE_D(n)        (0x200c + (n) * 0x10)
+
+/* Packet Descriptor */
+#define PD2_ZERO_LENGTH                (1 << 19)
+
+struct cppi41_channel {
+       struct dma_chan chan;
+       struct dma_async_tx_descriptor txd;
+       struct cppi41_dd *cdd;
+       struct cppi41_desc *desc;
+       dma_addr_t desc_phys;
+       void __iomem *gcr_reg;
+       int is_tx;
+       u32 residue;
+
+       unsigned int q_num;
+       unsigned int q_comp_num;
+       unsigned int port_num;
+
+       unsigned td_retry;
+       unsigned td_queued:1;
+       unsigned td_seen:1;
+       unsigned td_desc_seen:1;
+
+       struct list_head node;          /* Node for pending list */
+};
+
+struct cppi41_desc {
+       u32 pd0;
+       u32 pd1;
+       u32 pd2;
+       u32 pd3;
+       u32 pd4;
+       u32 pd5;
+       u32 pd6;
+       u32 pd7;
+} __aligned(32);
+
+struct chan_queues {
+       u16 submit;
+       u16 complete;
+};
+
+struct cppi41_dd {
+       struct dma_device ddev;
+
+       void *qmgr_scratch;
+       dma_addr_t scratch_phys;
+
+       struct cppi41_desc *cd;
+       dma_addr_t descs_phys;
+       u32 first_td_desc;
+       struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];
+
+       void __iomem *ctrl_mem;
+       void __iomem *sched_mem;
+       void __iomem *qmgr_mem;
+       unsigned int irq;
+       const struct chan_queues *queues_rx;
+       const struct chan_queues *queues_tx;
+       struct chan_queues td_queue;
+       u16 first_completion_queue;
+       u16 qmgr_num_pend;
+       u32 n_chans;
+       u8 platform;
+
+       struct list_head pending;       /* Pending queued transfers */
+       spinlock_t lock;                /* Lock for pending list */
+
+       /* context for suspend/resume */
+       unsigned int dma_tdfdq;
+
+       bool is_suspended;
+};
+
+static struct chan_queues am335x_usb_queues_tx[] = {
+       /* USB0 ENDP 1 */
+       [ 0] = { .submit = 32, .complete =  93},
+       [ 1] = { .submit = 34, .complete =  94},
+       [ 2] = { .submit = 36, .complete =  95},
+       [ 3] = { .submit = 38, .complete =  96},
+       [ 4] = { .submit = 40, .complete =  97},
+       [ 5] = { .submit = 42, .complete =  98},
+       [ 6] = { .submit = 44, .complete =  99},
+       [ 7] = { .submit = 46, .complete = 100},
+       [ 8] = { .submit = 48, .complete = 101},
+       [ 9] = { .submit = 50, .complete = 102},
+       [10] = { .submit = 52, .complete = 103},
+       [11] = { .submit = 54, .complete = 104},
+       [12] = { .submit = 56, .complete = 105},
+       [13] = { .submit = 58, .complete = 106},
+       [14] = { .submit = 60, .complete = 107},
+
+       /* USB1 ENDP1 */
+       [15] = { .submit = 62, .complete = 125},
+       [16] = { .submit = 64, .complete = 126},
+       [17] = { .submit = 66, .complete = 127},
+       [18] = { .submit = 68, .complete = 128},
+       [19] = { .submit = 70, .complete = 129},
+       [20] = { .submit = 72, .complete = 130},
+       [21] = { .submit = 74, .complete = 131},
+       [22] = { .submit = 76, .complete = 132},
+       [23] = { .submit = 78, .complete = 133},
+       [24] = { .submit = 80, .complete = 134},
+       [25] = { .submit = 82, .complete = 135},
+       [26] = { .submit = 84, .complete = 136},
+       [27] = { .submit = 86, .complete = 137},
+       [28] = { .submit = 88, .complete = 138},
+       [29] = { .submit = 90, .complete = 139},
+};
+
+static const struct chan_queues am335x_usb_queues_rx[] = {
+       /* USB0 ENDP 1 */
+       [ 0] = { .submit =  1, .complete = 109},
+       [ 1] = { .submit =  2, .complete = 110},
+       [ 2] = { .submit =  3, .complete = 111},
+       [ 3] = { .submit =  4, .complete = 112},
+       [ 4] = { .submit =  5, .complete = 113},
+       [ 5] = { .submit =  6, .complete = 114},
+       [ 6] = { .submit =  7, .complete = 115},
+       [ 7] = { .submit =  8, .complete = 116},
+       [ 8] = { .submit =  9, .complete = 117},
+       [ 9] = { .submit = 10, .complete = 118},
+       [10] = { .submit = 11, .complete = 119},
+       [11] = { .submit = 12, .complete = 120},
+       [12] = { .submit = 13, .complete = 121},
+       [13] = { .submit = 14, .complete = 122},
+       [14] = { .submit = 15, .complete = 123},
+
+       /* USB1 ENDP 1 */
+       [15] = { .submit = 16, .complete = 141},
+       [16] = { .submit = 17, .complete = 142},
+       [17] = { .submit = 18, .complete = 143},
+       [18] = { .submit = 19, .complete = 144},
+       [19] = { .submit = 20, .complete = 145},
+       [20] = { .submit = 21, .complete = 146},
+       [21] = { .submit = 22, .complete = 147},
+       [22] = { .submit = 23, .complete = 148},
+       [23] = { .submit = 24, .complete = 149},
+       [24] = { .submit = 25, .complete = 150},
+       [25] = { .submit = 26, .complete = 151},
+       [26] = { .submit = 27, .complete = 152},
+       [27] = { .submit = 28, .complete = 153},
+       [28] = { .submit = 29, .complete = 154},
+       [29] = { .submit = 30, .complete = 155},
+};
+
+static const struct chan_queues da8xx_usb_queues_tx[] = {
+       [0] = { .submit =  16, .complete = 24},
+       [1] = { .submit =  18, .complete = 24},
+       [2] = { .submit =  20, .complete = 24},
+       [3] = { .submit =  22, .complete = 24},
+};
+
+static const struct chan_queues da8xx_usb_queues_rx[] = {
+       [0] = { .submit =  1, .complete = 26},
+       [1] = { .submit =  3, .complete = 26},
+       [2] = { .submit =  5, .complete = 26},
+       [3] = { .submit =  7, .complete = 26},
+};
+
+struct cppi_glue_infos {
+       const struct chan_queues *queues_rx;
+       const struct chan_queues *queues_tx;
+       struct chan_queues td_queue;
+       u16 first_completion_queue;
+       u16 qmgr_num_pend;
+};
+
+static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
+{
+       return container_of(c, struct cppi41_channel, chan);
+}
+
+static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
+{
+       struct cppi41_channel *c;
+       u32 descs_size;
+       u32 desc_num;
+
+       descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;
+
+       if (!((desc >= cdd->descs_phys) &&
+                       (desc < (cdd->descs_phys + descs_size)))) {
+               return NULL;
+       }
+
+       desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
+       BUG_ON(desc_num >= ALLOC_DECS_NUM);
+       c = cdd->chan_busy[desc_num];
+       cdd->chan_busy[desc_num] = NULL;
+
+       /* Usecount for chan_busy[], paired with push_desc_queue() */
+       pm_runtime_put(cdd->ddev.dev);
+
+       return c;
+}
+
+static void cppi_writel(u32 val, void *__iomem *mem)
+{
+       __raw_writel(val, mem);
+}
+
+static u32 cppi_readl(void *__iomem *mem)
+{
+       return __raw_readl(mem);
+}
+
+static u32 pd_trans_len(u32 val)
+{
+       return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
+}
+
+static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
+{
+       u32 desc;
+
+       desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
+       desc &= ~0x1f;
+       return desc;
+}
+
+static irqreturn_t cppi41_irq(int irq, void *data)
+{
+       struct cppi41_dd *cdd = data;
+       u16 first_completion_queue = cdd->first_completion_queue;
+       u16 qmgr_num_pend = cdd->qmgr_num_pend;
+       struct cppi41_channel *c;
+       int i;
+
+       for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend;
+                       i++) {
+               u32 val;
+               u32 q_num;
+
+               val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
+               if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) {
+                       u32 mask;
+                       /* set corresponding bit for completion Q 93 */
+                       mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue);
+                       /* now set all bits for queues less than Q 93 */
+                       mask--;
+                       /* now invert and keep only Q 93+ set */
+                       val &= ~mask;
+               }
+
+               if (val)
+                       __iormb();
+
+               while (val) {
+                       u32 desc, len;
+
+                       /*
+                        * This should never trigger, see the comments in
+                        * push_desc_queue()
+                        */
+                       WARN_ON(cdd->is_suspended);
+
+                       q_num = __fls(val);
+                       val &= ~(1 << q_num);
+                       q_num += 32 * i;
+                       desc = cppi41_pop_desc(cdd, q_num);
+                       c = desc_to_chan(cdd, desc);
+                       if (WARN_ON(!c)) {
+                               pr_err("%s() q %d desc %08x\n", __func__,
+                                               q_num, desc);
+                               continue;
+                       }
+
+                       if (c->desc->pd2 & PD2_ZERO_LENGTH)
+                               len = 0;
+                       else
+                               len = pd_trans_len(c->desc->pd0);
+
+                       c->residue = pd_trans_len(c->desc->pd6) - len;
+                       dma_cookie_complete(&c->txd);
+                       dmaengine_desc_get_callback_invoke(&c->txd, NULL);
+               }
+       }
+       return IRQ_HANDLED;
+}
+
+static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       dma_cookie_t cookie;
+
+       cookie = dma_cookie_assign(tx);
+
+       return cookie;
+}
+
+static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct cppi41_channel *c = to_cpp41_chan(chan);
+       struct cppi41_dd *cdd = c->cdd;
+       int error;
+
+       error = pm_runtime_get_sync(cdd->ddev.dev);
+       if (error < 0) {
+               dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
+                       __func__, error);
+               pm_runtime_put_noidle(cdd->ddev.dev);
+
+               return error;
+       }
+
+       dma_cookie_init(chan);
+       dma_async_tx_descriptor_init(&c->txd, chan);
+       c->txd.tx_submit = cppi41_tx_submit;
+
+       if (!c->is_tx)
+               cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
+
+       pm_runtime_mark_last_busy(cdd->ddev.dev);
+       pm_runtime_put_autosuspend(cdd->ddev.dev);
+
+       return 0;
+}
+
+static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct cppi41_channel *c = to_cpp41_chan(chan);
+       struct cppi41_dd *cdd = c->cdd;
+       int error;
+
+       error = pm_runtime_get_sync(cdd->ddev.dev);
+       if (error < 0) {
+               pm_runtime_put_noidle(cdd->ddev.dev);
+
+               return;
+       }
+
+       WARN_ON(!list_empty(&cdd->pending));
+
+       pm_runtime_mark_last_busy(cdd->ddev.dev);
+       pm_runtime_put_autosuspend(cdd->ddev.dev);
+}
+
+static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
+       dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+       struct cppi41_channel *c = to_cpp41_chan(chan);
+       enum dma_status ret;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+
+       dma_set_residue(txstate, c->residue);
+
+       return ret;
+}
+
+static void push_desc_queue(struct cppi41_channel *c)
+{
+       struct cppi41_dd *cdd = c->cdd;
+       u32 desc_num;
+       u32 desc_phys;
+       u32 reg;
+
+       c->residue = 0;
+
+       reg = GCR_CHAN_ENABLE;
+       if (!c->is_tx) {
+               reg |= GCR_STARV_RETRY;
+               reg |= GCR_DESC_TYPE_HOST;
+               reg |= c->q_comp_num;
+       }
+
+       cppi_writel(reg, c->gcr_reg);
+
+       /*
+        * We don't use writel() but __raw_writel() so we have to make sure
+        * that the DMA descriptor in coherent memory has made it to main memory
+        * before starting the DMA engine.
+        */
+       __iowmb();
+
+       /*
+        * DMA transfers can take at least 200ms to complete with USB mass
+        * storage connected. To prevent autosuspend timeouts, we must use
+        * pm_runtime_get/put() when chan_busy[] is modified. This will get
+        * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
+        * outcome of the transfer.
+        */
+       pm_runtime_get(cdd->ddev.dev);
+
+       desc_phys = lower_32_bits(c->desc_phys);
+       desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+       WARN_ON(cdd->chan_busy[desc_num]);
+       cdd->chan_busy[desc_num] = c;
+
+       reg = (sizeof(struct cppi41_desc) - 24) / 4;
+       reg |= desc_phys;
+       cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+}
+
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue()
+ * getting called out of order. We have both cppi41_dma_issue_pending()
+ * and cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
+{
+       struct cppi41_channel *c, *_c;
+
+       list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+               push_desc_queue(c);
+               list_del(&c->node);
+       }
+}
+
+static void cppi41_dma_issue_pending(struct dma_chan *chan)
+{
+       struct cppi41_channel *c = to_cpp41_chan(chan);
+       struct cppi41_dd *cdd = c->cdd;
+       unsigned long flags;
+       int error;
+
+       error = pm_runtime_get(cdd->ddev.dev);
+       if ((error != -EINPROGRESS) && error < 0) {
+               pm_runtime_put_noidle(cdd->ddev.dev);
+               dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
+                       error);
+
+               return;
+       }
+
+       spin_lock_irqsave(&cdd->lock, flags);
+       list_add_tail(&c->node, &cdd->pending);
+       if (!cdd->is_suspended)
+               cppi41_run_queue(cdd);
+       spin_unlock_irqrestore(&cdd->lock, flags);
+
+       pm_runtime_mark_last_busy(cdd->ddev.dev);
+       pm_runtime_put_autosuspend(cdd->ddev.dev);
+}
+
+static u32 get_host_pd0(u32 length)
+{
+       u32 reg;
+
+       reg = DESC_TYPE_HOST << DESC_TYPE;
+       reg |= length;
+
+       return reg;
+}
+
+static u32 get_host_pd1(struct cppi41_channel *c)
+{
+       u32 reg;
+
+       reg = 0;
+
+       return reg;
+}
+
+static u32 get_host_pd2(struct cppi41_channel *c)
+{
+       u32 reg;
+
+       reg = DESC_TYPE_USB;
+       reg |= c->q_comp_num;
+
+       return reg;
+}
+
+static u32 get_host_pd3(u32 length)
+{
+       u32 reg;
+
+       /* PD3 = packet size */
+       reg = length;
+
+       return reg;
+}
+
+static u32 get_host_pd6(u32 length)
+{
+       u32 reg;
+
+       /* PD6 buffer size */
+       reg = DESC_PD_COMPLETE;
+       reg |= length;
+
+       return reg;
+}
+
+static u32 get_host_pd4_or_7(u32 addr)
+{
+       u32 reg;
+
+       reg = addr;
+
+       return reg;
+}
+
+static u32 get_host_pd5(void)
+{
+       u32 reg;
+
+       reg = 0;
+
+       return reg;
+}
+
+static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
+       struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
+       enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
+{
+       struct cppi41_channel *c = to_cpp41_chan(chan);
+       struct cppi41_desc *d;
+       struct scatterlist *sg;
+       unsigned int i;
+
+       d = c->desc;
+       for_each_sg(sgl, sg, sg_len, i) {
+               u32 addr;
+               u32 len;
+
+               /* We need to use more than one desc once musb supports sg */
+               addr = lower_32_bits(sg_dma_address(sg));
+               len = sg_dma_len(sg);
+
+               d->pd0 = get_host_pd0(len);
+               d->pd1 = get_host_pd1(c);
+               d->pd2 = get_host_pd2(c);
+               d->pd3 = get_host_pd3(len);
+               d->pd4 = get_host_pd4_or_7(addr);
+               d->pd5 = get_host_pd5();
+               d->pd6 = get_host_pd6(len);
+               d->pd7 = get_host_pd4_or_7(addr);
+
+               d++;
+       }
+
+       return &c->txd;
+}
+
+static void cppi41_compute_td_desc(struct cppi41_desc *d)
+{
+       d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
+}
+
+static int cppi41_tear_down_chan(struct cppi41_channel *c)
+{
+       struct dmaengine_result abort_result;
+       struct cppi41_dd *cdd = c->cdd;
+       struct cppi41_desc *td;
+       u32 reg;
+       u32 desc_phys;
+       u32 td_desc_phys;
+
+       td = cdd->cd;
+       td += cdd->first_td_desc;
+
+       td_desc_phys = cdd->descs_phys;
+       td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);
+
+       if (!c->td_queued) {
+               cppi41_compute_td_desc(td);
+               __iowmb();
+
+               reg = (sizeof(struct cppi41_desc) - 24) / 4;
+               reg |= td_desc_phys;
+               cppi_writel(reg, cdd->qmgr_mem +
+                               QMGR_QUEUE_D(cdd->td_queue.submit));
+
+               reg = GCR_CHAN_ENABLE;
+               if (!c->is_tx) {
+                       reg |= GCR_STARV_RETRY;
+                       reg |= GCR_DESC_TYPE_HOST;
+                       reg |= cdd->td_queue.complete;
+               }
+               reg |= GCR_TEARDOWN;
+               cppi_writel(reg, c->gcr_reg);
+               c->td_queued = 1;
+               c->td_retry = 500;
+       }
+
+       if (!c->td_seen || !c->td_desc_seen) {
+
+               desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
+               if (!desc_phys && c->is_tx)
+                       desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
+
+               if (desc_phys == c->desc_phys) {
+                       c->td_desc_seen = 1;
+
+               } else if (desc_phys == td_desc_phys) {
+                       u32 pd0;
+
+                       __iormb();
+                       pd0 = td->pd0;
+                       WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
+                       WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
+                       WARN_ON((pd0 & 0x1f) != c->port_num);
+                       c->td_seen = 1;
+               } else if (desc_phys) {
+                       WARN_ON_ONCE(1);
+               }
+       }
+       c->td_retry--;
+       /*
+        * If the TX descriptor / channel is in use, the caller needs to poke
+        * the TD bit multiple times. After that the hardware releases the
+        * transfer descriptor followed by the TD descriptor. Waiting does not
+        * seem to make any difference.
+        * RX seems to be thrown out right away. However, once the TearDown
+        * descriptor gets through we are done. If we have seen the transfer
+        * descriptor before the TD we fetch it from enqueue; it has to be
+        * there waiting for us.
+        */
+       if (!c->td_seen && c->td_retry) {
+               udelay(1);
+               return -EAGAIN;
+       }
+       WARN_ON(!c->td_retry);
+
+       if (!c->td_desc_seen) {
+               desc_phys = cppi41_pop_desc(cdd, c->q_num);
+               if (!desc_phys)
+                       desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
+               WARN_ON(!desc_phys);
+       }
+
+       c->td_queued = 0;
+       c->td_seen = 0;
+       c->td_desc_seen = 0;
+       cppi_writel(0, c->gcr_reg);
+
+       /* Invoke the callback to do the necessary clean-up */
+       abort_result.result = DMA_TRANS_ABORTED;
+       dma_cookie_complete(&c->txd);
+       dmaengine_desc_get_callback_invoke(&c->txd, &abort_result);
+
+       return 0;
+}
+
+static int cppi41_stop_chan(struct dma_chan *chan)
+{
+       struct cppi41_channel *c = to_cpp41_chan(chan);
+       struct cppi41_dd *cdd = c->cdd;
+       u32 desc_num;
+       u32 desc_phys;
+       int ret;
+
+       desc_phys = lower_32_bits(c->desc_phys);
+       desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+       if (!cdd->chan_busy[desc_num])
+               return 0;
+
+       ret = cppi41_tear_down_chan(c);
+       if (ret)
+               return ret;
+
+       WARN_ON(!cdd->chan_busy[desc_num]);
+       cdd->chan_busy[desc_num] = NULL;
+
+       /* Usecount for chan_busy[], paired with push_desc_queue() */
+       pm_runtime_put(cdd->ddev.dev);
+
+       return 0;
+}
+
+static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
+{
+       struct cppi41_channel *cchan, *chans;
+       int i;
+       u32 n_chans = cdd->n_chans;
+
+       /*
+        * The channels can only be used as TX or as RX, so we add twice as
+        * many DMA channels: USB needs one RX and one TX channel per port.
+        */
+       n_chans *= 2;
+
+       chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL);
+       if (!chans)
+               return -ENOMEM;
+
+       for (i = 0; i < n_chans; i++) {
+               cchan = &chans[i];
+
+               cchan->cdd = cdd;
+               if (i & 1) {
+                       cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
+                       cchan->is_tx = 1;
+               } else {
+                       cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
+                       cchan->is_tx = 0;
+               }
+               cchan->port_num = i >> 1;
+               cchan->desc = &cdd->cd[i];
+               cchan->desc_phys = cdd->descs_phys;
+               cchan->desc_phys += i * sizeof(struct cppi41_desc);
+               cchan->chan.device = &cdd->ddev;
+               list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
+       }
+       cdd->first_td_desc = n_chans;
+
+       return 0;
+}
+
+static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
+{
+       unsigned int mem_decs;
+       int i;
+
+       mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);
+
+       for (i = 0; i < DESCS_AREAS; i++) {
+
+               cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
+               cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
+
+               dma_free_coherent(dev, mem_decs, cdd->cd,
+                               cdd->descs_phys);
+       }
+}
+
+static void disable_sched(struct cppi41_dd *cdd)
+{
+       cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
+}
+
+static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
+{
+       disable_sched(cdd);
+
+       purge_descs(dev, cdd);
+
+       cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+       cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+       dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+                       cdd->scratch_phys);
+}
+
+static int init_descs(struct device *dev, struct cppi41_dd *cdd)
+{
+       unsigned int desc_size;
+       unsigned int mem_decs;
+       int i;
+       u32 reg;
+       u32 idx;
+
+       BUILD_BUG_ON(sizeof(struct cppi41_desc) &
+                       (sizeof(struct cppi41_desc) - 1));
+       BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
+       BUILD_BUG_ON(ALLOC_DECS_NUM < 32);
+
+       desc_size = sizeof(struct cppi41_desc);
+       mem_decs = ALLOC_DECS_NUM * desc_size;
+
+       idx = 0;
+       for (i = 0; i < DESCS_AREAS; i++) {
+
+               reg = idx << QMGR_MEMCTRL_IDX_SH;
+               reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
+               reg |= ilog2(ALLOC_DECS_NUM) - 5;
+
+               BUILD_BUG_ON(DESCS_AREAS != 1);
+               cdd->cd = dma_alloc_coherent(dev, mem_decs,
+                               &cdd->descs_phys, GFP_KERNEL);
+               if (!cdd->cd)
+                       return -ENOMEM;
+
+               cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+               cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));
+
+               idx += ALLOC_DECS_NUM;
+       }
+       return 0;
+}
+
+static void init_sched(struct cppi41_dd *cdd)
+{
+       unsigned ch;
+       unsigned word;
+       u32 reg;
+
+       word = 0;
+       cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
+       for (ch = 0; ch < cdd->n_chans; ch += 2) {
+
+               reg = SCHED_ENTRY0_CHAN(ch);
+               reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;
+
+               reg |= SCHED_ENTRY2_CHAN(ch + 1);
+               reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
+               cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
+               word++;
+       }
+       reg = cdd->n_chans * 2 - 1;
+       reg |= DMA_SCHED_CTRL_EN;
+       cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
+}
+
+static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
+{
+       int ret;
+
+       BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
+       cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
+                       &cdd->scratch_phys, GFP_KERNEL);
+       if (!cdd->qmgr_scratch)
+               return -ENOMEM;
+
+       cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+       cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE);
+       cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
+
+       ret = init_descs(dev, cdd);
+       if (ret)
+               goto err_td;
+
+       cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
+       init_sched(cdd);
+
+       return 0;
+err_td:
+       deinit_cppi41(dev, cdd);
+       return ret;
+}
+
+static struct platform_driver cpp41_dma_driver;
+/*
+ * The param format is:
+ * X Y
+ * X: Port
+ * Y: 0 = RX else TX
+ */
+#define INFO_PORT      0
+#define INFO_IS_TX     1
+
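+/*
+ * Illustrative example (hypothetical values, not taken from any particular
+ * DTS): a client using "dmas = <&cppi41dma 13 1>;" asks for port 13 in the
+ * TX direction; cppi41_dma_xlate() passes the two cells to this filter as
+ * num[INFO_PORT] = 13 and num[INFO_IS_TX] = 1.
+ */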
+static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+       struct cppi41_channel *cchan;
+       struct cppi41_dd *cdd;
+       const struct chan_queues *queues;
+       u32 *num = param;
+
+       if (chan->device->dev->driver != &cpp41_dma_driver.driver)
+               return false;
+
+       cchan = to_cpp41_chan(chan);
+
+       if (cchan->port_num != num[INFO_PORT])
+               return false;
+
+       if (cchan->is_tx && !num[INFO_IS_TX])
+               return false;
+       cdd = cchan->cdd;
+       if (cchan->is_tx)
+               queues = cdd->queues_tx;
+       else
+               queues = cdd->queues_rx;
+
+       BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
+                    ARRAY_SIZE(am335x_usb_queues_tx));
+       if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx)))
+               return false;
+
+       cchan->q_num = queues[cchan->port_num].submit;
+       cchan->q_comp_num = queues[cchan->port_num].complete;
+       return true;
+}
+
+static struct of_dma_filter_info cpp41_dma_info = {
+       .filter_fn = cpp41_dma_filter_fn,
+};
+
+static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
+               struct of_dma *ofdma)
+{
+       int count = dma_spec->args_count;
+       struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+       if (!info || !info->filter_fn)
+               return NULL;
+
+       if (count != 2)
+               return NULL;
+
+       return dma_request_channel(info->dma_cap, info->filter_fn,
+                       &dma_spec->args[0]);
+}
+
+static const struct cppi_glue_infos am335x_usb_infos = {
+       .queues_rx = am335x_usb_queues_rx,
+       .queues_tx = am335x_usb_queues_tx,
+       .td_queue = { .submit = 31, .complete = 0 },
+       .first_completion_queue = 93,
+       .qmgr_num_pend = 5,
+};
+
+static const struct cppi_glue_infos da8xx_usb_infos = {
+       .queues_rx = da8xx_usb_queues_rx,
+       .queues_tx = da8xx_usb_queues_tx,
+       .td_queue = { .submit = 31, .complete = 0 },
+       .first_completion_queue = 24,
+       .qmgr_num_pend = 2,
+};
+
+static const struct of_device_id cppi41_dma_ids[] = {
+       { .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos},
+       { .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos},
+       {},
+};
+MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
+
+static const struct cppi_glue_infos *get_glue_info(struct device *dev)
+{
+       const struct of_device_id *of_id;
+
+       of_id = of_match_node(cppi41_dma_ids, dev->of_node);
+       if (!of_id)
+               return NULL;
+       return of_id->data;
+}
+
+#define CPPI41_DMA_BUSWIDTHS   (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                               BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+                               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int cppi41_dma_probe(struct platform_device *pdev)
+{
+       struct cppi41_dd *cdd;
+       struct device *dev = &pdev->dev;
+       const struct cppi_glue_infos *glue_info;
+       struct resource *mem;
+       int index;
+       int irq;
+       int ret;
+
+       glue_info = get_glue_info(dev);
+       if (!glue_info)
+               return -EINVAL;
+
+       cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
+       if (!cdd)
+               return -ENOMEM;
+
+       dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
+       cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
+       cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
+       cdd->ddev.device_tx_status = cppi41_dma_tx_status;
+       cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
+       cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
+       cdd->ddev.device_terminate_all = cppi41_stop_chan;
+       cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
+       cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
+       cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       cdd->ddev.dev = dev;
+       INIT_LIST_HEAD(&cdd->ddev.channels);
+       cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
+
+       index = of_property_match_string(dev->of_node,
+                                        "reg-names", "controller");
+       if (index < 0)
+               return index;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
+       cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(cdd->ctrl_mem))
+               return PTR_ERR(cdd->ctrl_mem);
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
+       cdd->sched_mem = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(cdd->sched_mem))
+               return PTR_ERR(cdd->sched_mem);
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
+       cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(cdd->qmgr_mem))
+               return PTR_ERR(cdd->qmgr_mem);
+
+       spin_lock_init(&cdd->lock);
+       INIT_LIST_HEAD(&cdd->pending);
+
+       platform_set_drvdata(pdev, cdd);
+
+       pm_runtime_enable(dev);
+       pm_runtime_set_autosuspend_delay(dev, 100);
+       pm_runtime_use_autosuspend(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0)
+               goto err_get_sync;
+
+       cdd->queues_rx = glue_info->queues_rx;
+       cdd->queues_tx = glue_info->queues_tx;
+       cdd->td_queue = glue_info->td_queue;
+       cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
+       cdd->first_completion_queue = glue_info->first_completion_queue;
+
+       ret = of_property_read_u32(dev->of_node,
+                                  "#dma-channels", &cdd->n_chans);
+       if (ret)
+               goto err_get_n_chans;
+
+       ret = init_cppi41(dev, cdd);
+       if (ret)
+               goto err_init_cppi;
+
+       ret = cppi41_add_chans(dev, cdd);
+       if (ret)
+               goto err_chans;
+
+       irq = irq_of_parse_and_map(dev->of_node, 0);
+       if (!irq) {
+               ret = -EINVAL;
+               goto err_chans;
+       }
+
+       ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED,
+                       dev_name(dev), cdd);
+       if (ret)
+               goto err_chans;
+       cdd->irq = irq;
+
+       ret = dma_async_device_register(&cdd->ddev);
+       if (ret)
+               goto err_chans;
+
+       ret = of_dma_controller_register(dev->of_node,
+                       cppi41_dma_xlate, &cpp41_dma_info);
+       if (ret)
+               goto err_of;
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+
+       return 0;
+err_of:
+       dma_async_device_unregister(&cdd->ddev);
+err_chans:
+       deinit_cppi41(dev, cdd);
+err_init_cppi:
+       pm_runtime_dont_use_autosuspend(dev);
+err_get_n_chans:
+err_get_sync:
+       pm_runtime_put_sync(dev);
+       pm_runtime_disable(dev);
+       return ret;
+}
+
+static int cppi41_dma_remove(struct platform_device *pdev)
+{
+       struct cppi41_dd *cdd = platform_get_drvdata(pdev);
+       int error;
+
+       error = pm_runtime_get_sync(&pdev->dev);
+       if (error < 0)
+               dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
+                       __func__, error);
+       of_dma_controller_free(pdev->dev.of_node);
+       dma_async_device_unregister(&cdd->ddev);
+
+       devm_free_irq(&pdev->dev, cdd->irq, cdd);
+       deinit_cppi41(&pdev->dev, cdd);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+       return 0;
+}
+
+static int __maybe_unused cppi41_suspend(struct device *dev)
+{
+       struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+       cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
+       disable_sched(cdd);
+
+       return 0;
+}
+
+static int __maybe_unused cppi41_resume(struct device *dev)
+{
+       struct cppi41_dd *cdd = dev_get_drvdata(dev);
+       struct cppi41_channel *c;
+       int i;
+
+       for (i = 0; i < DESCS_AREAS; i++)
+               cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+
+       list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
+               if (!c->is_tx)
+                       cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
+
+       init_sched(cdd);
+
+       cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
+       cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+       cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
+       cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
+
+       return 0;
+}
+
+static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
+{
+       struct cppi41_dd *cdd = dev_get_drvdata(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&cdd->lock, flags);
+       cdd->is_suspended = true;
+       WARN_ON(!list_empty(&cdd->pending));
+       spin_unlock_irqrestore(&cdd->lock, flags);
+
+       return 0;
+}
+
+static int __maybe_unused cppi41_runtime_resume(struct device *dev)
+{
+       struct cppi41_dd *cdd = dev_get_drvdata(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&cdd->lock, flags);
+       cdd->is_suspended = false;
+       cppi41_run_queue(cdd);
+       spin_unlock_irqrestore(&cdd->lock, flags);
+
+       return 0;
+}
+
+static const struct dev_pm_ops cppi41_pm_ops = {
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
+       SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
+                          cppi41_runtime_resume,
+                          NULL)
+};
+
+static struct platform_driver cpp41_dma_driver = {
+       .probe  = cppi41_dma_probe,
+       .remove = cppi41_dma_remove,
+       .driver = {
+               .name = "cppi41-dma-engine",
+               .pm = &cppi41_pm_ops,
+               .of_match_table = of_match_ptr(cppi41_dma_ids),
+       },
+};
+
+module_platform_driver(cpp41_dma_driver);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
new file mode 100644 (file)
index 0000000..9272b17
--- /dev/null
@@ -0,0 +1,478 @@
+/*
+ *  Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+
+#define TI_XBAR_DRA7           0
+#define TI_XBAR_AM335X         1
+static const u32 ti_xbar_type[] = {
+       [TI_XBAR_DRA7] = TI_XBAR_DRA7,
+       [TI_XBAR_AM335X] = TI_XBAR_AM335X,
+};
+
+static const struct of_device_id ti_dma_xbar_match[] = {
+       {
+               .compatible = "ti,dra7-dma-crossbar",
+               .data = &ti_xbar_type[TI_XBAR_DRA7],
+       },
+       {
+               .compatible = "ti,am335x-edma-crossbar",
+               .data = &ti_xbar_type[TI_XBAR_AM335X],
+       },
+       {},
+};
+
+/* Crossbar on AM335x/AM437x family */
+#define TI_AM335X_XBAR_LINES   64
+
+struct ti_am335x_xbar_data {
+       void __iomem *iomem;
+
+       struct dma_router dmarouter;
+
+       u32 xbar_events; /* maximum number of events to select in xbar */
+       u32 dma_requests; /* number of DMA requests on eDMA */
+};
+
+struct ti_am335x_xbar_map {
+       u16 dma_line;
+       u8 mux_val;
+};
+
+static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
+{
+       /*
+        * The TPCC_EVT_MUX_60_63 register layout differs from the rest in
+        * that event 63 is mapped to the lowest byte and event 60 to the
+        * highest, so handle it separately.
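+        * For example, event 61 (61 % 4 == 1) is written at byte offset
+        * 63 - 1 = 62, while events below 60 simply use the event number
+        * as the byte offset.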
+        */
+       if (event >= 60 && event <= 63)
+               writeb_relaxed(val, iomem + (63 - event % 4));
+       else
+               writeb_relaxed(val, iomem + event);
+}
+
+static void ti_am335x_xbar_free(struct device *dev, void *route_data)
+{
+       struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
+       struct ti_am335x_xbar_map *map = route_data;
+
+       dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
+               map->mux_val, map->dma_line);
+
+       ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
+       kfree(map);
+}
+
+static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
+                                          struct of_dma *ofdma)
+{
+       struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+       struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
+       struct ti_am335x_xbar_map *map;
+
+       if (dma_spec->args_count != 3)
+               return ERR_PTR(-EINVAL);
+
+       if (dma_spec->args[2] >= xbar->xbar_events) {
+               dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
+                       dma_spec->args[2]);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (dma_spec->args[0] >= xbar->dma_requests) {
+               dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
+                       dma_spec->args[0]);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* The of_node_put() will be done in the core for the node */
+       dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+       if (!dma_spec->np) {
+               dev_err(&pdev->dev, "Can't get DMA master\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map) {
+               of_node_put(dma_spec->np);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       map->dma_line = (u16)dma_spec->args[0];
+       map->mux_val = (u8)dma_spec->args[2];
+
+       dma_spec->args[2] = 0;
+       dma_spec->args_count = 2;
+
+       dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
+               map->mux_val, map->dma_line);
+
+       ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
+
+       return map;
+}
+
+static const struct of_device_id ti_am335x_master_match[] = {
+       { .compatible = "ti,edma3-tpcc", },
+       {},
+};
+
+static int ti_am335x_xbar_probe(struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       const struct of_device_id *match;
+       struct device_node *dma_node;
+       struct ti_am335x_xbar_data *xbar;
+       struct resource *res;
+       void __iomem *iomem;
+       int i, ret;
+
+       if (!node)
+               return -ENODEV;
+
+       xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
+       if (!xbar)
+               return -ENOMEM;
+
+       dma_node = of_parse_phandle(node, "dma-masters", 0);
+       if (!dma_node) {
+               dev_err(&pdev->dev, "Can't get DMA master node\n");
+               return -ENODEV;
+       }
+
+       match = of_match_node(ti_am335x_master_match, dma_node);
+       if (!match) {
+               dev_err(&pdev->dev, "DMA master is not supported\n");
+               of_node_put(dma_node);
+               return -EINVAL;
+       }
+
+       if (of_property_read_u32(dma_node, "dma-requests",
+                                &xbar->dma_requests)) {
+               dev_info(&pdev->dev,
+                        "Missing XBAR output information, using %u.\n",
+                        TI_AM335X_XBAR_LINES);
+               xbar->dma_requests = TI_AM335X_XBAR_LINES;
+       }
+       of_node_put(dma_node);
+
+       if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
+               dev_info(&pdev->dev,
+                        "Missing XBAR input information, using %u.\n",
+                        TI_AM335X_XBAR_LINES);
+               xbar->xbar_events = TI_AM335X_XBAR_LINES;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       iomem = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(iomem))
+               return PTR_ERR(iomem);
+
+       xbar->iomem = iomem;
+
+       xbar->dmarouter.dev = &pdev->dev;
+       xbar->dmarouter.route_free = ti_am335x_xbar_free;
+
+       platform_set_drvdata(pdev, xbar);
+
+       /* Reset the crossbar */
+       for (i = 0; i < xbar->dma_requests; i++)
+               ti_am335x_xbar_write(xbar->iomem, i, 0);
+
+       ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
+                                    &xbar->dmarouter);
+
+       return ret;
+}
+
+/* Crossbar on DRA7xx family */
+#define TI_DRA7_XBAR_OUTPUTS   127
+#define TI_DRA7_XBAR_INPUTS    256
+
+struct ti_dra7_xbar_data {
+       void __iomem *iomem;
+
+       struct dma_router dmarouter;
+       struct mutex mutex;
+       unsigned long *dma_inuse;
+
+       u16 safe_val; /* Value to reset the crossbar lines */
+       u32 xbar_requests; /* number of DMA requests connected to XBAR */
+       u32 dma_requests; /* number of DMA requests forwarded to DMA */
+       u32 dma_offset;
+};
+
+struct ti_dra7_xbar_map {
+       u16 xbar_in;
+       int xbar_out;
+};
+
+static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
+{
+       writew_relaxed(val, iomem + (xbar * 2));
+}
+
+static void ti_dra7_xbar_free(struct device *dev, void *route_data)
+{
+       struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
+       struct ti_dra7_xbar_map *map = route_data;
+
+       dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
+               map->xbar_in, map->xbar_out);
+
+       ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
+       mutex_lock(&xbar->mutex);
+       clear_bit(map->xbar_out, xbar->dma_inuse);
+       mutex_unlock(&xbar->mutex);
+       kfree(map);
+}
+
+static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
+                                        struct of_dma *ofdma)
+{
+       struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+       struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
+       struct ti_dra7_xbar_map *map;
+
+       if (dma_spec->args[0] >= xbar->xbar_requests) {
+               dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
+                       dma_spec->args[0]);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* The of_node_put() will be done in the core for the node */
+       dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+       if (!dma_spec->np) {
+               dev_err(&pdev->dev, "Can't get DMA master\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
+       if (!map) {
+               of_node_put(dma_spec->np);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       mutex_lock(&xbar->mutex);
+       map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
+                                           xbar->dma_requests);
+       if (map->xbar_out == xbar->dma_requests) {
+               mutex_unlock(&xbar->mutex);
+               dev_err(&pdev->dev, "Run out of free DMA requests\n");
+               kfree(map);
+               return ERR_PTR(-ENOMEM);
+       }
+       set_bit(map->xbar_out, xbar->dma_inuse);
+       mutex_unlock(&xbar->mutex);
+
+       map->xbar_in = (u16)dma_spec->args[0];
+
+       dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
+
+       dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
+               map->xbar_in, map->xbar_out);
+
+       ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
+
+       return map;
+}
+
+#define TI_XBAR_EDMA_OFFSET    0
+#define TI_XBAR_SDMA_OFFSET    1
+static const u32 ti_dma_offset[] = {
+       [TI_XBAR_EDMA_OFFSET] = 0,
+       [TI_XBAR_SDMA_OFFSET] = 1,
+};
+
+static const struct of_device_id ti_dra7_master_match[] = {
+       {
+               .compatible = "ti,omap4430-sdma",
+               .data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
+       },
+       {
+               .compatible = "ti,edma3",
+               .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
+       },
+       {
+               .compatible = "ti,edma3-tpcc",
+               .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
+       },
+       {},
+};
+
+static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
+{
+       for (; len > 0; len--)
+               set_bit(offset + (len - 1), p);
+}
+
+static int ti_dra7_xbar_probe(struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       const struct of_device_id *match;
+       struct device_node *dma_node;
+       struct ti_dra7_xbar_data *xbar;
+       struct property *prop;
+       struct resource *res;
+       u32 safe_val;
+       int sz;
+       void __iomem *iomem;
+       int i, ret;
+
+       if (!node)
+               return -ENODEV;
+
+       xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
+       if (!xbar)
+               return -ENOMEM;
+
+       dma_node = of_parse_phandle(node, "dma-masters", 0);
+       if (!dma_node) {
+               dev_err(&pdev->dev, "Can't get DMA master node\n");
+               return -ENODEV;
+       }
+
+       match = of_match_node(ti_dra7_master_match, dma_node);
+       if (!match) {
+               dev_err(&pdev->dev, "DMA master is not supported\n");
+               of_node_put(dma_node);
+               return -EINVAL;
+       }
+
+       if (of_property_read_u32(dma_node, "dma-requests",
+                                &xbar->dma_requests)) {
+               dev_info(&pdev->dev,
+                        "Missing XBAR output information, using %u.\n",
+                        TI_DRA7_XBAR_OUTPUTS);
+               xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
+       }
+       of_node_put(dma_node);
+
+       xbar->dma_inuse = devm_kcalloc(&pdev->dev,
+                                      BITS_TO_LONGS(xbar->dma_requests),
+                                      sizeof(unsigned long), GFP_KERNEL);
+       if (!xbar->dma_inuse)
+               return -ENOMEM;
+
+       if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
+               dev_info(&pdev->dev,
+                        "Missing XBAR input information, using %u.\n",
+                        TI_DRA7_XBAR_INPUTS);
+               xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
+       }
+
+       if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
+               xbar->safe_val = (u16)safe_val;
+
+
+       prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
+       if (prop) {
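+               /*
+                * The property holds <start length> pairs; e.g. a
+                * (hypothetical) <6 2> entry reserves requests 6 and 7.
+                */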
+               const char pname[] = "ti,reserved-dma-request-ranges";
+               u32 (*rsv_events)[2];
+               size_t nelm = sz / sizeof(*rsv_events);
+               int i;
+
+               if (!nelm)
+                       return -EINVAL;
+
+               rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
+               if (!rsv_events)
+                       return -ENOMEM;
+
+               ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
+                                                nelm * 2);
+               if (ret)
+                       return ret;
+
+               for (i = 0; i < nelm; i++) {
+                       ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
+                                            xbar->dma_inuse);
+               }
+               kfree(rsv_events);
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       iomem = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(iomem))
+               return PTR_ERR(iomem);
+
+       xbar->iomem = iomem;
+
+       xbar->dmarouter.dev = &pdev->dev;
+       xbar->dmarouter.route_free = ti_dra7_xbar_free;
+       xbar->dma_offset = *(u32 *)match->data;
+
+       mutex_init(&xbar->mutex);
+       platform_set_drvdata(pdev, xbar);
+
+       /* Reset the crossbar */
+       for (i = 0; i < xbar->dma_requests; i++) {
+               if (!test_bit(i, xbar->dma_inuse))
+                       ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
+       }
+
+       ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
+                                    &xbar->dmarouter);
+       if (ret) {
+               /* Restore the defaults for the crossbar */
+               for (i = 0; i < xbar->dma_requests; i++) {
+                       if (!test_bit(i, xbar->dma_inuse))
+                               ti_dra7_xbar_write(xbar->iomem, i, i);
+               }
+       }
+
+       return ret;
+}
+
+static int ti_dma_xbar_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+       int ret;
+
+       match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
+       if (unlikely(!match))
+               return -EINVAL;
+
+       switch (*(u32 *)match->data) {
+       case TI_XBAR_DRA7:
+               ret = ti_dra7_xbar_probe(pdev);
+               break;
+       case TI_XBAR_AM335X:
+               ret = ti_am335x_xbar_probe(pdev);
+               break;
+       default:
+               dev_err(&pdev->dev, "Unsupported crossbar\n");
+               ret = -ENODEV;
+               break;
+       }
+
+       return ret;
+}
+
+static struct platform_driver ti_dma_xbar_driver = {
+       .driver = {
+               .name = "ti-dma-crossbar",
+               .of_match_table = of_match_ptr(ti_dma_xbar_match),
+       },
+       .probe  = ti_dma_xbar_probe,
+};
+
+static int omap_dmaxbar_init(void)
+{
+       return platform_driver_register(&ti_dma_xbar_driver);
+}
+arch_initcall(omap_dmaxbar_init);
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
new file mode 100644 (file)
index 0000000..ceabdea
--- /dev/null
@@ -0,0 +1,2565 @@
+/*
+ * TI EDMA DMA engine driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/edma.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/platform_data/edma.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+/* Offsets matching "struct edmacc_param" */
+#define PARM_OPT               0x00
+#define PARM_SRC               0x04
+#define PARM_A_B_CNT           0x08
+#define PARM_DST               0x0c
+#define PARM_SRC_DST_BIDX      0x10
+#define PARM_LINK_BCNTRLD      0x14
+#define PARM_SRC_DST_CIDX      0x18
+#define PARM_CCNT              0x1c
+
+#define PARM_SIZE              0x20
+
+/* Offsets for EDMA CC global channel registers and their shadows */
+#define SH_ER                  0x00    /* 64 bits */
+#define SH_ECR                 0x08    /* 64 bits */
+#define SH_ESR                 0x10    /* 64 bits */
+#define SH_CER                 0x18    /* 64 bits */
+#define SH_EER                 0x20    /* 64 bits */
+#define SH_EECR                        0x28    /* 64 bits */
+#define SH_EESR                        0x30    /* 64 bits */
+#define SH_SER                 0x38    /* 64 bits */
+#define SH_SECR                        0x40    /* 64 bits */
+#define SH_IER                 0x50    /* 64 bits */
+#define SH_IECR                        0x58    /* 64 bits */
+#define SH_IESR                        0x60    /* 64 bits */
+#define SH_IPR                 0x68    /* 64 bits */
+#define SH_ICR                 0x70    /* 64 bits */
+#define SH_IEVAL               0x78
+#define SH_QER                 0x80
+#define SH_QEER                        0x84
+#define SH_QEECR               0x88
+#define SH_QEESR               0x8c
+#define SH_QSER                        0x90
+#define SH_QSECR               0x94
+#define SH_SIZE                        0x200
+
+/* Offsets for EDMA CC global registers */
+#define EDMA_REV               0x0000
+#define EDMA_CCCFG             0x0004
+#define EDMA_QCHMAP            0x0200  /* 8 registers */
+#define EDMA_DMAQNUM           0x0240  /* 8 registers (4 on OMAP-L1xx) */
+#define EDMA_QDMAQNUM          0x0260
+#define EDMA_QUETCMAP          0x0280
+#define EDMA_QUEPRI            0x0284
+#define EDMA_EMR               0x0300  /* 64 bits */
+#define EDMA_EMCR              0x0308  /* 64 bits */
+#define EDMA_QEMR              0x0310
+#define EDMA_QEMCR             0x0314
+#define EDMA_CCERR             0x0318
+#define EDMA_CCERRCLR          0x031c
+#define EDMA_EEVAL             0x0320
+#define EDMA_DRAE              0x0340  /* 4 x 64 bits*/
+#define EDMA_QRAE              0x0380  /* 4 registers */
+#define EDMA_QUEEVTENTRY       0x0400  /* 2 x 16 registers */
+#define EDMA_QSTAT             0x0600  /* 2 registers */
+#define EDMA_QWMTHRA           0x0620
+#define EDMA_QWMTHRB           0x0624
+#define EDMA_CCSTAT            0x0640
+
+#define EDMA_M                 0x1000  /* global channel registers */
+#define EDMA_ECR               0x1008
+#define EDMA_ECRH              0x100C
+#define EDMA_SHADOW0           0x2000  /* 4 shadow regions */
+#define EDMA_PARM              0x4000  /* PaRAM entries */
+
+#define PARM_OFFSET(param_no)  (EDMA_PARM + ((param_no) << 5))
+
+#define EDMA_DCHMAP            0x0100  /* 64 registers */
+
+/* CCCFG register */
+#define GET_NUM_DMACH(x)       (x & 0x7) /* bits 0-2 */
+#define GET_NUM_QDMACH(x)      ((x & 0x70) >> 4) /* bits 4-6 */
+#define GET_NUM_PAENTRY(x)     ((x & 0x7000) >> 12) /* bits 12-14 */
+#define GET_NUM_EVQUE(x)       ((x & 0x70000) >> 16) /* bits 16-18 */
+#define GET_NUM_REGN(x)                ((x & 0x300000) >> 20) /* bits 20-21 */
+#define CHMAP_EXIST            BIT(24)
+
+/* CCSTAT register */
+#define EDMA_CCSTAT_ACTV       BIT(4)
+
+/*
+ * Max of 20 segments per channel to conserve PaRAM slots
+ * Also note that MAX_NR_SG should be at least the number of periods
+ * that are required for ASoC, otherwise DMA prep calls will
+ * fail. Today davinci-pcm is the only user of this driver and
+ * requires at least 17 slots, so we set the default to 20.
+ */
+#define MAX_NR_SG              20
+#define EDMA_MAX_SLOTS         MAX_NR_SG
+#define EDMA_DESCRIPTORS       16
+
+#define EDMA_CHANNEL_ANY               -1      /* for edma_alloc_channel() */
+#define EDMA_SLOT_ANY                  -1      /* for edma_alloc_slot() */
+#define EDMA_CONT_PARAMS_ANY            1001
+#define EDMA_CONT_PARAMS_FIXED_EXACT    1002
+#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
+
+/* PaRAM slots are laid out like this */
+struct edmacc_param {
+       u32 opt;
+       u32 src;
+       u32 a_b_cnt;
+       u32 dst;
+       u32 src_dst_bidx;
+       u32 link_bcntrld;
+       u32 src_dst_cidx;
+       u32 ccnt;
+} __packed;
+
+/* fields in edmacc_param.opt */
+#define SAM            BIT(0)
+#define DAM            BIT(1)
+#define SYNCDIM                BIT(2)
+#define STATIC         BIT(3)
+#define EDMA_FWID      (0x07 << 8)
+#define TCCMODE                BIT(11)
+#define EDMA_TCC(t)    ((t) << 12)
+#define TCINTEN                BIT(20)
+#define ITCINTEN       BIT(21)
+#define TCCHEN         BIT(22)
+#define ITCCHEN                BIT(23)
+
+struct edma_pset {
+       u32                             len;
+       dma_addr_t                      addr;
+       struct edmacc_param             param;
+};
+
+struct edma_desc {
+       struct virt_dma_desc            vdesc;
+       struct list_head                node;
+       enum dma_transfer_direction     direction;
+       int                             cyclic;
+       int                             absync;
+       int                             pset_nr;
+       struct edma_chan                *echan;
+       int                             processed;
+
+       /*
+        * The following 4 elements are used for residue accounting.
+        *
+        * - processed_stat: the number of SG elements we have traversed
+        * so far to cover accounting. This is updated directly to processed
+        * during edma_callback and is always <= processed, because processed
+        * refers to the number of pending transfers (programmed to the EDMA
+        * controller), whereas processed_stat tracks the number of transfers
+        * accounted for so far.
+        *
+        * - residue: The amount of bytes we have left to transfer for this desc
+        *
+        * - residue_stat: The residue in bytes of data we have covered
+        * so far for accounting. This is updated directly to residue
+        * during callbacks to keep it current.
+        *
+        * - sg_len: Tracks the length of the current intermediate transfer,
+        * this is required to update the residue during intermediate transfer
+        * completion callback.
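+        *
+        * As an illustrative example (made-up numbers): for a slave_sg
+        * transfer of four 1 KiB elements, after the second element has
+        * completed and been accounted for, processed_stat is 2 and
+        * residue has dropped from 4096 to 2048 bytes.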
+        */
+       int                             processed_stat;
+       u32                             sg_len;
+       u32                             residue;
+       u32                             residue_stat;
+
+       struct edma_pset                pset[0];
+};
+
+struct edma_cc;
+
+struct edma_tc {
+       struct device_node              *node;
+       u16                             id;
+};
+
+struct edma_chan {
+       struct virt_dma_chan            vchan;
+       struct list_head                node;
+       struct edma_desc                *edesc;
+       struct edma_cc                  *ecc;
+       struct edma_tc                  *tc;
+       int                             ch_num;
+       bool                            alloced;
+       bool                            hw_triggered;
+       int                             slot[EDMA_MAX_SLOTS];
+       int                             missed;
+       struct dma_slave_config         cfg;
+};
+
+struct edma_cc {
+       struct device                   *dev;
+       struct edma_soc_info            *info;
+       void __iomem                    *base;
+       int                             id;
+       bool                            legacy_mode;
+
+       /* eDMA3 resource information */
+       unsigned                        num_channels;
+       unsigned                        num_qchannels;
+       unsigned                        num_region;
+       unsigned                        num_slots;
+       unsigned                        num_tc;
+       bool                            chmap_exist;
+       enum dma_event_q                default_queue;
+
+       unsigned int                    ccint;
+       unsigned int                    ccerrint;
+
+       /*
+        * The slot_inuse bit for each PaRAM slot is clear unless the slot is
+        * in use by Linux or if it is allocated to be used by DSP.
+        */
+       unsigned long *slot_inuse;
+
+       struct dma_device               dma_slave;
+       struct dma_device               *dma_memcpy;
+       struct edma_chan                *slave_chans;
+       struct edma_tc                  *tc_list;
+       int                             dummy_slot;
+};
+
+/* dummy param set used to (re)initialize parameter RAM slots */
+static const struct edmacc_param dummy_paramset = {
+       .link_bcntrld = 0xffff,
+       .ccnt = 1,
+};
+
+#define EDMA_BINDING_LEGACY    0
+#define EDMA_BINDING_TPCC      1
+static const u32 edma_binding_type[] = {
+       [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
+       [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
+};
+
+static const struct of_device_id edma_of_ids[] = {
+       {
+               .compatible = "ti,edma3",
+               .data = &edma_binding_type[EDMA_BINDING_LEGACY],
+       },
+       {
+               .compatible = "ti,edma3-tpcc",
+               .data = &edma_binding_type[EDMA_BINDING_TPCC],
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, edma_of_ids);
+
+static const struct of_device_id edma_tptc_of_ids[] = {
+       { .compatible = "ti,edma3-tptc", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
+
+static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
+{
+       return (unsigned int)__raw_readl(ecc->base + offset);
+}
+
+static inline void edma_write(struct edma_cc *ecc, int offset, int val)
+{
+       __raw_writel(val, ecc->base + offset);
+}
+
+static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
+                              unsigned or)
+{
+       unsigned val = edma_read(ecc, offset);
+
+       val &= and;
+       val |= or;
+       edma_write(ecc, offset, val);
+}
+
+static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
+{
+       unsigned val = edma_read(ecc, offset);
+
+       val &= and;
+       edma_write(ecc, offset, val);
+}
+
+static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
+{
+       unsigned val = edma_read(ecc, offset);
+
+       val |= or;
+       edma_write(ecc, offset, val);
+}
+
+static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
+                                          int i)
+{
+       return edma_read(ecc, offset + (i << 2));
+}
+
+static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
+                                   unsigned val)
+{
+       edma_write(ecc, offset + (i << 2), val);
+}
+
+static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
+                                    unsigned and, unsigned or)
+{
+       edma_modify(ecc, offset + (i << 2), and, or);
+}
+
+static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
+                                unsigned or)
+{
+       edma_or(ecc, offset + (i << 2), or);
+}
+
+static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
+                                 unsigned or)
+{
+       edma_or(ecc, offset + ((i * 2 + j) << 2), or);
+}
+
+static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
+                                    int j, unsigned val)
+{
+       edma_write(ecc, offset + ((i * 2 + j) << 2), val);
+}
+
+static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
+{
+       return edma_read(ecc, EDMA_SHADOW0 + offset);
+}
+
+static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
+                                                  int offset, int i)
+{
+       return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
+}
+
+static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
+                                     unsigned val)
+{
+       edma_write(ecc, EDMA_SHADOW0 + offset, val);
+}
+
+static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
+                                           int i, unsigned val)
+{
+       edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
+}
+
+static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
+                                          int param_no)
+{
+       return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
+}
+
+static inline void edma_param_write(struct edma_cc *ecc, int offset,
+                                   int param_no, unsigned val)
+{
+       edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
+}
+
+static inline void edma_param_modify(struct edma_cc *ecc, int offset,
+                                    int param_no, unsigned and, unsigned or)
+{
+       edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
+}
+
+static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
+                                 unsigned and)
+{
+       edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
+}
+
+static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
+                                unsigned or)
+{
+       edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
+}
+
+static inline void edma_set_bits(int offset, int len, unsigned long *p)
+{
+       for (; len > 0; len--)
+               set_bit(offset + (len - 1), p);
+}
+
+static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
+                                         int priority)
+{
+       int bit = queue_no * 4;
+
+       edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
+}
+
+static void edma_set_chmap(struct edma_chan *echan, int slot)
+{
+       struct edma_cc *ecc = echan->ecc;
+       int channel = EDMA_CHAN_SLOT(echan->ch_num);
+
+       if (ecc->chmap_exist) {
+               slot = EDMA_CHAN_SLOT(slot);
+               edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
+       }
+}
+
+static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
+{
+       struct edma_cc *ecc = echan->ecc;
+       int channel = EDMA_CHAN_SLOT(echan->ch_num);
+
+       if (enable) {
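+               /* clear any stale pending interrupt before enabling it */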
+               edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
+                                        BIT(channel & 0x1f));
+               edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
+                                        BIT(channel & 0x1f));
+       } else {
+               edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
+                                        BIT(channel & 0x1f));
+       }
+}
+
+/*
+ * paRAM slot management functions
+ */
+static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
+                           const struct edmacc_param *param)
+{
+       slot = EDMA_CHAN_SLOT(slot);
+       if (slot >= ecc->num_slots)
+               return;
+       memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
+}
+
+static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
+                          struct edmacc_param *param)
+{
+       slot = EDMA_CHAN_SLOT(slot);
+       if (slot >= ecc->num_slots)
+               return -EINVAL;
+       memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
+
+       return 0;
+}
+
+/**
+ * edma_alloc_slot - allocate DMA parameter RAM
+ * @ecc: pointer to edma_cc struct
+ * @slot: specific slot to allocate; negative for "any unused slot"
+ *
+ * This allocates a parameter RAM slot, initializing it to hold a
+ * dummy transfer.  Slots allocated using this routine have not been
+ * mapped to a hardware DMA channel, and will normally be used by
+ * linking to them from a slot associated with a DMA channel.
+ *
+ * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
+ * slots may be allocated on behalf of DSP firmware.
+ *
+ * Returns the number of the slot, else negative errno.
+ */
+static int edma_alloc_slot(struct edma_cc *ecc, int slot)
+{
+       if (slot >= 0) {
+               slot = EDMA_CHAN_SLOT(slot);
+               /* Requesting entry paRAM slot for a HW triggered channel. */
+               if (ecc->chmap_exist && slot < ecc->num_channels)
+                       slot = EDMA_SLOT_ANY;
+       }
+
+       if (slot < 0) {
+               if (ecc->chmap_exist)
+                       slot = 0;
+               else
+                       slot = ecc->num_channels;
+               for (;;) {
+                       slot = find_next_zero_bit(ecc->slot_inuse,
+                                                 ecc->num_slots,
+                                                 slot);
+                       if (slot == ecc->num_slots)
+                               return -ENOMEM;
+                       if (!test_and_set_bit(slot, ecc->slot_inuse))
+                               break;
+               }
+       } else if (slot >= ecc->num_slots) {
+               return -EINVAL;
+       } else if (test_and_set_bit(slot, ecc->slot_inuse)) {
+               return -EBUSY;
+       }
+
+       edma_write_slot(ecc, slot, &dummy_paramset);
+
+       return EDMA_CTLR_CHAN(ecc->id, slot);
+}
+
+static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
+{
+       slot = EDMA_CHAN_SLOT(slot);
+       if (slot >= ecc->num_slots)
+               return;
+
+       edma_write_slot(ecc, slot, &dummy_paramset);
+       clear_bit(slot, ecc->slot_inuse);
+}
+
+/**
+ * edma_link - link one parameter RAM slot to another
+ * @ecc: pointer to edma_cc struct
+ * @from: parameter RAM slot originating the link
+ * @to: parameter RAM slot which is the link target
+ *
+ * The originating slot should not be part of any active DMA transfer.
+ */
+static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
+{
+       if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
+               dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");
+
+       from = EDMA_CHAN_SLOT(from);
+       to = EDMA_CHAN_SLOT(to);
+       if (from >= ecc->num_slots || to >= ecc->num_slots)
+               return;
+
+       edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
+                         PARM_OFFSET(to));
+}
+
+/**
+ * edma_get_position - returns the current transfer point
+ * @ecc: pointer to edma_cc struct
+ * @slot: parameter RAM slot being examined
+ * @dst:  true selects the dest position, false the source
+ *
+ * Returns the position of the current active slot
+ */
+static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
+                                   bool dst)
+{
+       u32 offs;
+
+       slot = EDMA_CHAN_SLOT(slot);
+       offs = PARM_OFFSET(slot);
+       offs += dst ? PARM_DST : PARM_SRC;
+
+       return edma_read(ecc, offs);
+}
+
+/*
+ * Channels with event associations will be triggered by their hardware
+ * events, and channels without such associations will be triggered by
+ * software.  (At this writing there is no interface for using software
+ * triggers except with channels that don't support hardware triggers.)
+ */
+static void edma_start(struct edma_chan *echan)
+{
+       struct edma_cc *ecc = echan->ecc;
+       int channel = EDMA_CHAN_SLOT(echan->ch_num);
+       int j = (channel >> 5);
+       unsigned int mask = BIT(channel & 0x1f);
+
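+       /*
+        * Descriptive note: the event registers are banked in 32-bit words,
+        * so j selects the word (channel / 32) and mask selects the bit
+        * within that word (channel % 32).
+        */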
+       if (!echan->hw_triggered) {
+               /* EDMA channels without event association */
+               dev_dbg(ecc->dev, "ESR%d %08x\n", j,
+                       edma_shadow0_read_array(ecc, SH_ESR, j));
+               edma_shadow0_write_array(ecc, SH_ESR, j, mask);
+       } else {
+               /* EDMA channel with event association */
+               dev_dbg(ecc->dev, "ER%d %08x\n", j,
+                       edma_shadow0_read_array(ecc, SH_ER, j));
+               /* Clear any pending event or error */
+               edma_write_array(ecc, EDMA_ECR, j, mask);
+               edma_write_array(ecc, EDMA_EMCR, j, mask);
+               /* Clear any SER */
+               edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+               edma_shadow0_write_array(ecc, SH_EESR, j, mask);
+               dev_dbg(ecc->dev, "EER%d %08x\n", j,
+                       edma_shadow0_read_array(ecc, SH_EER, j));
+       }
+}
+
+static void edma_stop(struct edma_chan *echan)
+{
+       struct edma_cc *ecc = echan->ecc;
+       int channel = EDMA_CHAN_SLOT(echan->ch_num);
+       int j = (channel >> 5);
+       unsigned int mask = BIT(channel & 0x1f);
+
+       edma_shadow0_write_array(ecc, SH_EECR, j, mask);
+       edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+       edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+       edma_write_array(ecc, EDMA_EMCR, j, mask);
+
+       /* clear possibly pending completion interrupt */
+       edma_shadow0_write_array(ecc, SH_ICR, j, mask);
+
+       dev_dbg(ecc->dev, "EER%d %08x\n", j,
+               edma_shadow0_read_array(ecc, SH_EER, j));
+
+       /* REVISIT:  consider guarding against inappropriate event
+        * chaining by overwriting with dummy_paramset.
+        */
+}
+
+/*
+ * Temporarily disable EDMA hardware events on the specified channel,
+ * preventing them from triggering new transfers
+ */
+static void edma_pause(struct edma_chan *echan)
+{
+       int channel = EDMA_CHAN_SLOT(echan->ch_num);
+       unsigned int mask = BIT(channel & 0x1f);
+
+       edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
+}
+
+/* Re-enable EDMA hardware events on the specified channel.  */
+static void edma_resume(struct edma_chan *echan)
+{
+       int channel = EDMA_CHAN_SLOT(echan->ch_num);
+       unsigned int mask = BIT(channel & 0x1f);
+
+       edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
+}
+
+static void edma_trigger_channel(struct edma_chan *echan)
+{
+       struct edma_cc *ecc = echan->ecc;
+       int channel = EDMA_CHAN_SLOT(echan->ch_num);
+       unsigned int mask = BIT(channel & 0x1f);
+
+       edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
+
+       dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
+               edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
+}
+
+static void edma_clean_channel(struct edma_chan *echan)
+{
+       struct edma_cc *ecc = echan->ecc;
+       int channel = EDMA_CHAN_SLOT(echan->ch_num);
+       int j = (channel >> 5);
+       unsigned int mask = BIT(channel & 0x1f);
+
+       dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
+       edma_shadow0_write_array(ecc, SH_ECR, j, mask);
+       /* Clear the corresponding EMR bits */
+       edma_write_array(ecc, EDMA_EMCR, j, mask);
+       /* Clear any SER */
+       edma_shadow0_write_array(ecc, SH_SECR, j, mask);
+       edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
+}
+
+/* Move channel to a specific event queue */
+static void edma_assign_channel_eventq(struct edma_chan *echan,
+                                      enum dma_event_q eventq_no)
+{
+       struct edma_cc *ecc = echan->ecc;
+       int channel = EDMA_CHAN_SLOT(echan->ch_num);
+       int bit = (channel & 0x7) * 4;
+
+       /* default to low priority queue */
+       if (eventq_no == EVENTQ_DEFAULT)
+               eventq_no = ecc->default_queue;
+       if (eventq_no >= ecc->num_tc)
+               return;
+
+       eventq_no &= 7;
+       edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
+                         eventq_no << bit);
+}
+
+static int edma_alloc_channel(struct edma_chan *echan,
+                             enum dma_event_q eventq_no)
+{
+       struct edma_cc *ecc = echan->ecc;
+       int channel = EDMA_CHAN_SLOT(echan->ch_num);
+
+       /* ensure access through shadow region 0 */
+       edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
+
+       /* ensure no events are pending */
+       edma_stop(echan);
+
+       edma_setup_interrupt(echan, true);
+
+       edma_assign_channel_eventq(echan, eventq_no);
+
+       return 0;
+}
+
+static void edma_free_channel(struct edma_chan *echan)
+{
+       /* ensure no events are pending */
+       edma_stop(echan);
+       /* REVISIT should probably take out of shadow region 0 */
+       edma_setup_interrupt(echan, false);
+}
+
+static inline struct edma_cc *to_edma_cc(struct dma_device *d)
+{
+       return container_of(d, struct edma_cc, dma_slave);
+}
+
+static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct edma_chan, vchan.chan);
+}
+
+static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
+{
+       return container_of(tx, struct edma_desc, vdesc.tx);
+}
+
+static void edma_desc_free(struct virt_dma_desc *vdesc)
+{
+       kfree(container_of(vdesc, struct edma_desc, vdesc));
+}
+
+/* Dispatch a queued descriptor to the controller (caller holds lock) */
+static void edma_execute(struct edma_chan *echan)
+{
+       struct edma_cc *ecc = echan->ecc;
+       struct virt_dma_desc *vdesc;
+       struct edma_desc *edesc;
+       struct device *dev = echan->vchan.chan.device->dev;
+       int i, j, left, nslots;
+
+       if (!echan->edesc) {
+               /* Setup is needed for the first transfer */
+               vdesc = vchan_next_desc(&echan->vchan);
+               if (!vdesc)
+                       return;
+               list_del(&vdesc->node);
+               echan->edesc = to_edma_desc(&vdesc->tx);
+       }
+
+       edesc = echan->edesc;
+
+       /* Find out how many PaRAM sets are left */
+       left = edesc->pset_nr - edesc->processed;
+       nslots = min(MAX_NR_SG, left);
+       edesc->sg_len = 0;
+
+       /* Write descriptor PaRAM set(s) */
+       for (i = 0; i < nslots; i++) {
+               j = i + edesc->processed;
+               edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
+               edesc->sg_len += edesc->pset[j].len;
+               dev_vdbg(dev,
+                        "\n pset[%d]:\n"
+                        "  chnum\t%d\n"
+                        "  slot\t%d\n"
+                        "  opt\t%08x\n"
+                        "  src\t%08x\n"
+                        "  dst\t%08x\n"
+                        "  abcnt\t%08x\n"
+                        "  ccnt\t%08x\n"
+                        "  bidx\t%08x\n"
+                        "  cidx\t%08x\n"
+                        "  lkrld\t%08x\n",
+                        j, echan->ch_num, echan->slot[i],
+                        edesc->pset[j].param.opt,
+                        edesc->pset[j].param.src,
+                        edesc->pset[j].param.dst,
+                        edesc->pset[j].param.a_b_cnt,
+                        edesc->pset[j].param.ccnt,
+                        edesc->pset[j].param.src_dst_bidx,
+                        edesc->pset[j].param.src_dst_cidx,
+                        edesc->pset[j].param.link_bcntrld);
+               /* Link to the previous slot if not the last set */
+               if (i != (nslots - 1))
+                       edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
+       }
+
+       edesc->processed += nslots;
+
+       /*
+        * If this is the last set in a set of SG-list transactions then
+        * set up a link to the dummy slot; this results in all future
+        * events being absorbed and that's OK because we're done.
+        */
+       if (edesc->processed == edesc->pset_nr) {
+               if (edesc->cyclic)
+                       edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
+               else
+                       edma_link(ecc, echan->slot[nslots - 1],
+                                 echan->ecc->dummy_slot);
+       }
+
+       if (echan->missed) {
+               /*
+                * This happens due to setup times between intermediate
+                * transfers in long SG lists which have to be broken up into
+                * transfers of MAX_NR_SG
+                */
+               dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
+               edma_clean_channel(echan);
+               edma_stop(echan);
+               edma_start(echan);
+               edma_trigger_channel(echan);
+               echan->missed = 0;
+       } else if (edesc->processed <= MAX_NR_SG) {
+               dev_dbg(dev, "first transfer starting on channel %d\n",
+                       echan->ch_num);
+               edma_start(echan);
+       } else {
+               dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
+                       echan->ch_num, edesc->processed);
+               edma_resume(echan);
+       }
+}
+
+static int edma_terminate_all(struct dma_chan *chan)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&echan->vchan.lock, flags);
+
+       /*
+        * Stop DMA activity: we assume the callback will not be called
+        * after this function returns (even if it is, it will see that
+        * echan->edesc is NULL and exit).
+        */
+       if (echan->edesc) {
+               edma_stop(echan);
+               /* Move the cyclic channel back to default queue */
+               if (!echan->tc && echan->edesc->cyclic)
+                       edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
+
+               vchan_terminate_vdesc(&echan->edesc->vdesc);
+               echan->edesc = NULL;
+       }
+
+       vchan_get_all_descriptors(&echan->vchan, &head);
+       spin_unlock_irqrestore(&echan->vchan.lock, flags);
+       vchan_dma_desc_free_list(&echan->vchan, &head);
+
+       return 0;
+}
+
+static void edma_synchronize(struct dma_chan *chan)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+
+       vchan_synchronize(&echan->vchan);
+}
+
+static int edma_slave_config(struct dma_chan *chan,
+       struct dma_slave_config *cfg)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+
+       if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+           cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+               return -EINVAL;
+
+       if (cfg->src_maxburst > chan->device->max_burst ||
+           cfg->dst_maxburst > chan->device->max_burst)
+               return -EINVAL;
+
+       memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
+
+       return 0;
+}
+
+static int edma_dma_pause(struct dma_chan *chan)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+
+       if (!echan->edesc)
+               return -EINVAL;
+
+       edma_pause(echan);
+       return 0;
+}
+
+static int edma_dma_resume(struct dma_chan *chan)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+
+       edma_resume(echan);
+       return 0;
+}
+
+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel whose PaRAM set we're configuring
+ * @epset: PaRAM set to initialize and set up
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: Number of acnt-sized units transferred per synchronization event
+ * @acnt: Size in bytes of one array (the device FIFO width for slave transfers)
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
+                           dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+                           unsigned int acnt, unsigned int dma_length,
+                           enum dma_transfer_direction direction)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+       struct device *dev = chan->device->dev;
+       struct edmacc_param *param = &epset->param;
+       int bcnt, ccnt, cidx;
+       int src_bidx, dst_bidx, src_cidx, dst_cidx;
+       int absync;
+
+       /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
+       if (!burst)
+               burst = 1;
+       /*
+        * If the maxburst is equal to the fifo width, use
+        * A-synced transfers. This allows for large contiguous
+        * buffer transfers using only one PaRAM set.
+        */
+       if (burst == 1) {
+               /*
+                * For the A-sync case, bcnt and ccnt are the remainder
+                * and quotient respectively of the division of
+                * (dma_length / acnt) by (SZ_64K - 1). This is so
+                * that in case bcnt overflows, we have ccnt to use.
+                * Note: bcntrld is used in A-sync transfers only, and it
+                * only applies when sg_dma_len(sg) >= SZ_64K.
+                * In that case, bcnt for the first frame will be the
+                * remainder below, and for every successive frame bcnt
+                * will be SZ_64K - 1. This is ensured by setting
+                * bcntrld = 0xffff at the end of this function.
+                */
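+               /*
+                * Illustrative example (hypothetical numbers, not from the
+                * original source): with acnt = 4 and dma_length = 1048576
+                * there are 262144 elements; 262144 / 65535 = 4, so ccnt = 4
+                * and bcnt = 262144 - 4 * 65535 = 4. Since bcnt is non-zero,
+                * ccnt becomes 5: the first frame moves 4 elements and every
+                * later frame moves 65535 (via bcntrld).
+                */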
+               absync = false;
+               ccnt = dma_length / acnt / (SZ_64K - 1);
+               bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+               /*
+                * If bcnt is non-zero, we have a remainder and hence an
+                * extra frame to transfer, so increment ccnt.
+                */
+               if (bcnt)
+                       ccnt++;
+               else
+                       bcnt = SZ_64K - 1;
+               cidx = acnt;
+       } else {
+               /*
+                * If maxburst is greater than the fifo address_width,
+                * use AB-synced transfers where A count is the fifo
+                * address_width and B count is the maxburst. In this
+                * case, we are limited to transfers of C count frames
+                * of (address_width * maxburst) where C count is limited
+                * to SZ_64K-1. This places an upper bound on the length
+                * of an SG segment that can be handled.
+                */
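+               /*
+                * Illustrative example (hypothetical numbers, not from the
+                * original source): with acnt = 4 (the FIFO width), burst = 8
+                * and dma_length = 4096, bcnt = 8 and ccnt = 4096 / (4 * 8) =
+                * 128 frames of 32 bytes each; cidx advances 32 bytes per
+                * frame.
+                */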
+               absync = true;
+               bcnt = burst;
+               ccnt = dma_length / (acnt * bcnt);
+               if (ccnt > (SZ_64K - 1)) {
+                       dev_err(dev, "Exceeded max SG segment size\n");
+                       return -EINVAL;
+               }
+               cidx = acnt * bcnt;
+       }
+
+       epset->len = dma_length;
+
+       if (direction == DMA_MEM_TO_DEV) {
+               src_bidx = acnt;
+               src_cidx = cidx;
+               dst_bidx = 0;
+               dst_cidx = 0;
+               epset->addr = src_addr;
+       } else if (direction == DMA_DEV_TO_MEM)  {
+               src_bidx = 0;
+               src_cidx = 0;
+               dst_bidx = acnt;
+               dst_cidx = cidx;
+               epset->addr = dst_addr;
+       } else if (direction == DMA_MEM_TO_MEM)  {
+               src_bidx = acnt;
+               src_cidx = cidx;
+               dst_bidx = acnt;
+               dst_cidx = cidx;
+       } else {
+               dev_err(dev, "%s: direction not implemented yet\n", __func__);
+               return -EINVAL;
+       }
+
+       param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+       /* Configure A or AB synchronized transfers */
+       if (absync)
+               param->opt |= SYNCDIM;
+
+       param->src = src_addr;
+       param->dst = dst_addr;
+
+       param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+       param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+       param->a_b_cnt = bcnt << 16 | acnt;
+       param->ccnt = ccnt;
+       /*
+        * The bcntrld auto-reload is only required for the A-sync case,
+        * where a reload value of SZ_64K - 1 is all that is ever needed.
+        * The link field is initially set to the null link and will be
+        * populated later by edma_execute().
+        */
+       param->link_bcntrld = 0xffffffff;
+       return absync;
+}
+
+static struct dma_async_tx_descriptor *edma_prep_slave_sg(
+       struct dma_chan *chan, struct scatterlist *sgl,
+       unsigned int sg_len, enum dma_transfer_direction direction,
+       unsigned long tx_flags, void *context)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+       struct device *dev = chan->device->dev;
+       struct edma_desc *edesc;
+       dma_addr_t src_addr = 0, dst_addr = 0;
+       enum dma_slave_buswidth dev_width;
+       u32 burst;
+       struct scatterlist *sg;
+       int i, nslots, ret;
+
+       if (unlikely(!echan || !sgl || !sg_len))
+               return NULL;
+
+       if (direction == DMA_DEV_TO_MEM) {
+               src_addr = echan->cfg.src_addr;
+               dev_width = echan->cfg.src_addr_width;
+               burst = echan->cfg.src_maxburst;
+       } else if (direction == DMA_MEM_TO_DEV) {
+               dst_addr = echan->cfg.dst_addr;
+               dev_width = echan->cfg.dst_addr_width;
+               burst = echan->cfg.dst_maxburst;
+       } else {
+               dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
+               return NULL;
+       }
+
+       if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+               dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
+               return NULL;
+       }
+
+       edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC);
+       if (!edesc)
+               return NULL;
+
+       edesc->pset_nr = sg_len;
+       edesc->residue = 0;
+       edesc->direction = direction;
+       edesc->echan = echan;
+
+       /* Allocate PaRAM slots, if needed */
+       nslots = min_t(unsigned, MAX_NR_SG, sg_len);
+
+       for (i = 0; i < nslots; i++) {
+               if (echan->slot[i] < 0) {
+                       echan->slot[i] =
+                               edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
+                       if (echan->slot[i] < 0) {
+                               kfree(edesc);
+                               dev_err(dev, "%s: Failed to allocate slot\n",
+                                       __func__);
+                               return NULL;
+                       }
+               }
+       }
+
+       /* Configure PaRAM sets for each SG */
+       for_each_sg(sgl, sg, sg_len, i) {
+               /* Get address for each SG */
+               if (direction == DMA_DEV_TO_MEM)
+                       dst_addr = sg_dma_address(sg);
+               else
+                       src_addr = sg_dma_address(sg);
+
+               ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+                                      dst_addr, burst, dev_width,
+                                      sg_dma_len(sg), direction);
+               if (ret < 0) {
+                       kfree(edesc);
+                       return NULL;
+               }
+
+               edesc->absync = ret;
+               edesc->residue += sg_dma_len(sg);
+
+               if (i == sg_len - 1)
+                       /* Enable completion interrupt */
+                       edesc->pset[i].param.opt |= TCINTEN;
+               else if (!((i+1) % MAX_NR_SG))
+                       /*
+                        * Enable early completion interrupt for the
+                        * intermediate set. In this case the driver will be
+                        * notified when the paRAM set is submitted to TC. This
+                        * will allow more time to set up the next set of slots.
+                        */
+                       edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
+       }
+       edesc->residue_stat = edesc->residue;
+
+       return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
+       struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+       size_t len, unsigned long tx_flags)
+{
+       int ret, nslots;
+       struct edma_desc *edesc;
+       struct device *dev = chan->device->dev;
+       struct edma_chan *echan = to_edma_chan(chan);
+       unsigned int width, pset_len, array_size;
+
+       if (unlikely(!echan || !len))
+               return NULL;
+
+       /* Align the array size (acnt block) with the transfer properties */
+       switch (__ffs((src | dest | len))) {
+       case 0:
+               array_size = SZ_32K - 1;
+               break;
+       case 1:
+               array_size = SZ_32K - 2;
+               break;
+       default:
+               array_size = SZ_32K - 4;
+               break;
+       }
+
+       if (len < SZ_64K) {
+               /*
+                * Transfer size less than 64K can be handled with one paRAM
+                * slot and with one burst.
+                * ACNT = length
+                */
+               width = len;
+               pset_len = len;
+               nslots = 1;
+       } else {
+               /*
+                * A transfer size bigger than 64K will be handled with a
+                * maximum of two paRAM slots.
+                * slot1: (full_length / 32767) times 32767 bytes bursts.
+                *        ACNT = 32767, length1: (full_length / 32767) * 32767
+                * slot2: the remaining amount of data after slot1.
+                *        ACNT = full_length - length1, length2 = ACNT
+                *
+                * When the full_length is a multiple of 32767 one slot can be
+                * used to complete the transfer.
+                */
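+               /*
+                * Illustrative example (hypothetical numbers, not from the
+                * original source): with src, dest and len all 4-byte
+                * aligned, array_size = SZ_32K - 4 = 32764. For len = 100000,
+                * pset_len = rounddown(100000, 32764) = 98292 bytes go via
+                * the first slot and the remaining 1708 bytes via the second,
+                * so nslots = 2.
+                */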
+               width = array_size;
+               pset_len = rounddown(len, width);
+               /* One slot is enough for lengths multiple of (SZ_32K -1) */
+               if (unlikely(pset_len == len))
+                       nslots = 1;
+               else
+                       nslots = 2;
+       }
+
+       edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
+       if (!edesc)
+               return NULL;
+
+       edesc->pset_nr = nslots;
+       edesc->residue = edesc->residue_stat = len;
+       edesc->direction = DMA_MEM_TO_MEM;
+       edesc->echan = echan;
+
+       ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
+                              width, pset_len, DMA_MEM_TO_MEM);
+       if (ret < 0) {
+               kfree(edesc);
+               return NULL;
+       }
+
+       edesc->absync = ret;
+
+       edesc->pset[0].param.opt |= ITCCHEN;
+       if (nslots == 1) {
+               /* Enable transfer complete interrupt */
+               edesc->pset[0].param.opt |= TCINTEN;
+       } else {
+               /* Enable transfer complete chaining for the first slot */
+               edesc->pset[0].param.opt |= TCCHEN;
+
+               if (echan->slot[1] < 0) {
+                       echan->slot[1] = edma_alloc_slot(echan->ecc,
+                                                        EDMA_SLOT_ANY);
+                       if (echan->slot[1] < 0) {
+                               kfree(edesc);
+                               dev_err(dev, "%s: Failed to allocate slot\n",
+                                       __func__);
+                               return NULL;
+                       }
+               }
+               dest += pset_len;
+               src += pset_len;
+               pset_len = width = len % array_size;
+
+               ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
+                                      width, pset_len, DMA_MEM_TO_MEM);
+               if (ret < 0) {
+                       kfree(edesc);
+                       return NULL;
+               }
+
+               edesc->pset[1].param.opt |= ITCCHEN;
+               edesc->pset[1].param.opt |= TCINTEN;
+       }
+
+       return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
+       struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+       size_t period_len, enum dma_transfer_direction direction,
+       unsigned long tx_flags)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+       struct device *dev = chan->device->dev;
+       struct edma_desc *edesc;
+       dma_addr_t src_addr, dst_addr;
+       enum dma_slave_buswidth dev_width;
+       bool use_intermediate = false;
+       u32 burst;
+       int i, ret, nslots;
+
+       if (unlikely(!echan || !buf_len || !period_len))
+               return NULL;
+
+       if (direction == DMA_DEV_TO_MEM) {
+               src_addr = echan->cfg.src_addr;
+               dst_addr = buf_addr;
+               dev_width = echan->cfg.src_addr_width;
+               burst = echan->cfg.src_maxburst;
+       } else if (direction == DMA_MEM_TO_DEV) {
+               src_addr = buf_addr;
+               dst_addr = echan->cfg.dst_addr;
+               dev_width = echan->cfg.dst_addr_width;
+               burst = echan->cfg.dst_maxburst;
+       } else {
+               dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
+               return NULL;
+       }
+
+       if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+               dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
+               return NULL;
+       }
+
+       if (unlikely(buf_len % period_len)) {
+               dev_err(dev, "Buffer length should be a multiple of the period length\n");
+               return NULL;
+       }
+
+       nslots = (buf_len / period_len) + 1;
+
+       /*
+        * Cyclic DMA users such as audio cannot tolerate delays introduced
+        * by cases where the number of periods is more than the maximum
+        * number of SGs the EDMA driver can handle at a time. For DMA types
+        * such as Slave SGs, such delays are tolerable and synchronized,
+        * but the synchronization is difficult to achieve with Cyclic and
+        * cannot be guaranteed, so we error out early.
+        */
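+       /*
+        * Illustrative example (hypothetical numbers; the real MAX_NR_SG
+        * limit comes from the driver): buf_len = 64 KiB with period_len =
+        * 4 KiB gives nslots = 17. If that exceeds MAX_NR_SG and burst ==
+        * period_len, the whole buffer is treated as a single period with
+        * intermediate interrupts; otherwise the preparation fails.
+        */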
+       if (nslots > MAX_NR_SG) {
+               /*
+                * If the burst and period sizes are the same, we can put
+                * the full buffer into a single period and activate
+                * intermediate interrupts. This will produce interrupts
+                * after each burst, which is also after each desired period.
+                */
+               if (burst == period_len) {
+                       period_len = buf_len;
+                       nslots = 2;
+                       use_intermediate = true;
+               } else {
+                       return NULL;
+               }
+       }
+
+       edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
+       if (!edesc)
+               return NULL;
+
+       edesc->cyclic = 1;
+       edesc->pset_nr = nslots;
+       edesc->residue = edesc->residue_stat = buf_len;
+       edesc->direction = direction;
+       edesc->echan = echan;
+
+       dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
+               __func__, echan->ch_num, nslots, period_len, buf_len);
+
+       for (i = 0; i < nslots; i++) {
+               /* Allocate a PaRAM slot, if needed */
+               if (echan->slot[i] < 0) {
+                       echan->slot[i] =
+                               edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
+                       if (echan->slot[i] < 0) {
+                               kfree(edesc);
+                               dev_err(dev, "%s: Failed to allocate slot\n",
+                                       __func__);
+                               return NULL;
+                       }
+               }
+
+               if (i == nslots - 1) {
+                       memcpy(&edesc->pset[i], &edesc->pset[0],
+                              sizeof(edesc->pset[0]));
+                       break;
+               }
+
+               ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+                                      dst_addr, burst, dev_width, period_len,
+                                      direction);
+               if (ret < 0) {
+                       kfree(edesc);
+                       return NULL;
+               }
+
+               if (direction == DMA_DEV_TO_MEM)
+                       dst_addr += period_len;
+               else
+                       src_addr += period_len;
+
+               dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
+               dev_vdbg(dev,
+                       "\n pset[%d]:\n"
+                       "  chnum\t%d\n"
+                       "  slot\t%d\n"
+                       "  opt\t%08x\n"
+                       "  src\t%08x\n"
+                       "  dst\t%08x\n"
+                       "  abcnt\t%08x\n"
+                       "  ccnt\t%08x\n"
+                       "  bidx\t%08x\n"
+                       "  cidx\t%08x\n"
+                       "  lkrld\t%08x\n",
+                       i, echan->ch_num, echan->slot[i],
+                       edesc->pset[i].param.opt,
+                       edesc->pset[i].param.src,
+                       edesc->pset[i].param.dst,
+                       edesc->pset[i].param.a_b_cnt,
+                       edesc->pset[i].param.ccnt,
+                       edesc->pset[i].param.src_dst_bidx,
+                       edesc->pset[i].param.src_dst_cidx,
+                       edesc->pset[i].param.link_bcntrld);
+
+               edesc->absync = ret;
+
+               /*
+                * Enable period interrupt only if it is requested
+                */
+               if (tx_flags & DMA_PREP_INTERRUPT) {
+                       edesc->pset[i].param.opt |= TCINTEN;
+
+                       /* Also enable intermediate interrupts if necessary */
+                       if (use_intermediate)
+                               edesc->pset[i].param.opt |= ITCINTEN;
+               }
+       }
+
+       /* Place the cyclic channel to highest priority queue */
+       if (!echan->tc)
+               edma_assign_channel_eventq(echan, EVENTQ_0);
+
+       return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
+static void edma_completion_handler(struct edma_chan *echan)
+{
+       struct device *dev = echan->vchan.chan.device->dev;
+       struct edma_desc *edesc;
+
+       spin_lock(&echan->vchan.lock);
+       edesc = echan->edesc;
+       if (edesc) {
+               if (edesc->cyclic) {
+                       vchan_cyclic_callback(&edesc->vdesc);
+                       spin_unlock(&echan->vchan.lock);
+                       return;
+               } else if (edesc->processed == edesc->pset_nr) {
+                       edesc->residue = 0;
+                       edma_stop(echan);
+                       vchan_cookie_complete(&edesc->vdesc);
+                       echan->edesc = NULL;
+
+                       dev_dbg(dev, "Transfer completed on channel %d\n",
+                               echan->ch_num);
+               } else {
+                       dev_dbg(dev, "Sub transfer completed on channel %d\n",
+                               echan->ch_num);
+
+                       edma_pause(echan);
+
+                       /* Update statistics for tx_status */
+                       edesc->residue -= edesc->sg_len;
+                       edesc->residue_stat = edesc->residue;
+                       edesc->processed_stat = edesc->processed;
+               }
+               edma_execute(echan);
+       }
+
+       spin_unlock(&echan->vchan.lock);
+}
+
+/* eDMA interrupt handler */
+static irqreturn_t dma_irq_handler(int irq, void *data)
+{
+       struct edma_cc *ecc = data;
+       int ctlr;
+       u32 sh_ier;
+       u32 sh_ipr;
+       u32 bank;
+
+       ctlr = ecc->id;
+       if (ctlr < 0)
+               return IRQ_NONE;
+
+       dev_vdbg(ecc->dev, "dma_irq_handler\n");
+
+       sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
+       if (!sh_ipr) {
+               sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
+               if (!sh_ipr)
+                       return IRQ_NONE;
+               sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
+               bank = 1;
+       } else {
+               sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
+               bank = 0;
+       }
+
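+       /* Walk the pending bits; (bank << 5) | slot recovers the channel number. */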
+       do {
+               u32 slot;
+               u32 channel;
+
+               slot = __ffs(sh_ipr);
+               sh_ipr &= ~(BIT(slot));
+
+               if (sh_ier & BIT(slot)) {
+                       channel = (bank << 5) | slot;
+                       /* Clear the corresponding IPR bits */
+                       edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
+                       edma_completion_handler(&ecc->slave_chans[channel]);
+               }
+       } while (sh_ipr);
+
+       edma_shadow0_write(ecc, SH_IEVAL, 1);
+       return IRQ_HANDLED;
+}
+
+static void edma_error_handler(struct edma_chan *echan)
+{
+       struct edma_cc *ecc = echan->ecc;
+       struct device *dev = echan->vchan.chan.device->dev;
+       struct edmacc_param p;
+       int err;
+
+       if (!echan->edesc)
+               return;
+
+       spin_lock(&echan->vchan.lock);
+
+       err = edma_read_slot(ecc, echan->slot[0], &p);
+
+       /*
+        * Issue later based on the missed flag; this is guaranteed to be
+        * acted upon because either:
+        * (1) we finished transmitting an intermediate slot and
+        *     edma_execute is coming up, or
+        * (2) we finished the current transfer and issue_pending will
+        *     call edma_execute.
+        *
+        * Important note: issuing can be dangerous here and
+        * lead to some nasty recursion when we are in a NULL
+        * slot. So we avoid doing so and set the missed flag.
+        */
+       if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
+               dev_dbg(dev, "Error on null slot, setting miss\n");
+               echan->missed = 1;
+       } else {
+               /*
+                * The slot is already programmed but the event got
+                * missed, so it's safe to issue it here.
+                */
+               dev_dbg(dev, "Missed event, TRIGGERING\n");
+               edma_clean_channel(echan);
+               edma_stop(echan);
+               edma_start(echan);
+               edma_trigger_channel(echan);
+       }
+       spin_unlock(&echan->vchan.lock);
+}
+
+static inline bool edma_error_pending(struct edma_cc *ecc)
+{
+       if (edma_read_array(ecc, EDMA_EMR, 0) ||
+           edma_read_array(ecc, EDMA_EMR, 1) ||
+           edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
+               return true;
+
+       return false;
+}
+
+/* eDMA error interrupt handler */
+static irqreturn_t dma_ccerr_handler(int irq, void *data)
+{
+       struct edma_cc *ecc = data;
+       int i, j;
+       int ctlr;
+       unsigned int cnt = 0;
+       unsigned int val;
+
+       ctlr = ecc->id;
+       if (ctlr < 0)
+               return IRQ_NONE;
+
+       dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
+
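+       /*
+        * Descriptive note: the low 16 bits of LINK_BCNTRLD hold the PaRAM
+        * address of the linked set; the 0xffff0000 mask below preserves
+        * BCNTRLD while rewriting the link to PARM_OFFSET(to).
+        */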
+       if (!edma_error_pending(ecc)) {
+               /*
+                * The registers indicate no pending error event but the irq
+                * handler has been called.
+                * Ask eDMA to re-evaluate the error registers.
+                */
+               dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
+                       __func__);
+               edma_write(ecc, EDMA_EEVAL, 1);
+               return IRQ_NONE;
+       }
+
+       while (1) {
+               /* Event missed register(s) */
+               for (j = 0; j < 2; j++) {
+                       unsigned long emr;
+
+                       val = edma_read_array(ecc, EDMA_EMR, j);
+                       if (!val)
+                               continue;
+
+                       dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
+                       emr = val;
+                       for (i = find_next_bit(&emr, 32, 0); i < 32;
+                            i = find_next_bit(&emr, 32, i + 1)) {
+                               int k = (j << 5) + i;
+
+                               /* Clear the corresponding EMR bits */
+                               edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
+                               /* Clear any SER */
+                               edma_shadow0_write_array(ecc, SH_SECR, j,
+                                                        BIT(i));
+                               edma_error_handler(&ecc->slave_chans[k]);
+                       }
+               }
+
+               val = edma_read(ecc, EDMA_QEMR);
+               if (val) {
+                       dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
+                       /* Not reported, just clear the interrupt reason. */
+                       edma_write(ecc, EDMA_QEMCR, val);
+                       edma_shadow0_write(ecc, SH_QSECR, val);
+               }
+
+               val = edma_read(ecc, EDMA_CCERR);
+               if (val) {
+                       dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
+                       /* Not reported, just clear the interrupt reason. */
+                       edma_write(ecc, EDMA_CCERRCLR, val);
+               }
+
+               if (!edma_error_pending(ecc))
+                       break;
+               cnt++;
+               if (cnt > 10)
+                       break;
+       }
+       edma_write(ecc, EDMA_EEVAL, 1);
+       return IRQ_HANDLED;
+}
+
+/* Alloc channel resources */
+static int edma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+       struct edma_cc *ecc = echan->ecc;
+       struct device *dev = ecc->dev;
+       enum dma_event_q eventq_no = EVENTQ_DEFAULT;
+       int ret;
+
+       if (echan->tc) {
+               eventq_no = echan->tc->id;
+       } else if (ecc->tc_list) {
+               /* memcpy channel */
+               echan->tc = &ecc->tc_list[ecc->info->default_queue];
+               eventq_no = echan->tc->id;
+       }
+
+       ret = edma_alloc_channel(echan, eventq_no);
+       if (ret)
+               return ret;
+
+       echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
+       if (echan->slot[0] < 0) {
+               dev_err(dev, "Entry slot allocation failed for channel %u\n",
+                       EDMA_CHAN_SLOT(echan->ch_num));
+               ret = echan->slot[0];
+               goto err_slot;
+       }
+
+       /* Set up channel -> slot mapping for the entry slot */
+       edma_set_chmap(echan, echan->slot[0]);
+       echan->alloced = true;
+
+       dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
+               EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
+               echan->hw_triggered ? "HW" : "SW");
+
+       return 0;
+
+err_slot:
+       edma_free_channel(echan);
+       return ret;
+}
+
+/* Free channel resources */
+static void edma_free_chan_resources(struct dma_chan *chan)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+       struct device *dev = echan->ecc->dev;
+       int i;
+
+       /* Terminate transfers */
+       edma_stop(echan);
+
+       vchan_free_chan_resources(&echan->vchan);
+
+       /* Free EDMA PaRAM slots */
+       for (i = 0; i < EDMA_MAX_SLOTS; i++) {
+               if (echan->slot[i] >= 0) {
+                       edma_free_slot(echan->ecc, echan->slot[i]);
+                       echan->slot[i] = -1;
+               }
+       }
+
+       /* Set entry slot to the dummy slot */
+       edma_set_chmap(echan, echan->ecc->dummy_slot);
+
+       /* Free EDMA channel */
+       if (echan->alloced) {
+               edma_free_channel(echan);
+               echan->alloced = false;
+       }
+
+       echan->tc = NULL;
+       echan->hw_triggered = false;
+
+       dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
+               EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
+}
+
+/* Send pending descriptor to hardware */
+static void edma_issue_pending(struct dma_chan *chan)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&echan->vchan.lock, flags);
+       if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
+               edma_execute(echan);
+       spin_unlock_irqrestore(&echan->vchan.lock, flags);
+}
+
+/*
+ * This limit exists to avoid a possible infinite loop when waiting for proof
+ * that a particular transfer is completed. This limit can be hit if there
+ * are large bursts to/from slow devices or the CPU is never able to catch
+ * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
+ * RX-FIFO, as many as 55 loops have been seen.
+ */
+#define EDMA_MAX_TR_WAIT_LOOPS 1000
+
+static u32 edma_residue(struct edma_desc *edesc)
+{
+       bool dst = edesc->direction == DMA_DEV_TO_MEM;
+       int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
+       struct edma_chan *echan = edesc->echan;
+       struct edma_pset *pset = edesc->pset;
+       dma_addr_t done, pos;
+       int i;
+
+       /*
+        * We always read the dst/src position from the first PaRAM
+        * pset. That's the one which is active now.
+        */
+       pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+
+       /*
+        * "pos" may represent a transfer request that is still being
+        * processed by the EDMACC or EDMATC. We will busy wait until
+        * any one of the situations occurs:
+        *   1. the DMA hardware is idle
+        *   2. a new transfer request is setup
+        *   3. we hit the loop limit
+        */
+       while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
+               /* check if a new transfer request is setup */
+               if (edma_get_position(echan->ecc,
+                                     echan->slot[0], dst) != pos) {
+                       break;
+               }
+
+               if (!--loop_count) {
+                       dev_dbg_ratelimited(echan->vchan.chan.device->dev,
+                               "%s: timeout waiting for PaRAM update\n",
+                               __func__);
+                       break;
+               }
+
+               cpu_relax();
+       }
+
+       /*
+        * Cyclic is simple. Just subtract pset[0].addr from pos.
+        *
+        * We never update edesc->residue in the cyclic case, so the value
+        * we report is the remaining room to the end of the circular
+        * buffer.
+        */
+       if (edesc->cyclic) {
+               done = pos - pset->addr;
+               edesc->residue_stat = edesc->residue - done;
+               return edesc->residue_stat;
+       }
+
+       /*
+        * For SG operation we catch up with the last processed
+        * status.
+        */
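+       /*
+        * Illustrative example (hypothetical numbers, not from the original
+        * source): with four 4 KiB psets, processed_stat = 1 and pos 1 KiB
+        * into pset[2], pset[1] is marked done (residue_stat -= 4096) and
+        * the reported residue for pset[2] is residue_stat - 1024.
+        */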
+       pset += edesc->processed_stat;
+
+       for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
+               /*
+                * If we are inside this pset address range, we know
+                * this is the active one. Get the current delta and
+                * stop walking the psets.
+                */
+               if (pos >= pset->addr && pos < pset->addr + pset->len)
+                       return edesc->residue_stat - (pos - pset->addr);
+
+               /* Otherwise mark it done and update residue_stat. */
+               edesc->processed_stat++;
+               edesc->residue_stat -= pset->len;
+       }
+       return edesc->residue_stat;
+}
+
+/* Check request completion status */
+static enum dma_status edma_tx_status(struct dma_chan *chan,
+                                     dma_cookie_t cookie,
+                                     struct dma_tx_state *txstate)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+       struct virt_dma_desc *vdesc;
+       enum dma_status ret;
+       unsigned long flags;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_COMPLETE || !txstate)
+               return ret;
+
+       spin_lock_irqsave(&echan->vchan.lock, flags);
+       if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
+               txstate->residue = edma_residue(echan->edesc);
+       else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
+               txstate->residue = to_edma_desc(&vdesc->tx)->residue;
+       spin_unlock_irqrestore(&echan->vchan.lock, flags);
+
+       return ret;
+}
+
+static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
+{
+       if (!memcpy_channels)
+               return false;
+       while (*memcpy_channels != -1) {
+               if (*memcpy_channels == ch_num)
+                       return true;
+               memcpy_channels++;
+       }
+       return false;
+}
+
+#define EDMA_DMA_BUSWIDTHS     (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
+{
+       struct dma_device *s_ddev = &ecc->dma_slave;
+       struct dma_device *m_ddev = NULL;
+       s32 *memcpy_channels = ecc->info->memcpy_channels;
+       int i, j;
+
+       dma_cap_zero(s_ddev->cap_mask);
+       dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
+       dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
+       if (ecc->legacy_mode && !memcpy_channels) {
+               dev_warn(ecc->dev,
+                        "Legacy memcpy is enabled, things might not work\n");
+
+               dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
+               s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+               s_ddev->directions = BIT(DMA_MEM_TO_MEM);
+       }
+
+       s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
+       s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
+       s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
+       s_ddev->device_free_chan_resources = edma_free_chan_resources;
+       s_ddev->device_issue_pending = edma_issue_pending;
+       s_ddev->device_tx_status = edma_tx_status;
+       s_ddev->device_config = edma_slave_config;
+       s_ddev->device_pause = edma_dma_pause;
+       s_ddev->device_resume = edma_dma_resume;
+       s_ddev->device_terminate_all = edma_terminate_all;
+       s_ddev->device_synchronize = edma_synchronize;
+
+       s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+       s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
+       s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
+       s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */
+
+       s_ddev->dev = ecc->dev;
+       INIT_LIST_HEAD(&s_ddev->channels);
+
+       if (memcpy_channels) {
+               m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
+               if (!m_ddev) {
+                       dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
+                       memcpy_channels = NULL;
+                       goto ch_setup;
+               }
+               ecc->dma_memcpy = m_ddev;
+
+               dma_cap_zero(m_ddev->cap_mask);
+               dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
+
+               m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+               m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
+               m_ddev->device_free_chan_resources = edma_free_chan_resources;
+               m_ddev->device_issue_pending = edma_issue_pending;
+               m_ddev->device_tx_status = edma_tx_status;
+               m_ddev->device_config = edma_slave_config;
+               m_ddev->device_pause = edma_dma_pause;
+               m_ddev->device_resume = edma_dma_resume;
+               m_ddev->device_terminate_all = edma_terminate_all;
+               m_ddev->device_synchronize = edma_synchronize;
+
+               m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
+               m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
+               m_ddev->directions = BIT(DMA_MEM_TO_MEM);
+               m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+               m_ddev->dev = ecc->dev;
+               INIT_LIST_HEAD(&m_ddev->channels);
+       } else if (!ecc->legacy_mode) {
+               dev_info(ecc->dev, "memcpy is disabled\n");
+       }
+
+ch_setup:
+       for (i = 0; i < ecc->num_channels; i++) {
+               struct edma_chan *echan = &ecc->slave_chans[i];
+               echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
+               echan->ecc = ecc;
+               echan->vchan.desc_free = edma_desc_free;
+
+               if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
+                       vchan_init(&echan->vchan, m_ddev);
+               else
+                       vchan_init(&echan->vchan, s_ddev);
+
+               INIT_LIST_HEAD(&echan->node);
+               for (j = 0; j < EDMA_MAX_SLOTS; j++)
+                       echan->slot[j] = -1;
+       }
+}
+
+static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
+                             struct edma_cc *ecc)
+{
+       int i;
+       u32 value, cccfg;
+       s8 (*queue_priority_map)[2];
+
+       /* Decode the eDMA3 configuration from CCCFG register */
+       cccfg = edma_read(ecc, EDMA_CCCFG);
+
+       value = GET_NUM_REGN(cccfg);
+       ecc->num_region = BIT(value);
+
+       value = GET_NUM_DMACH(cccfg);
+       ecc->num_channels = BIT(value + 1);
+
+       value = GET_NUM_QDMACH(cccfg);
+       ecc->num_qchannels = value * 2;
+
+       value = GET_NUM_PAENTRY(cccfg);
+       ecc->num_slots = BIT(value + 4);
+
+       value = GET_NUM_EVQUE(cccfg);
+       ecc->num_tc = value + 1;
+
+       ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;
+
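+       /*
+        * Illustrative decode (hypothetical values, not from real hardware):
+        * a CCCFG reporting NUM_DMACH = 5, NUM_PAENTRY = 4 and NUM_EVQUE = 3
+        * would give num_channels = BIT(5 + 1) = 64, num_slots =
+        * BIT(4 + 4) = 256 and num_tc = 3 + 1 = 4.
+        */
+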
+       dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
+       dev_dbg(dev, "num_region: %u\n", ecc->num_region);
+       dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
+       dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
+       dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
+       dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
+       dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
+
+       /* Nothing needs to be done if a queue priority mapping is provided */
+       if (pdata->queue_priority_mapping)
+               return 0;
+
+       /*
+        * Configure TC/queue priority as follows:
+        * Q0 - priority 0
+        * Q1 - priority 1
+        * Q2 - priority 2
+        * ...
+        * The meaning of priority numbers: 0 highest priority, 7 lowest
+        * priority. So Q0 is the highest priority queue and the last queue has
+        * the lowest priority.
+        */
+       queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
+                                         GFP_KERNEL);
+       if (!queue_priority_map)
+               return -ENOMEM;
+
+       for (i = 0; i < ecc->num_tc; i++) {
+               queue_priority_map[i][0] = i;
+               queue_priority_map[i][1] = i;
+       }
+       queue_priority_map[i][0] = -1;
+       queue_priority_map[i][1] = -1;
+
+       pdata->queue_priority_mapping = queue_priority_map;
+       /* Default queue has the lowest priority */
+       pdata->default_queue = i - 1;
+
+       return 0;
+}
+
+#if IS_ENABLED(CONFIG_OF)
+static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
+                              size_t sz)
+{
+       const char pname[] = "ti,edma-xbar-event-map";
+       struct resource res;
+       void __iomem *xbar;
+       s16 (*xbar_chans)[2];
+       size_t nelm = sz / sizeof(s16);
+       u32 shift, offset, mux;
+       int ret, i;
+
+       xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
+       if (!xbar_chans)
+               return -ENOMEM;
+
+       ret = of_address_to_resource(dev->of_node, 1, &res);
+       if (ret)
+               return -ENOMEM;
+
+       xbar = devm_ioremap(dev, res.start, resource_size(&res));
+       if (!xbar)
+               return -ENOMEM;
+
+       ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
+                                        nelm);
+       if (ret)
+               return -EIO;
+
+       /* Invalidate last entry for the other user of this mess */
+       nelm >>= 1;
+       xbar_chans[nelm][0] = -1;
+       xbar_chans[nelm][1] = -1;
+
+       for (i = 0; i < nelm; i++) {
+               shift = (xbar_chans[i][1] & 0x03) << 3;
+               offset = xbar_chans[i][1] & 0xfffffffc;
+               mux = readl(xbar + offset);
+               mux &= ~(0xff << shift);
+               mux |= xbar_chans[i][0] << shift;
+               writel(mux, (xbar + offset));
+       }
+
+       pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
+       return 0;
+}
+
+static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
+                                                    bool legacy_mode)
+{
+       struct edma_soc_info *info;
+       struct property *prop;
+       int sz, ret;
+
+       info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
+       if (!info)
+               return ERR_PTR(-ENOMEM);
+
+       if (legacy_mode) {
+               prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
+                                       &sz);
+               if (prop) {
+                       ret = edma_xbar_event_map(dev, info, sz);
+                       if (ret)
+                               return ERR_PTR(ret);
+               }
+               return info;
+       }
+
+       /* Get the list of channels allocated to be used for memcpy */
+       prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
+       if (prop) {
+               const char pname[] = "ti,edma-memcpy-channels";
+               size_t nelm = sz / sizeof(s32);
+               s32 *memcpy_ch;
+
+               memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
+                                        GFP_KERNEL);
+               if (!memcpy_ch)
+                       return ERR_PTR(-ENOMEM);
+
+               ret = of_property_read_u32_array(dev->of_node, pname,
+                                                (u32 *)memcpy_ch, nelm);
+               if (ret)
+                       return ERR_PTR(ret);
+
+               memcpy_ch[nelm] = -1;
+               info->memcpy_channels = memcpy_ch;
+       }
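+
+       /*
+        * Illustrative example (hypothetical DT fragment): a property such
+        * as ti,edma-memcpy-channels = <20 21>; would yield
+        * memcpy_ch = { 20, 21, -1 }, the trailing -1 terminating the list
+        * that edma_is_memcpy_channel() walks.
+        */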
+
+       prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
+                               &sz);
+       if (prop) {
+               const char pname[] = "ti,edma-reserved-slot-ranges";
+               u32 (*tmp)[2];
+               s16 (*rsv_slots)[2];
+               size_t nelm = sz / sizeof(*tmp);
+               struct edma_rsv_info *rsv_info;
+               int i;
+
+               if (!nelm)
+                       return info;
+
+               tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
+               if (!tmp)
+                       return ERR_PTR(-ENOMEM);
+
+               rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
+               if (!rsv_info) {
+                       kfree(tmp);
+                       return ERR_PTR(-ENOMEM);
+               }
+
+               rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
+                                        GFP_KERNEL);
+               if (!rsv_slots) {
+                       kfree(tmp);
+                       return ERR_PTR(-ENOMEM);
+               }
+
+               ret = of_property_read_u32_array(dev->of_node, pname,
+                                                (u32 *)tmp, nelm * 2);
+               if (ret) {
+                       kfree(tmp);
+                       return ERR_PTR(ret);
+               }
+
+               for (i = 0; i < nelm; i++) {
+                       rsv_slots[i][0] = tmp[i][0];
+                       rsv_slots[i][1] = tmp[i][1];
+               }
+               rsv_slots[nelm][0] = -1;
+               rsv_slots[nelm][1] = -1;
+
+               info->rsv = rsv_info;
+               info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
+
+               kfree(tmp);
+       }
+
+       return info;
+}
+
+static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
+                                     struct of_dma *ofdma)
+{
+       struct edma_cc *ecc = ofdma->of_dma_data;
+       struct dma_chan *chan = NULL;
+       struct edma_chan *echan;
+       int i;
+
+       if (!ecc || dma_spec->args_count < 1)
+               return NULL;
+
+       for (i = 0; i < ecc->num_channels; i++) {
+               echan = &ecc->slave_chans[i];
+               if (echan->ch_num == dma_spec->args[0]) {
+                       chan = &echan->vchan.chan;
+                       break;
+               }
+       }
+
+       if (!chan)
+               return NULL;
+
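+       /*
+        * The legacy binding takes a single cell (the channel); the TPCC
+        * binding takes a second cell selecting the transfer controller.
+        */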
+       if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
+               goto out;
+
+       if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
+           dma_spec->args[1] < echan->ecc->num_tc) {
+               echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
+               goto out;
+       }
+
+       return NULL;
+out:
+       /* The channel is going to be used as HW synchronized */
+       echan->hw_triggered = true;
+       return dma_get_slave_channel(chan);
+}
+#else
+static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
+                                                    bool legacy_mode)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
+                                     struct of_dma *ofdma)
+{
+       return NULL;
+}
+#endif
+
+static int edma_probe(struct platform_device *pdev)
+{
+       struct edma_soc_info    *info = pdev->dev.platform_data;
+       s8                      (*queue_priority_mapping)[2];
+       int                     i, off, ln;
+       const s16               (*rsv_slots)[2];
+       const s16               (*xbar_chans)[2];
+       int                     irq;
+       char                    *irq_name;
+       struct resource         *mem;
+       struct device_node      *node = pdev->dev.of_node;
+       struct device           *dev = &pdev->dev;
+       struct edma_cc          *ecc;
+       bool                    legacy_mode = true;
+       int ret;
+
+       if (node) {
+               const struct of_device_id *match;
+
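+               /*
+                * The TPCC binding is identified via the match data; any
+                * other match keeps the legacy binding.
+                */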
+               match = of_match_node(edma_of_ids, node);
+               if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
+                       legacy_mode = false;
+
+               info = edma_setup_info_from_dt(dev, legacy_mode);
+               if (IS_ERR(info)) {
+                       dev_err(dev, "failed to get DT data\n");
+                       return PTR_ERR(info);
+               }
+       }
+
+       if (!info)
+               return -ENODEV;
+
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0) {
+               dev_err(dev, "pm_runtime_get_sync() failed\n");
+               return ret;
+       }
+
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
+       ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+       if (!ecc)
+               return -ENOMEM;
+
+       ecc->dev = dev;
+       ecc->id = pdev->id;
+       ecc->legacy_mode = legacy_mode;
+       /* When booting with DT the pdev->id is -1 */
+       if (ecc->id < 0)
+               ecc->id = 0;
+
+       mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
+       if (!mem) {
+               dev_dbg(dev, "mem resource not found, using index 0\n");
+               mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+               if (!mem) {
+                       dev_err(dev, "no mem resource?\n");
+                       return -ENODEV;
+               }
+       }
+       ecc->base = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(ecc->base))
+               return PTR_ERR(ecc->base);
+
+       platform_set_drvdata(pdev, ecc);
+
+       /* Get eDMA3 configuration from IP */
+       ret = edma_setup_from_hw(dev, info, ecc);
+       if (ret)
+               return ret;
+
+       /* Allocate memory based on the information we got from the IP */
+       ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
+                                       sizeof(*ecc->slave_chans), GFP_KERNEL);
+       if (!ecc->slave_chans)
+               return -ENOMEM;
+
+       ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
+                                      sizeof(unsigned long), GFP_KERNEL);
+       if (!ecc->slot_inuse)
+               return -ENOMEM;
+
+       ecc->default_queue = info->default_queue;
+
+       for (i = 0; i < ecc->num_slots; i++)
+               edma_write_slot(ecc, i, &dummy_paramset);
+
+       if (info->rsv) {
+               /* Set the reserved slots in inuse list */
+               rsv_slots = info->rsv->rsv_slots;
+               if (rsv_slots) {
+                       for (i = 0; rsv_slots[i][0] != -1; i++) {
+                               off = rsv_slots[i][0];
+                               ln = rsv_slots[i][1];
+                               edma_set_bits(off, ln, ecc->slot_inuse);
+                       }
+               }
+       }
+
+       /* Clear the xbar mapped channels in unused list */
+       xbar_chans = info->xbar_chans;
+       if (xbar_chans) {
+               for (i = 0; xbar_chans[i][1] != -1; i++) {
+                       off = xbar_chans[i][1];
+               }
+       }
+
+       irq = platform_get_irq_byname(pdev, "edma3_ccint");
+       if (irq < 0 && node)
+               irq = irq_of_parse_and_map(node, 0);
+
+       if (irq >= 0) {
+               irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
+                                         dev_name(dev));
+               ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
+                                      ecc);
+               if (ret) {
+                       dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
+                       return ret;
+               }
+               ecc->ccint = irq;
+       }
+
+       irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
+       if (irq < 0 && node)
+               irq = irq_of_parse_and_map(node, 2);
+
+       if (irq >= 0) {
+               irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
+                                         dev_name(dev));
+               ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
+                                      ecc);
+               if (ret) {
+                       dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
+                       return ret;
+               }
+               ecc->ccerrint = irq;
+       }
+
+       ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
+       if (ecc->dummy_slot < 0) {
+               dev_err(dev, "Can't allocate PaRAM dummy slot\n");
+               return ecc->dummy_slot;
+       }
+
+       queue_priority_mapping = info->queue_priority_mapping;
+
+       if (!ecc->legacy_mode) {
+               int lowest_priority = 0;
+               struct of_phandle_args tc_args;
+
+               ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
+                                           sizeof(*ecc->tc_list), GFP_KERNEL);
+               if (!ecc->tc_list)
+                       return -ENOMEM;
+
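+               /*
+                * Walk the "ti,tptcs" phandle list: each argument gives the
+                * TC's event queue priority.  The queue with the numerically
+                * largest (i.e. lowest) priority becomes the default queue.
+                */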
+               for (i = 0;; i++) {
+                       ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
+                                                              1, i, &tc_args);
+                       if (ret || i == ecc->num_tc)
+                               break;
+
+                       ecc->tc_list[i].node = tc_args.np;
+                       ecc->tc_list[i].id = i;
+                       queue_priority_mapping[i][1] = tc_args.args[0];
+                       if (queue_priority_mapping[i][1] > lowest_priority) {
+                               lowest_priority = queue_priority_mapping[i][1];
+                               info->default_queue = i;
+                       }
+               }
+       }
+
+       /* Event queue priority mapping */
+       for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+               edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
+                                             queue_priority_mapping[i][1]);
+
+       for (i = 0; i < ecc->num_region; i++) {
+               edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
+               edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
+               edma_write_array(ecc, EDMA_QRAE, i, 0x0);
+       }
+       ecc->info = info;
+
+       /* Init the dma device and channels */
+       edma_dma_init(ecc, legacy_mode);
+
+       for (i = 0; i < ecc->num_channels; i++) {
+               /* Assign all channels to the default queue */
+               edma_assign_channel_eventq(&ecc->slave_chans[i],
+                                          info->default_queue);
+               /* Set entry slot to the dummy slot */
+               edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
+       }
+
+       ecc->dma_slave.filter.map = info->slave_map;
+       ecc->dma_slave.filter.mapcnt = info->slavecnt;
+       ecc->dma_slave.filter.fn = edma_filter_fn;
+
+       ret = dma_async_device_register(&ecc->dma_slave);
+       if (ret) {
+               dev_err(dev, "slave ddev registration failed (%d)\n", ret);
+               goto err_reg1;
+       }
+
+       if (ecc->dma_memcpy) {
+               ret = dma_async_device_register(ecc->dma_memcpy);
+               if (ret) {
+                       dev_err(dev, "memcpy ddev registration failed (%d)\n",
+                               ret);
+                       dma_async_device_unregister(&ecc->dma_slave);
+                       goto err_reg1;
+               }
+       }
+
+       if (node)
+               of_dma_controller_register(node, of_edma_xlate, ecc);
+
+       dev_info(dev, "TI EDMA DMA engine driver\n");
+
+       return 0;
+
+err_reg1:
+       edma_free_slot(ecc, ecc->dummy_slot);
+       return ret;
+}
+
+static void edma_cleanup_vchan(struct dma_device *dmadev)
+{
+       struct edma_chan *echan, *_echan;
+
+       list_for_each_entry_safe(echan, _echan,
+                       &dmadev->channels, vchan.chan.device_node) {
+               list_del(&echan->vchan.chan.device_node);
+               tasklet_kill(&echan->vchan.task);
+       }
+}
+
+static int edma_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct edma_cc *ecc = dev_get_drvdata(dev);
+
+       devm_free_irq(dev, ecc->ccint, ecc);
+       devm_free_irq(dev, ecc->ccerrint, ecc);
+
+       edma_cleanup_vchan(&ecc->dma_slave);
+
+       if (dev->of_node)
+               of_dma_controller_free(dev->of_node);
+       dma_async_device_unregister(&ecc->dma_slave);
+       if (ecc->dma_memcpy)
+               dma_async_device_unregister(ecc->dma_memcpy);
+       edma_free_slot(ecc, ecc->dummy_slot);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int edma_pm_suspend(struct device *dev)
+{
+       struct edma_cc *ecc = dev_get_drvdata(dev);
+       struct edma_chan *echan = ecc->slave_chans;
+       int i;
+
+       for (i = 0; i < ecc->num_channels; i++) {
+               if (echan[i].alloced)
+                       edma_setup_interrupt(&echan[i], false);
+       }
+
+       return 0;
+}
+
+static int edma_pm_resume(struct device *dev)
+{
+       struct edma_cc *ecc = dev_get_drvdata(dev);
+       struct edma_chan *echan = ecc->slave_chans;
+       int i;
+       s8 (*queue_priority_mapping)[2];
+
+       /* Re-initialize the dummy slot with the dummy PaRAM set */
+       edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
+
+       queue_priority_mapping = ecc->info->queue_priority_mapping;
+
+       /* Event queue priority mapping */
+       for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+               edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
+                                             queue_priority_mapping[i][1]);
+
+       for (i = 0; i < ecc->num_channels; i++) {
+               if (echan[i].alloced) {
+                       /* ensure access through shadow region 0 */
+                       edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
+                                      BIT(i & 0x1f));
+
+                       edma_setup_interrupt(&echan[i], true);
+
+                       /* Set up channel -> slot mapping for the entry slot */
+                       edma_set_chmap(&echan[i], echan[i].slot[0]);
+               }
+       }
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops edma_pm_ops = {
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
+};
+
+static struct platform_driver edma_driver = {
+       .probe          = edma_probe,
+       .remove         = edma_remove,
+       .driver = {
+               .name   = "edma",
+               .pm     = &edma_pm_ops,
+               .of_match_table = edma_of_ids,
+       },
+};
+
+static int edma_tptc_probe(struct platform_device *pdev)
+{
+       pm_runtime_enable(&pdev->dev);
+       return pm_runtime_get_sync(&pdev->dev);
+}
+
+static struct platform_driver edma_tptc_driver = {
+       .probe          = edma_tptc_probe,
+       .driver = {
+               .name   = "edma3-tptc",
+               .of_match_table = edma_tptc_of_ids,
+       },
+};
+
+bool edma_filter_fn(struct dma_chan *chan, void *param)
+{
+       bool match = false;
+
+       if (chan->device->dev->driver == &edma_driver.driver) {
+               struct edma_chan *echan = to_edma_chan(chan);
+               unsigned ch_req = *(unsigned *)param;
+               if (ch_req == echan->ch_num) {
+                       /* The channel is going to be used as HW synchronized */
+                       echan->hw_triggered = true;
+                       match = true;
+               }
+       }
+       return match;
+}
+EXPORT_SYMBOL(edma_filter_fn);
+
+static int edma_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&edma_tptc_driver);
+       if (ret)
+               return ret;
+
+       return platform_driver_register(&edma_driver);
+}
+subsys_initcall(edma_init);
+
+static void __exit edma_exit(void)
+{
+       platform_driver_unregister(&edma_driver);
+       platform_driver_unregister(&edma_tptc_driver);
+}
+module_exit(edma_exit);
+
+MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
+MODULE_DESCRIPTION("TI EDMA DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
new file mode 100644 (file)
index 0000000..9b5ca86
--- /dev/null
@@ -0,0 +1,1668 @@
+/*
+ * OMAP DMAengine support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+
+#include "../virt-dma.h"
+
+#define OMAP_SDMA_REQUESTS     127
+#define OMAP_SDMA_CHANNELS     32
+
+struct omap_dmadev {
+       struct dma_device ddev;
+       spinlock_t lock;
+       void __iomem *base;
+       const struct omap_dma_reg *reg_map;
+       struct omap_system_dma_plat_info *plat;
+       bool legacy;
+       bool ll123_supported;
+       struct dma_pool *desc_pool;
+       unsigned dma_requests;
+       spinlock_t irq_lock;
+       uint32_t irq_enable_mask;
+       struct omap_chan **lch_map;
+};
+
+struct omap_chan {
+       struct virt_dma_chan vc;
+       void __iomem *channel_base;
+       const struct omap_dma_reg *reg_map;
+       uint32_t ccr;
+
+       struct dma_slave_config cfg;
+       unsigned dma_sig;
+       bool cyclic;
+       bool paused;
+       bool running;
+
+       int dma_ch;
+       struct omap_desc *desc;
+       unsigned sgidx;
+};
+
+#define DESC_NXT_SV_REFRESH    (0x1 << 24)
+#define DESC_NXT_SV_REUSE      (0x2 << 24)
+#define DESC_NXT_DV_REFRESH    (0x1 << 26)
+#define DESC_NXT_DV_REUSE      (0x2 << 26)
+#define DESC_NTYPE_TYPE2       (0x2 << 29)
+
+/* Type 2 descriptor with Source or Destination address update */
+struct omap_type2_desc {
+       uint32_t next_desc;
+       uint32_t en;
+       uint32_t addr; /* src or dst */
+       uint16_t fn;
+       uint16_t cicr;
+       int16_t cdei;
+       int16_t csei;
+       int32_t cdfi;
+       int32_t csfi;
+} __packed;
+
+struct omap_sg {
+       dma_addr_t addr;
+       uint32_t en;            /* number of elements (24-bit) */
+       uint32_t fn;            /* number of frames (16-bit) */
+       int32_t fi;             /* for double indexing */
+       int16_t ei;             /* for double indexing */
+
+       /* Linked list */
+       struct omap_type2_desc *t2_desc;
+       dma_addr_t t2_desc_paddr;
+};
+
+struct omap_desc {
+       struct virt_dma_desc vd;
+       bool using_ll;
+       enum dma_transfer_direction dir;
+       dma_addr_t dev_addr;
+
+       int32_t fi;             /* for OMAP_DMA_SYNC_PACKET / double indexing */
+       int16_t ei;             /* for double indexing */
+       uint8_t es;             /* CSDP_DATA_TYPE_xxx */
+       uint32_t ccr;           /* CCR value */
+       uint16_t clnk_ctrl;     /* CLNK_CTRL value */
+       uint16_t cicr;          /* CICR value */
+       uint32_t csdp;          /* CSDP value */
+
+       unsigned sglen;
+       struct omap_sg sg[0];
+};
+
+enum {
+       CAPS_0_SUPPORT_LL123    = BIT(20),      /* Linked List type1/2/3 */
+       CAPS_0_SUPPORT_LL4      = BIT(21),      /* Linked List type4 */
+
+       CCR_FS                  = BIT(5),
+       CCR_READ_PRIORITY       = BIT(6),
+       CCR_ENABLE              = BIT(7),
+       CCR_AUTO_INIT           = BIT(8),       /* OMAP1 only */
+       CCR_REPEAT              = BIT(9),       /* OMAP1 only */
+       CCR_OMAP31_DISABLE      = BIT(10),      /* OMAP1 only */
+       CCR_SUSPEND_SENSITIVE   = BIT(8),       /* OMAP2+ only */
+       CCR_RD_ACTIVE           = BIT(9),       /* OMAP2+ only */
+       CCR_WR_ACTIVE           = BIT(10),      /* OMAP2+ only */
+       CCR_SRC_AMODE_CONSTANT  = 0 << 12,
+       CCR_SRC_AMODE_POSTINC   = 1 << 12,
+       CCR_SRC_AMODE_SGLIDX    = 2 << 12,
+       CCR_SRC_AMODE_DBLIDX    = 3 << 12,
+       CCR_DST_AMODE_CONSTANT  = 0 << 14,
+       CCR_DST_AMODE_POSTINC   = 1 << 14,
+       CCR_DST_AMODE_SGLIDX    = 2 << 14,
+       CCR_DST_AMODE_DBLIDX    = 3 << 14,
+       CCR_CONSTANT_FILL       = BIT(16),
+       CCR_TRANSPARENT_COPY    = BIT(17),
+       CCR_BS                  = BIT(18),
+       CCR_SUPERVISOR          = BIT(22),
+       CCR_PREFETCH            = BIT(23),
+       CCR_TRIGGER_SRC         = BIT(24),
+       CCR_BUFFERING_DISABLE   = BIT(25),
+       CCR_WRITE_PRIORITY      = BIT(26),
+       CCR_SYNC_ELEMENT        = 0,
+       CCR_SYNC_FRAME          = CCR_FS,
+       CCR_SYNC_BLOCK          = CCR_BS,
+       CCR_SYNC_PACKET         = CCR_BS | CCR_FS,
+
+       CSDP_DATA_TYPE_8        = 0,
+       CSDP_DATA_TYPE_16       = 1,
+       CSDP_DATA_TYPE_32       = 2,
+       CSDP_SRC_PORT_EMIFF     = 0 << 2, /* OMAP1 only */
+       CSDP_SRC_PORT_EMIFS     = 1 << 2, /* OMAP1 only */
+       CSDP_SRC_PORT_OCP_T1    = 2 << 2, /* OMAP1 only */
+       CSDP_SRC_PORT_TIPB      = 3 << 2, /* OMAP1 only */
+       CSDP_SRC_PORT_OCP_T2    = 4 << 2, /* OMAP1 only */
+       CSDP_SRC_PORT_MPUI      = 5 << 2, /* OMAP1 only */
+       CSDP_SRC_PACKED         = BIT(6),
+       CSDP_SRC_BURST_1        = 0 << 7,
+       CSDP_SRC_BURST_16       = 1 << 7,
+       CSDP_SRC_BURST_32       = 2 << 7,
+       CSDP_SRC_BURST_64       = 3 << 7,
+       CSDP_DST_PORT_EMIFF     = 0 << 9, /* OMAP1 only */
+       CSDP_DST_PORT_EMIFS     = 1 << 9, /* OMAP1 only */
+       CSDP_DST_PORT_OCP_T1    = 2 << 9, /* OMAP1 only */
+       CSDP_DST_PORT_TIPB      = 3 << 9, /* OMAP1 only */
+       CSDP_DST_PORT_OCP_T2    = 4 << 9, /* OMAP1 only */
+       CSDP_DST_PORT_MPUI      = 5 << 9, /* OMAP1 only */
+       CSDP_DST_PACKED         = BIT(13),
+       CSDP_DST_BURST_1        = 0 << 14,
+       CSDP_DST_BURST_16       = 1 << 14,
+       CSDP_DST_BURST_32       = 2 << 14,
+       CSDP_DST_BURST_64       = 3 << 14,
+       CSDP_WRITE_NON_POSTED   = 0 << 16,
+       CSDP_WRITE_POSTED       = 1 << 16,
+       CSDP_WRITE_LAST_NON_POSTED = 2 << 16,
+
+       CICR_TOUT_IE            = BIT(0),       /* OMAP1 only */
+       CICR_DROP_IE            = BIT(1),
+       CICR_HALF_IE            = BIT(2),
+       CICR_FRAME_IE           = BIT(3),
+       CICR_LAST_IE            = BIT(4),
+       CICR_BLOCK_IE           = BIT(5),
+       CICR_PKT_IE             = BIT(7),       /* OMAP2+ only */
+       CICR_TRANS_ERR_IE       = BIT(8),       /* OMAP2+ only */
+       CICR_SUPERVISOR_ERR_IE  = BIT(10),      /* OMAP2+ only */
+       CICR_MISALIGNED_ERR_IE  = BIT(11),      /* OMAP2+ only */
+       CICR_DRAIN_IE           = BIT(12),      /* OMAP2+ only */
+       CICR_SUPER_BLOCK_IE     = BIT(14),      /* OMAP2+ only */
+
+       CLNK_CTRL_ENABLE_LNK    = BIT(15),
+
+       CDP_DST_VALID_INC       = 0 << 0,
+       CDP_DST_VALID_RELOAD    = 1 << 0,
+       CDP_DST_VALID_REUSE     = 2 << 0,
+       CDP_SRC_VALID_INC       = 0 << 2,
+       CDP_SRC_VALID_RELOAD    = 1 << 2,
+       CDP_SRC_VALID_REUSE     = 2 << 2,
+       CDP_NTYPE_TYPE1         = 1 << 4,
+       CDP_NTYPE_TYPE2         = 2 << 4,
+       CDP_NTYPE_TYPE3         = 3 << 4,
+       CDP_TMODE_NORMAL        = 0 << 8,
+       CDP_TMODE_LLIST         = 1 << 8,
+       CDP_FAST                = BIT(10),
+};
+
+static const unsigned es_bytes[] = {
+       [CSDP_DATA_TYPE_8] = 1,
+       [CSDP_DATA_TYPE_16] = 2,
+       [CSDP_DATA_TYPE_32] = 4,
+};
+
+static struct of_dma_filter_info omap_dma_info = {
+       .filter_fn = omap_dma_filter_fn,
+};
+
+static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
+{
+       return container_of(d, struct omap_dmadev, ddev);
+}
+
+static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct omap_chan, vc.chan);
+}
+
+static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
+{
+       return container_of(t, struct omap_desc, vd.tx);
+}
+
+static void omap_dma_desc_free(struct virt_dma_desc *vd)
+{
+       struct omap_desc *d = to_omap_dma_desc(&vd->tx);
+
+       if (d->using_ll) {
+               struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
+               int i;
+
+               for (i = 0; i < d->sglen; i++) {
+                       if (d->sg[i].t2_desc)
+                               dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
+                                             d->sg[i].t2_desc_paddr);
+               }
+       }
+
+       kfree(d);
+}
+
+static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
+                                    enum dma_transfer_direction dir, bool last)
+{
+       struct omap_sg *sg = &d->sg[idx];
+       struct omap_type2_desc *t2_desc = sg->t2_desc;
+
+       if (idx)
+               d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
+       if (last)
+               t2_desc->next_desc = 0xfffffffc;
+
+       t2_desc->en = sg->en;
+       t2_desc->addr = sg->addr;
+       t2_desc->fn = sg->fn & 0xffff;
+       t2_desc->cicr = d->cicr;
+       if (!last)
+               t2_desc->cicr &= ~CICR_BLOCK_IE;
+
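+       /*
+        * For dev-to-mem the destination address is refreshed from each
+        * descriptor while the constant device source is reused; for
+        * mem-to-dev it is the other way around.
+        */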
+       switch (dir) {
+       case DMA_DEV_TO_MEM:
+               t2_desc->cdei = sg->ei;
+               t2_desc->csei = d->ei;
+               t2_desc->cdfi = sg->fi;
+               t2_desc->csfi = d->fi;
+
+               t2_desc->en |= DESC_NXT_DV_REFRESH;
+               t2_desc->en |= DESC_NXT_SV_REUSE;
+               break;
+       case DMA_MEM_TO_DEV:
+               t2_desc->cdei = d->ei;
+               t2_desc->csei = sg->ei;
+               t2_desc->cdfi = d->fi;
+               t2_desc->csfi = sg->fi;
+
+               t2_desc->en |= DESC_NXT_SV_REFRESH;
+               t2_desc->en |= DESC_NXT_DV_REUSE;
+               break;
+       default:
+               return;
+       }
+
+       t2_desc->en |= DESC_NTYPE_TYPE2;
+}
+
+static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
+{
+       switch (type) {
+       case OMAP_DMA_REG_16BIT:
+               writew_relaxed(val, addr);
+               break;
+       case OMAP_DMA_REG_2X16BIT:
+               writew_relaxed(val, addr);
+               writew_relaxed(val >> 16, addr + 2);
+               break;
+       case OMAP_DMA_REG_32BIT:
+               writel_relaxed(val, addr);
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
+
+static unsigned omap_dma_read(unsigned type, void __iomem *addr)
+{
+       unsigned val;
+
+       switch (type) {
+       case OMAP_DMA_REG_16BIT:
+               val = readw_relaxed(addr);
+               break;
+       case OMAP_DMA_REG_2X16BIT:
+               val = readw_relaxed(addr);
+               val |= readw_relaxed(addr + 2) << 16;
+               break;
+       case OMAP_DMA_REG_32BIT:
+               val = readl_relaxed(addr);
+               break;
+       default:
+               WARN_ON(1);
+               val = 0;
+       }
+
+       return val;
+}
+
+static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
+{
+       const struct omap_dma_reg *r = od->reg_map + reg;
+
+       WARN_ON(r->stride);
+
+       omap_dma_write(val, r->type, od->base + r->offset);
+}
+
+static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
+{
+       const struct omap_dma_reg *r = od->reg_map + reg;
+
+       WARN_ON(r->stride);
+
+       return omap_dma_read(r->type, od->base + r->offset);
+}
+
+static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
+{
+       const struct omap_dma_reg *r = c->reg_map + reg;
+
+       omap_dma_write(val, r->type, c->channel_base + r->offset);
+}
+
+static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
+{
+       const struct omap_dma_reg *r = c->reg_map + reg;
+
+       return omap_dma_read(r->type, c->channel_base + r->offset);
+}
+
+static void omap_dma_clear_csr(struct omap_chan *c)
+{
+       if (dma_omap1())
+               omap_dma_chan_read(c, CSR);
+       else
+               omap_dma_chan_write(c, CSR, ~0);
+}
+
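+/*
+ * Read and acknowledge the channel status: OMAP1 clears CSR on read,
+ * OMAP2+ needs the bits that were read written back to clear them.
+ */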
+static unsigned omap_dma_get_csr(struct omap_chan *c)
+{
+       unsigned val = omap_dma_chan_read(c, CSR);
+
+       if (!dma_omap1())
+               omap_dma_chan_write(c, CSR, val);
+
+       return val;
+}
+
+static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
+       unsigned lch)
+{
+       c->channel_base = od->base + od->plat->channel_stride * lch;
+
+       od->lch_map[lch] = c;
+}
+
+static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
+{
+       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+       uint16_t cicr = d->cicr;
+
+       if (__dma_omap15xx(od->plat->dma_attr))
+               omap_dma_chan_write(c, CPC, 0);
+       else
+               omap_dma_chan_write(c, CDAC, 0);
+
+       omap_dma_clear_csr(c);
+
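+       /*
+        * Linked-list mode: the transfer is described by type 2 descriptors
+        * and CNDP points at the first one.  The block-complete interrupt is
+        * enabled from the last descriptor, so clear it in the channel CICR.
+        */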
+       if (d->using_ll) {
+               uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;
+
+               if (d->dir == DMA_DEV_TO_MEM)
+                       cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
+               else
+                       cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
+               omap_dma_chan_write(c, CDP, cdp);
+
+               omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
+               omap_dma_chan_write(c, CCDN, 0);
+               omap_dma_chan_write(c, CCFN, 0xffff);
+               omap_dma_chan_write(c, CCEN, 0xffffff);
+
+               cicr &= ~CICR_BLOCK_IE;
+       } else if (od->ll123_supported) {
+               omap_dma_chan_write(c, CDP, 0);
+       }
+
+       /* Enable interrupts */
+       omap_dma_chan_write(c, CICR, cicr);
+
+       /* Enable channel */
+       omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+       c->running = true;
+}
+
+static void omap_dma_drain_chan(struct omap_chan *c)
+{
+       int i;
+       u32 val;
+
+       /* Wait for sDMA FIFO to drain */
+       for (i = 0; ; i++) {
+               val = omap_dma_chan_read(c, CCR);
+               if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+                       break;
+
+               if (i > 100)
+                       break;
+
+               udelay(5);
+       }
+
+       if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+               dev_err(c->vc.chan.device->dev,
+                       "DMA drain did not complete on lch %d\n",
+                       c->dma_ch);
+}
+
+static int omap_dma_stop(struct omap_chan *c)
+{
+       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+       uint32_t val;
+
+       /* disable irq */
+       omap_dma_chan_write(c, CICR, 0);
+
+       omap_dma_clear_csr(c);
+
+       val = omap_dma_chan_read(c, CCR);
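+       /*
+        * Errata i541 workaround: force the DMA module's master interface to
+        * no-idle (MIDLEMODE) while clearing CCR_ENABLE and draining the
+        * FIFO, then restore OCP_SYSCONFIG.
+        */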
+       if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
+               uint32_t sysconfig;
+
+               sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
+               val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
+               val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
+               omap_dma_glbl_write(od, OCP_SYSCONFIG, val);
+
+               val = omap_dma_chan_read(c, CCR);
+               val &= ~CCR_ENABLE;
+               omap_dma_chan_write(c, CCR, val);
+
+               if (!(c->ccr & CCR_BUFFERING_DISABLE))
+                       omap_dma_drain_chan(c);
+
+               omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
+       } else {
+               if (!(val & CCR_ENABLE))
+                       return -EINVAL;
+
+               val &= ~CCR_ENABLE;
+               omap_dma_chan_write(c, CCR, val);
+
+               if (!(c->ccr & CCR_BUFFERING_DISABLE))
+                       omap_dma_drain_chan(c);
+       }
+
+       mb();
+
+       if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
+               val = omap_dma_chan_read(c, CLNK_CTRL);
+
+               if (dma_omap1())
+                       val |= 1 << 14; /* set the STOP_LNK bit */
+               else
+                       val &= ~CLNK_CTRL_ENABLE_LNK;
+
+               omap_dma_chan_write(c, CLNK_CTRL, val);
+       }
+       c->running = false;
+       return 0;
+}
+
+static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
+{
+       struct omap_sg *sg = d->sg + c->sgidx;
+       unsigned cxsa, cxei, cxfi;
+
+       if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
+               cxsa = CDSA;
+               cxei = CDEI;
+               cxfi = CDFI;
+       } else {
+               cxsa = CSSA;
+               cxei = CSEI;
+               cxfi = CSFI;
+       }
+
+       omap_dma_chan_write(c, cxsa, sg->addr);
+       omap_dma_chan_write(c, cxei, sg->ei);
+       omap_dma_chan_write(c, cxfi, sg->fi);
+       omap_dma_chan_write(c, CEN, sg->en);
+       omap_dma_chan_write(c, CFN, sg->fn);
+
+       omap_dma_start(c, d);
+       c->sgidx++;
+}
+
+static void omap_dma_start_desc(struct omap_chan *c)
+{
+       struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+       struct omap_desc *d;
+       unsigned cxsa, cxei, cxfi;
+
+       if (!vd) {
+               c->desc = NULL;
+               return;
+       }
+
+       list_del(&vd->node);
+
+       c->desc = d = to_omap_dma_desc(&vd->tx);
+       c->sgidx = 0;
+
+       /*
+        * This provides the necessary barrier to ensure data held in
+        * DMA coherent memory is visible to the DMA engine prior to
+        * the transfer starting.
+        */
+       mb();
+
+       omap_dma_chan_write(c, CCR, d->ccr);
+       if (dma_omap1())
+               omap_dma_chan_write(c, CCR2, d->ccr >> 16);
+
+       if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
+               cxsa = CSSA;
+               cxei = CSEI;
+               cxfi = CSFI;
+       } else {
+               cxsa = CDSA;
+               cxei = CDEI;
+               cxfi = CDFI;
+       }
+
+       omap_dma_chan_write(c, cxsa, d->dev_addr);
+       omap_dma_chan_write(c, cxei, d->ei);
+       omap_dma_chan_write(c, cxfi, d->fi);
+       omap_dma_chan_write(c, CSDP, d->csdp);
+       omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
+
+       omap_dma_start_sg(c, d);
+}
+
+static void omap_dma_callback(int ch, u16 status, void *data)
+{
+       struct omap_chan *c = data;
+       struct omap_desc *d;
+       unsigned long flags;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       d = c->desc;
+       if (d) {
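+               /*
+                * Cyclic transfers only signal the period callback.  Otherwise
+                * the descriptor is complete when linked-list mode was used or
+                * the last sg segment finished; if not, kick the next segment.
+                */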
+               if (c->cyclic) {
+                       vchan_cyclic_callback(&d->vd);
+               } else if (d->using_ll || c->sgidx == d->sglen) {
+                       omap_dma_start_desc(c);
+                       vchan_cookie_complete(&d->vd);
+               } else {
+                       omap_dma_start_sg(c, d);
+               }
+       }
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static irqreturn_t omap_dma_irq(int irq, void *devid)
+{
+       struct omap_dmadev *od = devid;
+       unsigned status, channel;
+
+       spin_lock(&od->irq_lock);
+
+       status = omap_dma_glbl_read(od, IRQSTATUS_L1);
+       status &= od->irq_enable_mask;
+       if (status == 0) {
+               spin_unlock(&od->irq_lock);
+               return IRQ_NONE;
+       }
+
+       while ((channel = ffs(status)) != 0) {
+               unsigned mask, csr;
+               struct omap_chan *c;
+
+               channel -= 1;
+               mask = BIT(channel);
+               status &= ~mask;
+
+               c = od->lch_map[channel];
+               if (c == NULL) {
+                       /* This should never happen */
+                       dev_err(od->ddev.dev, "invalid channel %u\n", channel);
+                       continue;
+               }
+
+               csr = omap_dma_get_csr(c);
+               omap_dma_glbl_write(od, IRQSTATUS_L1, mask);
+
+               omap_dma_callback(channel, csr, c);
+       }
+
+       spin_unlock(&od->irq_lock);
+
+       return IRQ_HANDLED;
+}
+
+static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       struct device *dev = od->ddev.dev;
+       int ret;
+
+       if (od->legacy) {
+               ret = omap_request_dma(c->dma_sig, "DMA engine",
+                                      omap_dma_callback, c, &c->dma_ch);
+       } else {
+               ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
+                                      &c->dma_ch);
+       }
+
+       dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);
+
+       if (ret >= 0) {
+               omap_dma_assign(od, c, c->dma_ch);
+
+               if (!od->legacy) {
+                       unsigned val;
+
+                       spin_lock_irq(&od->irq_lock);
+                       val = BIT(c->dma_ch);
+                       omap_dma_glbl_write(od, IRQSTATUS_L1, val);
+                       od->irq_enable_mask |= val;
+                       omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+
+                       val = omap_dma_glbl_read(od, IRQENABLE_L0);
+                       val &= ~BIT(c->dma_ch);
+                       omap_dma_glbl_write(od, IRQENABLE_L0, val);
+                       spin_unlock_irq(&od->irq_lock);
+               }
+       }
+
+       if (dma_omap1()) {
+               if (__dma_omap16xx(od->plat->dma_attr)) {
+                       c->ccr = CCR_OMAP31_DISABLE;
+                       /* Duplicate what plat-omap/dma.c does */
+                       c->ccr |= c->dma_ch + 1;
+               } else {
+                       c->ccr = c->dma_sig & 0x1f;
+               }
+       } else {
+               c->ccr = c->dma_sig & 0x1f;
+               c->ccr |= (c->dma_sig & ~0x1f) << 14;
+       }
+       if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
+               c->ccr |= CCR_BUFFERING_DISABLE;
+
+       return ret;
+}
+
+static void omap_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+       struct omap_chan *c = to_omap_dma_chan(chan);
+
+       if (!od->legacy) {
+               spin_lock_irq(&od->irq_lock);
+               od->irq_enable_mask &= ~BIT(c->dma_ch);
+               omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+               spin_unlock_irq(&od->irq_lock);
+       }
+
+       c->channel_base = NULL;
+       od->lch_map[c->dma_ch] = NULL;
+       vchan_free_chan_resources(&c->vc);
+       omap_free_dma(c->dma_ch);
+
+       dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
+               c->dma_sig);
+       c->dma_sig = 0;
+}
+
+static size_t omap_dma_sg_size(struct omap_sg *sg)
+{
+       return sg->en * sg->fn;
+}
+
+static size_t omap_dma_desc_size(struct omap_desc *d)
+{
+       unsigned i;
+       size_t size;
+
+       for (size = i = 0; i < d->sglen; i++)
+               size += omap_dma_sg_size(&d->sg[i]);
+
+       return size * es_bytes[d->es];
+}
+
+static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
+{
+       unsigned i;
+       size_t size, es_size = es_bytes[d->es];
+
+       for (size = i = 0; i < d->sglen; i++) {
+               size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
+
+               if (size)
+                       size += this_size;
+               else if (addr >= d->sg[i].addr &&
+                        addr < d->sg[i].addr + this_size)
+                       size += d->sg[i].addr + this_size - addr;
+       }
+       return size;
+}
+
+/*
+ * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
+ * read before the DMA controller finished disabling the channel.
+ */
+static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
+{
+       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+       uint32_t val;
+
+       val = omap_dma_chan_read(c, reg);
+       if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
+               val = omap_dma_chan_read(c, reg);
+
+       return val;
+}
+
+static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
+{
+       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+       dma_addr_t addr, cdac;
+
+       if (__dma_omap15xx(od->plat->dma_attr)) {
+               addr = omap_dma_chan_read(c, CPC);
+       } else {
+               addr = omap_dma_chan_read_3_3(c, CSAC);
+               cdac = omap_dma_chan_read_3_3(c, CDAC);
+
+               /*
+                * CDAC == 0 indicates that the DMA transfer on the channel has
+                * not been started (no data has been transferred so far).
+                * Return the programmed source start address in this case.
+                */
+               if (cdac == 0)
+                       addr = omap_dma_chan_read(c, CSSA);
+       }
+
+       if (dma_omap1())
+               addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;
+
+       return addr;
+}
+
+static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
+{
+       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+       dma_addr_t addr;
+
+       if (__dma_omap15xx(od->plat->dma_attr)) {
+               addr = omap_dma_chan_read(c, CPC);
+       } else {
+               addr = omap_dma_chan_read_3_3(c, CDAC);
+
+               /*
+                * CDAC == 0 indicates that the DMA transfer on the channel
+                * has not been started (no data has been transferred so
+                * far).  Return the programmed destination start address in
+                * this case.
+                */
+               if (addr == 0)
+                       addr = omap_dma_chan_read(c, CDSA);
+       }
+
+       if (dma_omap1())
+               addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;
+
+       return addr;
+}
+
+static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
+       dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       struct virt_dma_desc *vd;
+       enum dma_status ret;
+       unsigned long flags;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+
+       if (!c->paused && c->running) {
+               uint32_t ccr = omap_dma_chan_read(c, CCR);
+               /*
+                * If the channel is no longer active, the transfer has
+                * completed; set the return value accordingly.
+                */
+               if (!(ccr & CCR_ENABLE))
+                       ret = DMA_COMPLETE;
+       }
+
+       if (ret == DMA_COMPLETE || !txstate)
+               return ret;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       vd = vchan_find_desc(&c->vc, cookie);
+       if (vd) {
+               txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
+       } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+               struct omap_desc *d = c->desc;
+               dma_addr_t pos;
+
+               if (d->dir == DMA_MEM_TO_DEV)
+                       pos = omap_dma_get_src_pos(c);
+               else if (d->dir == DMA_DEV_TO_MEM  || d->dir == DMA_MEM_TO_MEM)
+                       pos = omap_dma_get_dst_pos(c);
+               else
+                       pos = 0;
+
+               txstate->residue = omap_dma_desc_size_pos(d, pos);
+       } else {
+               txstate->residue = 0;
+       }
+       if (ret == DMA_IN_PROGRESS && c->paused)
+               ret = DMA_PAUSED;
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+
+       return ret;
+}
+
+static void omap_dma_issue_pending(struct dma_chan *chan)
+{
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       if (vchan_issue_pending(&c->vc) && !c->desc)
+               omap_dma_start_desc(c);
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
+       struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
+       enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
+{
+       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       enum dma_slave_buswidth dev_width;
+       struct scatterlist *sgent;
+       struct omap_desc *d;
+       dma_addr_t dev_addr;
+       unsigned i, es, en, frame_bytes;
+       bool ll_failed = false;
+       u32 burst;
+       u32 port_window, port_window_bytes;
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_addr = c->cfg.src_addr;
+               dev_width = c->cfg.src_addr_width;
+               burst = c->cfg.src_maxburst;
+               port_window = c->cfg.src_port_window_size;
+       } else if (dir == DMA_MEM_TO_DEV) {
+               dev_addr = c->cfg.dst_addr;
+               dev_width = c->cfg.dst_addr_width;
+               burst = c->cfg.dst_maxburst;
+               port_window = c->cfg.dst_port_window_size;
+       } else {
+               dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       /* Bus width translates to the element size (ES) */
+       switch (dev_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               es = CSDP_DATA_TYPE_8;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               es = CSDP_DATA_TYPE_16;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               es = CSDP_DATA_TYPE_32;
+               break;
+       default: /* not reached */
+               return NULL;
+       }
+
+       /* Now allocate and setup the descriptor. */
+       d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC);
+       if (!d)
+               return NULL;
+
+       d->dir = dir;
+       d->dev_addr = dev_addr;
+       d->es = es;
+
+       /* When the port_window is used, one frame must cover the window */
+       if (port_window) {
+               burst = port_window;
+               port_window_bytes = port_window * es_bytes[es];
+
+               d->ei = 1;
+               /*
+                * One frame covers the port_window, and by configuring
+                * the source frame index to be -1 * (port_window - 1)
+                * we instruct the sDMA that after a frame is processed
+                * it should move back to the start of the window.
+                */
+               d->fi = -(port_window_bytes - 1);
+       }
+
+       d->ccr = c->ccr | CCR_SYNC_FRAME;
+       if (dir == DMA_DEV_TO_MEM) {
+               d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+
+               d->ccr |= CCR_DST_AMODE_POSTINC;
+               if (port_window) {
+                       d->ccr |= CCR_SRC_AMODE_DBLIDX;
+
+                       if (port_window_bytes >= 64)
+                               d->csdp |= CSDP_SRC_BURST_64;
+                       else if (port_window_bytes >= 32)
+                               d->csdp |= CSDP_SRC_BURST_32;
+                       else if (port_window_bytes >= 16)
+                               d->csdp |= CSDP_SRC_BURST_16;
+
+               } else {
+                       d->ccr |= CCR_SRC_AMODE_CONSTANT;
+               }
+       } else {
+               d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+
+               d->ccr |= CCR_SRC_AMODE_POSTINC;
+               if (port_window) {
+                       d->ccr |= CCR_DST_AMODE_DBLIDX;
+
+                       if (port_window_bytes >= 64)
+                               d->csdp |= CSDP_DST_BURST_64;
+                       else if (port_window_bytes >= 32)
+                               d->csdp |= CSDP_DST_BURST_32;
+                       else if (port_window_bytes >= 16)
+                               d->csdp |= CSDP_DST_BURST_16;
+               } else {
+                       d->ccr |= CCR_DST_AMODE_CONSTANT;
+               }
+       }
+
+       d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
+       d->csdp |= es;
+
+       if (dma_omap1()) {
+               d->cicr |= CICR_TOUT_IE;
+
+               if (dir == DMA_DEV_TO_MEM)
+                       d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
+               else
+                       d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
+       } else {
+               if (dir == DMA_DEV_TO_MEM)
+                       d->ccr |= CCR_TRIGGER_SRC;
+
+               d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+
+               if (port_window)
+                       d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
+       }
+       if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
+               d->clnk_ctrl = c->dma_ch;
+
+       /*
+        * Build our scatterlist entries: each contains the address,
+        * the number of elements (EN) in each frame, and the number of
+        * frames (FN).  Number of bytes for this entry = ES * EN * FN.
+        *
+        * Burst size translates to number of elements with frame sync.
+        * Note: DMA engine defines burst to be the number of dev-width
+        * transfers.
+        */
+       en = burst;
+       frame_bytes = es_bytes[es] * en;
+
+       if (sglen >= 2)
+               d->using_ll = od->ll123_supported;
+
+       for_each_sg(sgl, sgent, sglen, i) {
+               struct omap_sg *osg = &d->sg[i];
+
+               osg->addr = sg_dma_address(sgent);
+               osg->en = en;
+               osg->fn = sg_dma_len(sgent) / frame_bytes;
+
+               if (d->using_ll) {
+                       osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
+                                                     &osg->t2_desc_paddr);
+                       if (!osg->t2_desc) {
+                               dev_err(chan->device->dev,
+                                       "t2_desc[%d] allocation failed\n", i);
+                               ll_failed = true;
+                               d->using_ll = false;
+                               continue;
+                       }
+
+                       omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
+               }
+       }
+
+       d->sglen = sglen;
+
+       /* Release the dma_pool entries if one allocation failed */
+       if (ll_failed) {
+               for (i = 0; i < d->sglen; i++) {
+                       struct omap_sg *osg = &d->sg[i];
+
+                       if (osg->t2_desc) {
+                               dma_pool_free(od->desc_pool, osg->t2_desc,
+                                             osg->t2_desc_paddr);
+                               osg->t2_desc = NULL;
+                       }
+               }
+       }
+
+       return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
+       struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+       size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
+{
+       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       enum dma_slave_buswidth dev_width;
+       struct omap_desc *d;
+       dma_addr_t dev_addr;
+       unsigned es;
+       u32 burst;
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_addr = c->cfg.src_addr;
+               dev_width = c->cfg.src_addr_width;
+               burst = c->cfg.src_maxburst;
+       } else if (dir == DMA_MEM_TO_DEV) {
+               dev_addr = c->cfg.dst_addr;
+               dev_width = c->cfg.dst_addr_width;
+               burst = c->cfg.dst_maxburst;
+       } else {
+               dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       /* Bus width translates to the element size (ES) */
+       switch (dev_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               es = CSDP_DATA_TYPE_8;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               es = CSDP_DATA_TYPE_16;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               es = CSDP_DATA_TYPE_32;
+               break;
+       default: /* not reached */
+               return NULL;
+       }
+
+       /* Now allocate and setup the descriptor. */
+       d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+       if (!d)
+               return NULL;
+
+       d->dir = dir;
+       d->dev_addr = dev_addr;
+       d->fi = burst;
+       d->es = es;
+       d->sg[0].addr = buf_addr;
+       d->sg[0].en = period_len / es_bytes[es];
+       d->sg[0].fn = buf_len / period_len;
+       d->sglen = 1;
+
+       d->ccr = c->ccr;
+       if (dir == DMA_DEV_TO_MEM)
+               d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
+       else
+               d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
+
+       d->cicr = CICR_DROP_IE;
+       if (flags & DMA_PREP_INTERRUPT)
+               d->cicr |= CICR_FRAME_IE;
+
+       d->csdp = es;
+
+       if (dma_omap1()) {
+               d->cicr |= CICR_TOUT_IE;
+
+               if (dir == DMA_DEV_TO_MEM)
+                       d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
+               else
+                       d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
+       } else {
+               if (burst)
+                       d->ccr |= CCR_SYNC_PACKET;
+               else
+                       d->ccr |= CCR_SYNC_ELEMENT;
+
+               if (dir == DMA_DEV_TO_MEM) {
+                       d->ccr |= CCR_TRIGGER_SRC;
+                       d->csdp |= CSDP_DST_PACKED;
+               } else {
+                       d->csdp |= CSDP_SRC_PACKED;
+               }
+
+               d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+
+               d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
+       }
+
+       if (__dma_omap15xx(od->plat->dma_attr))
+               d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
+       else
+               d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;
+
+       c->cyclic = true;
+
+       return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
+       struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+       size_t len, unsigned long tx_flags)
+{
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       struct omap_desc *d;
+       uint8_t data_type;
+
+       d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+       if (!d)
+               return NULL;
+
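+       /*
+        * Use the widest element size (up to 32 bit) that src, dest and
+        * len are all aligned to.
+        */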
+       data_type = __ffs((src | dest | len));
+       if (data_type > CSDP_DATA_TYPE_32)
+               data_type = CSDP_DATA_TYPE_32;
+
+       d->dir = DMA_MEM_TO_MEM;
+       d->dev_addr = src;
+       d->fi = 0;
+       d->es = data_type;
+       d->sg[0].en = len / BIT(data_type);
+       d->sg[0].fn = 1;
+       d->sg[0].addr = dest;
+       d->sglen = 1;
+       d->ccr = c->ccr;
+       d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
+
+       d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+
+       d->csdp = data_type;
+
+       if (dma_omap1()) {
+               d->cicr |= CICR_TOUT_IE;
+               d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
+       } else {
+               d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
+               d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+               d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
+       }
+
+       return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
+       struct dma_chan *chan, struct dma_interleaved_template *xt,
+       unsigned long flags)
+{
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       struct omap_desc *d;
+       struct omap_sg *sg;
+       uint8_t data_type;
+       size_t src_icg, dst_icg;
+
+       /* Slave mode is not supported */
+       if (is_slave_direction(xt->dir))
+               return NULL;
+
+       if (xt->frame_size != 1 || xt->numf == 0)
+               return NULL;
+
+       d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+       if (!d)
+               return NULL;
+
+       data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
+       if (data_type > CSDP_DATA_TYPE_32)
+               data_type = CSDP_DATA_TYPE_32;
+
+       sg = &d->sg[0];
+       d->dir = DMA_MEM_TO_MEM;
+       d->dev_addr = xt->src_start;
+       d->es = data_type;
+       sg->en = xt->sgl[0].size / BIT(data_type);
+       sg->fn = xt->numf;
+       sg->addr = xt->dst_start;
+       d->sglen = 1;
+       d->ccr = c->ccr;
+
+       src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+       dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+       if (src_icg) {
+               d->ccr |= CCR_SRC_AMODE_DBLIDX;
+               d->ei = 1;
+               d->fi = src_icg;
+       } else if (xt->src_inc) {
+               d->ccr |= CCR_SRC_AMODE_POSTINC;
+               d->fi = 0;
+       } else {
+               dev_err(chan->device->dev,
+                       "%s: SRC constant addressing is not supported\n",
+                       __func__);
+               kfree(d);
+               return NULL;
+       }
+
+       if (dst_icg) {
+               d->ccr |= CCR_DST_AMODE_DBLIDX;
+               sg->ei = 1;
+               sg->fi = dst_icg;
+       } else if (xt->dst_inc) {
+               d->ccr |= CCR_DST_AMODE_POSTINC;
+               sg->fi = 0;
+       } else {
+               dev_err(chan->device->dev,
+                       "%s: DST constant addressing is not supported\n",
+                       __func__);
+               kfree(d);
+               return NULL;
+       }
+
+       d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+
+       d->csdp = data_type;
+
+       if (dma_omap1()) {
+               d->cicr |= CICR_TOUT_IE;
+               d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
+       } else {
+               d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
+               d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+               d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
+       }
+
+       return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
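
A client would describe such a 2-D transfer with a struct
dma_interleaved_template whose frame_size is 1, matching the checks above.
The sketch below is illustrative only: example_prep_interleaved() and its
parameters are hypothetical, and it assumes a contiguous source with a gap
(dst_skip) between destination rows.

#include <linux/dmaengine.h>
#include <linux/slab.h>

/* Illustrative sketch: copy numf rows of row_bytes, skipping dst_skip on dst. */
static struct dma_async_tx_descriptor *
example_prep_interleaved(struct dma_chan *chan, dma_addr_t src,
			 dma_addr_t dst, size_t row_bytes, size_t numf,
			 size_t dst_skip)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;

	/* One chunk per frame, as required by the frame_size == 1 check. */
	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->dir = DMA_MEM_TO_MEM;
	xt->src_start = src;
	xt->dst_start = dst;
	xt->numf = numf;
	xt->frame_size = 1;
	xt->sgl[0].size = row_bytes;
	xt->sgl[0].icg = dst_skip;	/* gap applied on the destination side */
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = false;		/* contiguous source: post-increment */
	xt->dst_sgl = true;		/* honour icg on the destination */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_CTRL_ACK);
	kfree(xt);
	return tx;
}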
+
+static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+       struct omap_chan *c = to_omap_dma_chan(chan);
+
+       if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+           cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+               return -EINVAL;
+
+       if (cfg->src_maxburst > chan->device->max_burst ||
+           cfg->dst_maxburst > chan->device->max_burst)
+               return -EINVAL;
+
+       memcpy(&c->cfg, cfg, sizeof(c->cfg));
+
+       return 0;
+}
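
A peripheral client configures the channel before preparing slave transfers;
the checks above reject 8-byte bus widths and bursts larger than the
advertised max_burst. A hypothetical memory-to-device configuration might
look like the following sketch (the FIFO address, width and burst are made-up
values):

#include <linux/dmaengine.h>

/* Illustrative sketch: configure a channel for MEM_TO_DEV to a 32-bit FIFO. */
static int example_config_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		/* 1/2/4-byte widths are accepted; 8 bytes is rejected above. */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		/* Must not exceed the controller's advertised max_burst. */
		.dst_maxburst	= 16,
	};

	return dmaengine_slave_config(chan, &cfg);
}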
+
+static int omap_dma_terminate_all(struct dma_chan *chan)
+{
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+
+       /*
+        * Stop DMA activity: we assume the callback will not be called
+        * after omap_dma_stop() returns (even if it does, it will see
+        * c->desc is NULL and exit.)
+        */
+       if (c->desc) {
+               vchan_terminate_vdesc(&c->desc->vd);
+               c->desc = NULL;
+               /* Avoid stopping the dma twice */
+               if (!c->paused)
+                       omap_dma_stop(c);
+       }
+
+       c->cyclic = false;
+       c->paused = false;
+
+       vchan_get_all_descriptors(&c->vc, &head);
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+       vchan_dma_desc_free_list(&c->vc, &head);
+
+       return 0;
+}
+
+static void omap_dma_synchronize(struct dma_chan *chan)
+{
+       struct omap_chan *c = to_omap_dma_chan(chan);
+
+       vchan_synchronize(&c->vc);
+}
+
+static int omap_dma_pause(struct dma_chan *chan)
+{
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+       unsigned long flags;
+       int ret = -EINVAL;
+       bool can_pause = false;
+
+       spin_lock_irqsave(&od->irq_lock, flags);
+
+       if (!c->desc)
+               goto out;
+
+       if (c->cyclic)
+               can_pause = true;
+
+       /*
+        * We do not allow DMA_MEM_TO_DEV transfers to be paused.
+        * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
+        * "When a channel is disabled during a transfer, the channel undergoes
+        * an abort, unless it is hardware-source-synchronized …".
+        * A source-synchronised channel is one where the fetching of data is
+        * under control of the device. In other words, a device-to-memory
+        * transfer. So, a destination-synchronised channel (which would be a
+        * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
+        * bit is cleared.
+        * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
+        * aborts immediately after completion of current read/write
+        * transactions and then the FIFO is cleaned up." The term "cleaned up"
+        * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
+        * are both clear _before_ disabling the channel, otherwise data loss
+        * will occur.
+        * The problem is that if the channel is active, device activity can
+        * start new DMA activity between reading those bits as clear and the
+        * write to DMA_CCR that clears the enable bit reaching the hardware.
+        * If the DMA hardware cannot drain the data in its FIFO to the
+        * destination, data loss "might" occur (say if we write to a UART and
+        * the UART is not accepting any further data).
+        */
+       else if (c->desc->dir == DMA_DEV_TO_MEM)
+               can_pause = true;
+
+       if (can_pause && !c->paused) {
+               ret = omap_dma_stop(c);
+               if (!ret)
+                       c->paused = true;
+       }
+out:
+       spin_unlock_irqrestore(&od->irq_lock, flags);
+
+       return ret;
+}
+
+static int omap_dma_resume(struct dma_chan *chan)
+{
+       struct omap_chan *c = to_omap_dma_chan(chan);
+       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+       unsigned long flags;
+       int ret = -EINVAL;
+
+       spin_lock_irqsave(&od->irq_lock, flags);
+
+       if (c->paused && c->desc) {
+               mb();
+
+               /* Restore channel link register */
+               omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);
+
+               omap_dma_start(c, c->desc);
+               c->paused = false;
+               ret = 0;
+       }
+       spin_unlock_irqrestore(&od->irq_lock, flags);
+
+       return ret;
+}
+
+static int omap_dma_chan_init(struct omap_dmadev *od)
+{
+       struct omap_chan *c;
+
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return -ENOMEM;
+
+       c->reg_map = od->reg_map;
+       c->vc.desc_free = omap_dma_desc_free;
+       vchan_init(&c->vc, &od->ddev);
+
+       return 0;
+}
+
+static void omap_dma_free(struct omap_dmadev *od)
+{
+       while (!list_empty(&od->ddev.channels)) {
+               struct omap_chan *c = list_first_entry(&od->ddev.channels,
+                       struct omap_chan, vc.chan.device_node);
+
+               list_del(&c->vc.chan.device_node);
+               tasklet_kill(&c->vc.task);
+               kfree(c);
+       }
+}
+
+#define OMAP_DMA_BUSWIDTHS     (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int omap_dma_probe(struct platform_device *pdev)
+{
+       struct omap_dmadev *od;
+       struct resource *res;
+       int rc, i, irq;
+       u32 lch_count;
+
+       od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+       if (!od)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       od->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(od->base))
+               return PTR_ERR(od->base);
+
+       od->plat = omap_get_plat_info();
+       if (!od->plat)
+               return -EPROBE_DEFER;
+
+       od->reg_map = od->plat->reg_map;
+
+       dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+       dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+       dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
+       dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
+       od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
+       od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
+       od->ddev.device_tx_status = omap_dma_tx_status;
+       od->ddev.device_issue_pending = omap_dma_issue_pending;
+       od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
+       od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
+       od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
+       od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
+       od->ddev.device_config = omap_dma_slave_config;
+       od->ddev.device_pause = omap_dma_pause;
+       od->ddev.device_resume = omap_dma_resume;
+       od->ddev.device_terminate_all = omap_dma_terminate_all;
+       od->ddev.device_synchronize = omap_dma_synchronize;
+       od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
+       od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
+       od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
+       od->ddev.dev = &pdev->dev;
+       INIT_LIST_HEAD(&od->ddev.channels);
+       spin_lock_init(&od->lock);
+       spin_lock_init(&od->irq_lock);
+
+       /* Number of DMA requests */
+       od->dma_requests = OMAP_SDMA_REQUESTS;
+       if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+                                                     "dma-requests",
+                                                     &od->dma_requests)) {
+               dev_info(&pdev->dev,
+                        "Missing dma-requests property, using %u.\n",
+                        OMAP_SDMA_REQUESTS);
+       }
+
+       /* Number of available logical channels */
+       if (!pdev->dev.of_node) {
+               lch_count = od->plat->dma_attr->lch_count;
+               if (unlikely(!lch_count))
+                       lch_count = OMAP_SDMA_CHANNELS;
+       } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
+                                       &lch_count)) {
+               dev_info(&pdev->dev,
+                        "Missing dma-channels property, using %u.\n",
+                        OMAP_SDMA_CHANNELS);
+               lch_count = OMAP_SDMA_CHANNELS;
+       }
+
+       od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+                                  GFP_KERNEL);
+       if (!od->lch_map)
+               return -ENOMEM;
+
+       for (i = 0; i < od->dma_requests; i++) {
+               rc = omap_dma_chan_init(od);
+               if (rc) {
+                       omap_dma_free(od);
+                       return rc;
+               }
+       }
+
+       irq = platform_get_irq(pdev, 1);
+       if (irq <= 0) {
+               dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
+               od->legacy = true;
+       } else {
+               /* Disable all interrupts */
+               od->irq_enable_mask = 0;
+               omap_dma_glbl_write(od, IRQENABLE_L1, 0);
+
+               rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
+                                     IRQF_SHARED, "omap-dma-engine", od);
+               if (rc)
+                       return rc;
+       }
+
+       if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
+               od->ll123_supported = true;
+
+       od->ddev.filter.map = od->plat->slave_map;
+       od->ddev.filter.mapcnt = od->plat->slavecnt;
+       od->ddev.filter.fn = omap_dma_filter_fn;
+
+       if (od->ll123_supported) {
+               od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
+                                               &pdev->dev,
+                                               sizeof(struct omap_type2_desc),
+                                               4, 0);
+               if (!od->desc_pool) {
+                       dev_err(&pdev->dev,
+                               "unable to allocate descriptor pool\n");
+                       od->ll123_supported = false;
+               }
+       }
+
+       rc = dma_async_device_register(&od->ddev);
+       if (rc) {
+               pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
+                       rc);
+               omap_dma_free(od);
+               return rc;
+       }
+
+       platform_set_drvdata(pdev, od);
+
+       if (pdev->dev.of_node) {
+               omap_dma_info.dma_cap = od->ddev.cap_mask;
+
+               /* Device-tree DMA controller registration */
+               rc = of_dma_controller_register(pdev->dev.of_node,
+                               of_dma_simple_xlate, &omap_dma_info);
+               if (rc) {
+                       pr_warn("OMAP-DMA: failed to register DMA controller\n");
+                       dma_async_device_unregister(&od->ddev);
+                       omap_dma_free(od);
+               }
+       }
+
+       dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
+                od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
+
+       return rc;
+}
+
+static int omap_dma_remove(struct platform_device *pdev)
+{
+       struct omap_dmadev *od = platform_get_drvdata(pdev);
+       int irq;
+
+       if (pdev->dev.of_node)
+               of_dma_controller_free(pdev->dev.of_node);
+
+       irq = platform_get_irq(pdev, 1);
+       devm_free_irq(&pdev->dev, irq, od);
+
+       dma_async_device_unregister(&od->ddev);
+
+       if (!od->legacy) {
+               /* Disable all interrupts */
+               omap_dma_glbl_write(od, IRQENABLE_L0, 0);
+       }
+
+       if (od->ll123_supported)
+               dma_pool_destroy(od->desc_pool);
+
+       omap_dma_free(od);
+
+       return 0;
+}
+
+static const struct of_device_id omap_dma_match[] = {
+       { .compatible = "ti,omap2420-sdma", },
+       { .compatible = "ti,omap2430-sdma", },
+       { .compatible = "ti,omap3430-sdma", },
+       { .compatible = "ti,omap3630-sdma", },
+       { .compatible = "ti,omap4430-sdma", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, omap_dma_match);
+
+static struct platform_driver omap_dma_driver = {
+       .probe  = omap_dma_probe,
+       .remove = omap_dma_remove,
+       .driver = {
+               .name = "omap-dma-engine",
+               .of_match_table = of_match_ptr(omap_dma_match),
+       },
+};
+
+bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+       if (chan->device->dev->driver == &omap_dma_driver.driver) {
+               struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+               struct omap_chan *c = to_omap_dma_chan(chan);
+               unsigned req = *(unsigned *)param;
+
+               if (req <= od->dma_requests) {
+                       c->dma_sig = req;
+                       return true;
+               }
+       }
+       return false;
+}
+EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
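
Legacy (non-DT) clients obtain a channel through this filter by passing the
wanted sDMA request number; a minimal sketch of the usual
dma_request_channel() pattern follows (the request line value supplied by the
caller is hypothetical):

#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

/* Illustrative sketch: request a channel bound to sDMA request line "req". */
static struct dma_chan *example_request_omap_chan(unsigned int req)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, omap_dma_filter_fn, &req);
}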
+
+static int omap_dma_init(void)
+{
+       return platform_driver_register(&omap_dma_driver);
+}
+subsys_initcall(omap_dma_init);
+
+static void __exit omap_dma_exit(void)
+{
+       platform_driver_unregister(&omap_dma_driver);
+}
+module_exit(omap_dma_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
index 4d8c7b9078fd7864f36a9ec605b43652e6267e4b..eb45af71d3a343c08fa9f6c8f6db3336b45d56db 100644 (file)
@@ -1244,8 +1244,7 @@ static void txx9dmac_shutdown(struct platform_device *pdev)
 
 static int txx9dmac_suspend_noirq(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
+       struct txx9dmac_dev *ddev = dev_get_drvdata(dev);
 
        txx9dmac_off(ddev);
        return 0;
@@ -1253,9 +1252,8 @@ static int txx9dmac_suspend_noirq(struct device *dev)
 
 static int txx9dmac_resume_noirq(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
-       struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
+       struct txx9dmac_dev *ddev = dev_get_drvdata(dev);
+       struct txx9dmac_platform_data *pdata = dev_get_platdata(dev);
        u32 mcr;
 
        mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
new file mode 100644 (file)
index 0000000..b0115e3
--- /dev/null
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _SPRD_DMA_H_
+#define _SPRD_DMA_H_
+
+#define SPRD_DMA_REQ_SHIFT 16
+#define SPRD_DMA_FLAGS(req_mode, int_type) \
+       ((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type))
+
+/*
+ * enum sprd_dma_req_mode: define the DMA request mode
+ * @SPRD_DMA_FRAG_REQ: fragment request mode
+ * @SPRD_DMA_BLK_REQ: block request mode
+ * @SPRD_DMA_TRANS_REQ: transaction request mode
+ * @SPRD_DMA_LIST_REQ: link-list request mode
+ *
+ * There are 4 request modes: fragment mode, block mode, transaction mode
+ * and link-list mode. One transaction can contain several blocks, and one
+ * block can contain several fragments. Link-list mode means several DMA
+ * configurations can be saved in one reserved memory area, from which the
+ * DMA engine fetches each configuration automatically to start the transfer.
+ */
+enum sprd_dma_req_mode {
+       SPRD_DMA_FRAG_REQ,
+       SPRD_DMA_BLK_REQ,
+       SPRD_DMA_TRANS_REQ,
+       SPRD_DMA_LIST_REQ,
+};
+
+/*
+ * enum sprd_dma_int_type: define the DMA interrupt type
+ * @SPRD_DMA_NO_INT: do not generate any DMA interrupt.
+ * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
+ * is done.
+ * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
+ * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
+ * or one block request is done.
+ * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
+ * request is done.
+ * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
+ * transaction request or fragment request is done.
+ * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
+ * transaction request or block request is done.
+ * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
+ * is done.
+ * @SPRD_DMA_CFGERR_INT: configuration error interrupt raised when the
+ * configuration is incorrect.
+ */
+enum sprd_dma_int_type {
+       SPRD_DMA_NO_INT,
+       SPRD_DMA_FRAG_INT,
+       SPRD_DMA_BLK_INT,
+       SPRD_DMA_BLK_FRAG_INT,
+       SPRD_DMA_TRANS_INT,
+       SPRD_DMA_TRANS_FRAG_INT,
+       SPRD_DMA_TRANS_BLK_INT,
+       SPRD_DMA_LIST_INT,
+       SPRD_DMA_CFGERR_INT,
+};
+
+#endif
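
Client drivers combine one request mode with one interrupt type through
SPRD_DMA_FLAGS() and hand the result to the DMA engine. The sketch below is a
minimal illustration; it assumes (as the macro layout suggests) that the value
is carried in the flags argument of the prep call, and the channel and buffer
set-up are omitted.

#include <linux/dmaengine.h>
#include <linux/dma/sprd-dma.h>

/*
 * Illustrative sketch: ask for block request mode with a "transaction done"
 * interrupt; whether the controller driver consumes these bits from the prep
 * call's flags argument is an assumption here.
 */
static struct dma_async_tx_descriptor *
example_sprd_prep(struct dma_chan *chan, dma_addr_t buf, size_t len)
{
	unsigned long flags = SPRD_DMA_FLAGS(SPRD_DMA_BLK_REQ,
					     SPRD_DMA_TRANS_INT);

	return dmaengine_prep_slave_single(chan, buf, len,
					   DMA_MEM_TO_DEV, flags);
}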