Merge tag 'dmaengine-4.17-rc1' of git://git.infradead.org/users/vkoul/slave-dma
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Apr 2018 19:14:37 +0000 (12:14 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Apr 2018 19:14:37 +0000 (12:14 -0700)
Pull dmaengine updates from Vinod Koul:
 "This time we have a couple of new drivers along with updates to existing drivers:

   - new drivers for the DesignWare AXI DMAC and MediaTek High-Speed DMA
     controllers

   - stm32 dma and qcom bam dma driver updates

   - norandom test option for dmatest"

* tag 'dmaengine-4.17-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (30 commits)
  dmaengine: stm32-dma: properly mask irq bits
  dmaengine: stm32-dma: fix max items per transfer
  dmaengine: stm32-dma: fix DMA IRQ status handling
  dmaengine: stm32-dma: Improve memory burst management
  dmaengine: stm32-dma: fix typo and reported checkpatch warnings
  dmaengine: stm32-dma: fix incomplete configuration in cyclic mode
  dmaengine: stm32-dma: threshold manages with bitfield feature
  dt-bindings: stm32-dma: introduce DMA features bitfield
  dt-bindings: rcar-dmac: Document r8a77470 support
  dmaengine: rcar-dmac: Fix too early/late system suspend/resume callbacks
  dmaengine: dw-axi-dmac: fix spelling mistake: "catched" -> "caught"
  dmaengine: edma: Check the memory allocation for the memcpy dma device
  dmaengine: at_xdmac: fix rare residue corruption
  dmaengine: mediatek: update MAINTAINERS entry with MediaTek DMA driver
  dmaengine: mediatek: Add MediaTek High-Speed DMA controller for MT7622 and MT7623 SoC
  dt-bindings: dmaengine: Add MediaTek High-Speed DMA controller bindings
  dt-bindings: Document the Synopsys DW AXI DMA bindings
  dmaengine: Introduce DW AXI DMAC driver
  dmaengine: pl330: fix a race condition in case of threaded irqs
  dmaengine: imx-sdma: fix pagefault when channel is disabled during interrupt
  ...

24 files changed:
Documentation/devicetree/bindings/dma/mtk-hsdma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/stm32-dma.txt
MAINTAINERS
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/at_xdmac.c
drivers/dma/dmatest.c
drivers/dma/dw-axi-dmac/Makefile [new file with mode: 0644]
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c [new file with mode: 0644]
drivers/dma/dw-axi-dmac/dw-axi-dmac.h [new file with mode: 0644]
drivers/dma/edma.c
drivers/dma/imx-sdma.c
drivers/dma/mediatek/Kconfig [new file with mode: 0644]
drivers/dma/mediatek/Makefile [new file with mode: 0644]
drivers/dma/mediatek/mtk-hsdma.c [new file with mode: 0644]
drivers/dma/pl330.c
drivers/dma/qcom/bam_dma.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/stm32-dma.c
include/linux/dmaengine.h

diff --git a/Documentation/devicetree/bindings/dma/mtk-hsdma.txt b/Documentation/devicetree/bindings/dma/mtk-hsdma.txt
new file mode 100644 (file)
index 0000000..4bb3173
--- /dev/null
@@ -0,0 +1,33 @@
+MediaTek High-Speed DMA Controller
+==================================
+
+This device follows the generic DMA bindings defined in dma/dma.txt.
+
+Required properties:
+
+- compatible:  Must be one of
+                 "mediatek,mt7622-hsdma": for MT7622 SoC
+                 "mediatek,mt7623-hsdma": for MT7623 SoC
+- reg:         Should contain the register's base address and length.
+- interrupts:  Should contain a reference to the interrupt used by this
+               device.
+- clocks:      Should be the clock specifiers corresponding to the entry in
+               clock-names property.
+- clock-names: Should contain "hsdma" entries.
+- power-domains: Phandle to the power domain that the device is part of
+- #dma-cells:  The length of the DMA specifier, must be <1>. This one cell
+               in dmas property of a client device represents the channel
+               number.
+Example:
+
+        hsdma: dma-controller@1b007000 {
+               compatible = "mediatek,mt7623-hsdma";
+               reg = <0 0x1b007000 0 0x1000>;
+               interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_LOW>;
+               clocks = <&ethsys CLK_ETHSYS_HSDMA>;
+               clock-names = "hsdma";
+               power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
+               #dma-cells = <1>;
+       };
+
+DMA clients must use the format described in dma/dma.txt file.
index 9cbf5d9df8fd00d8e2101fd1d3fc22933410a244..cf5b9e44432c62b3d2b451e142dcc67db2ac11ec 100644 (file)
@@ -15,6 +15,10 @@ Required properties:
   the secure world.
 - qcom,controlled-remotely : optional, indicates that the bam is controlled by
   remote processor i.e. execution environment.
+- num-channels : optional, indicates supported number of DMA channels in a
+  remotely controlled bam.
+- qcom,num-ees : optional, indicates supported number of Execution Environments
+  in a remotely controlled bam.
 
 Example:
 
index 891db41e94201ce69944133854eaedc97b9e672a..aadfb236d53abdd12fc69cf36cd1940303441deb 100644 (file)
@@ -18,6 +18,7 @@ Required Properties:
              Examples with soctypes are:
                - "renesas,dmac-r8a7743" (RZ/G1M)
                - "renesas,dmac-r8a7745" (RZ/G1E)
+               - "renesas,dmac-r8a77470" (RZ/G1C)
                - "renesas,dmac-r8a7790" (R-Car H2)
                - "renesas,dmac-r8a7791" (R-Car M2-W)
                - "renesas,dmac-r8a7792" (R-Car V2H)
@@ -26,6 +27,7 @@ Required Properties:
                - "renesas,dmac-r8a7795" (R-Car H3)
                - "renesas,dmac-r8a7796" (R-Car M3-W)
                - "renesas,dmac-r8a77970" (R-Car V3M)
+               - "renesas,dmac-r8a77980" (R-Car V3H)
 
 - reg: base address and length of the registers block for the DMAC
 
index f3d1f151ba80a45e40e0c1d97535ad8e20af296e..9dc935e24e558f4f6edaa166a29fbd038bc66d48 100644 (file)
@@ -11,6 +11,7 @@ Required Properties:
          - "renesas,r8a7794-usb-dmac" (R-Car E2)
          - "renesas,r8a7795-usb-dmac" (R-Car H3)
          - "renesas,r8a7796-usb-dmac" (R-Car M3-W)
+         - "renesas,r8a77965-usb-dmac" (R-Car M3-N)
 - reg: base address and length of the registers block for the DMAC
 - interrupts: interrupt specifiers for the DMAC, one for each entry in
   interrupt-names.
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
new file mode 100644 (file)
index 0000000..f237b79
--- /dev/null
@@ -0,0 +1,41 @@
+Synopsys DesignWare AXI DMA Controller
+
+Required properties:
+- compatible: "snps,axi-dma-1.01a"
+- reg: Address range of the DMAC registers. This should include
+  all of the per-channel registers.
+- interrupts: Should contain the DMAC interrupt number.
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device.
+- dma-channels: Number of channels supported by hardware.
+- snps,dma-masters: Number of AXI masters supported by the hardware.
+- snps,data-width: Maximum AXI data width supported by hardware.
+  (0 - 8bits, 1 - 16bits, 2 - 32bits, ..., 6 - 512bits)
+- snps,priority: Priority of channel. Array size is equal to the number of
+  dma-channels. Priority value must be programmed within [0:dma-channels-1]
+  range. (0 - minimum priority)
+- snps,block-size: Maximum block size supported by the controller channel.
+  Array size is equal to the number of dma-channels.
+
+Optional properties:
+- snps,axi-max-burst-len: Restrict master AXI burst length by value specified
+  in this property. If this property is missing the maximum AXI burst length
+  supported by DMAC is used. [1:256]
+
+Example:
+
+dmac: dma-controller@80000 {
+       compatible = "snps,axi-dma-1.01a";
+       reg = <0x80000 0x400>;
+       clocks = <&core_clk>, <&cfgr_clk>;
+       clock-names = "core-clk", "cfgr-clk";
+       interrupt-parent = <&intc>;
+       interrupts = <27>;
+
+       dma-channels = <4>;
+       snps,dma-masters = <2>;
+       snps,data-width = <3>;
+       snps,block-size = <4096 4096 4096 4096>;
+       snps,priority = <0 1 2 3>;
+       snps,axi-max-burst-len = <16>;
+};
index 0b55718bf88993a83eb5091128cc6761b93e5852..c5f519097204f847fee551879c67a9260f71103e 100644 (file)
@@ -62,14 +62,14 @@ channel: a phandle to the DMA controller plus the following four integer cells:
        0x1: medium
        0x2: high
        0x3: very high
-4. A 32bit mask specifying the DMA FIFO threshold configuration which are device
-   dependent:
- -bit 0-1: Fifo threshold
+4. A 32bit bitfield value specifying DMA features which are device dependent:
+ -bit 0-1: DMA FIFO threshold selection
        0x0: 1/4 full FIFO
        0x1: 1/2 full FIFO
        0x2: 3/4 full FIFO
        0x3: full FIFO
 
+
 Example:
 
        usart1: serial@40011000 {
index b7bd40b6b80d0e6046aace8ccb2580eec383ee08..7bb2e9595f149fda418b76febc9263b13ec92888 100644 (file)
@@ -8859,6 +8859,15 @@ M:       Sean Wang <sean.wang@mediatek.com>
 S:     Maintained
 F:     drivers/media/rc/mtk-cir.c
 
+MEDIATEK DMA DRIVER
+M:     Sean Wang <sean.wang@mediatek.com>
+L:     dmaengine@vger.kernel.org
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     Documentation/devicetree/bindings/dma/mtk-*
+F:     drivers/dma/mediatek/
+
 MEDIATEK PMIC LED DRIVER
 M:     Sean Wang <sean.wang@mediatek.com>
 S:     Maintained
@@ -13482,6 +13491,12 @@ S:     Maintained
 F:     drivers/gpio/gpio-dwapb.c
 F:     Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
 
+SYNOPSYS DESIGNWARE AXI DMAC DRIVER
+M:     Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+S:     Maintained
+F:     drivers/dma/dw-axi-dmac/
+F:     Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
+
 SYNOPSYS DESIGNWARE DMAC DRIVER
 M:     Viresh Kumar <vireshk@kernel.org>
 R:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
index 27df3e2837fdec03212085a5dc433307989baea2..6d61cd0236339172b2b1075134db65ed43a2f47d 100644 (file)
@@ -187,6 +187,16 @@ config DMA_SUN6I
        help
          Support for the DMA engine first found in Allwinner A31 SoCs.
 
+config DW_AXI_DMAC
+       tristate "Synopsys DesignWare AXI DMA support"
+       depends on OF || COMPILE_TEST
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Enable support for Synopsys DesignWare AXI DMA controller.
+         NOTE: This driver wasn't tested on a 64 bit platform because
+         of the lack of a 64 bit platform with Synopsys DW AXI DMAC.
+
 config EP93XX_DMA
        bool "Cirrus Logic EP93xx DMA support"
        depends on ARCH_EP93XX || COMPILE_TEST
@@ -633,6 +643,8 @@ config ZX_DMA
 # driver files
 source "drivers/dma/bestcomm/Kconfig"
 
+source "drivers/dma/mediatek/Kconfig"
+
 source "drivers/dma/qcom/Kconfig"
 
 source "drivers/dma/dw/Kconfig"
index b9dca8a0e142067d01bc302952cc7f962c56ff5a..0f62a4d49aabc91a13e1194df562cfb0d3a88aeb 100644 (file)
@@ -28,6 +28,7 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
+obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
 obj-$(CONFIG_DW_DMAC_CORE) += dw/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
@@ -75,5 +76,6 @@ obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
 
+obj-y += mediatek/
 obj-y += qcom/
 obj-y += xilinx/
index c00e3923d7d81154c7b0157491f270e90dd78fdf..94236ec9d4100fd6f1c0673f6c49e1cfd914d234 100644 (file)
@@ -1471,10 +1471,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
                check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
                rmb();
-               initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
-               rmb();
                cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
                rmb();
+               initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+               rmb();
                cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
                rmb();
 
index 80cc2be6483cb87074b4a11e205f8b15c7b715e3..b9339524d5bd38859e00e701c062acda5dba69e7 100644 (file)
@@ -74,7 +74,11 @@ MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
 
 static bool noverify;
 module_param(noverify, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(noverify, "Disable random data setup and verification");
+MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");
+
+static bool norandom;
+module_param(norandom, bool, 0644);
+MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
 
 static bool verbose;
 module_param(verbose, bool, S_IRUGO | S_IWUSR);
@@ -103,6 +107,7 @@ struct dmatest_params {
        unsigned int    pq_sources;
        int             timeout;
        bool            noverify;
+       bool            norandom;
 };
 
 /**
@@ -575,7 +580,7 @@ static int dmatest_func(void *data)
                        break;
                }
 
-               if (params->noverify)
+               if (params->norandom)
                        len = params->buf_size;
                else
                        len = dmatest_random() % params->buf_size + 1;
@@ -586,17 +591,19 @@ static int dmatest_func(void *data)
 
                total_len += len;
 
-               if (params->noverify) {
+               if (params->norandom) {
                        src_off = 0;
                        dst_off = 0;
                } else {
-                       start = ktime_get();
                        src_off = dmatest_random() % (params->buf_size - len + 1);
                        dst_off = dmatest_random() % (params->buf_size - len + 1);
 
                        src_off = (src_off >> align) << align;
                        dst_off = (dst_off >> align) << align;
+               }
 
+               if (!params->noverify) {
+                       start = ktime_get();
                        dmatest_init_srcs(thread->srcs, src_off, len,
                                          params->buf_size, is_memset);
                        dmatest_init_dsts(thread->dsts, dst_off, len,
@@ -975,6 +982,7 @@ static void run_threaded_test(struct dmatest_info *info)
        params->pq_sources = pq_sources;
        params->timeout = timeout;
        params->noverify = noverify;
+       params->norandom = norandom;
 
        request_channels(info, DMA_MEMCPY);
        request_channels(info, DMA_MEMSET);
diff --git a/drivers/dma/dw-axi-dmac/Makefile b/drivers/dma/dw-axi-dmac/Makefile
new file mode 100644 (file)
index 0000000..4bfa462
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac-platform.o
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
new file mode 100644 (file)
index 0000000..c4eb55e
--- /dev/null
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier:  GPL-2.0
+// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
+
+/*
+ * Synopsys DesignWare AXI DMA Controller driver.
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+#include "dw-axi-dmac.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+/*
+ * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
+ * master data bus width up to 512 bits (for both AXI master interfaces), but
+ * it depends on IP block configuration.
+ */
+#define AXI_DMA_BUSWIDTHS                \
+       (DMA_SLAVE_BUSWIDTH_1_BYTE      | \
+       DMA_SLAVE_BUSWIDTH_2_BYTES      | \
+       DMA_SLAVE_BUSWIDTH_4_BYTES      | \
+       DMA_SLAVE_BUSWIDTH_8_BYTES      | \
+       DMA_SLAVE_BUSWIDTH_16_BYTES     | \
+       DMA_SLAVE_BUSWIDTH_32_BYTES     | \
+       DMA_SLAVE_BUSWIDTH_64_BYTES)
+
+static inline void
+axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
+{
+       iowrite32(val, chip->regs + reg);
+}
+
+static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
+{
+       return ioread32(chip->regs + reg);
+}
+
+static inline void
+axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
+{
+       iowrite32(val, chan->chan_regs + reg);
+}
+
+static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
+{
+       return ioread32(chan->chan_regs + reg);
+}
+
+static inline void
+axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
+{
+       /*
+        * We split one 64 bit write for two 32 bit write as some HW doesn't
+        * support 64 bit access.
+        */
+       iowrite32(lower_32_bits(val), chan->chan_regs + reg);
+       iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
+}
+
+static inline void axi_dma_disable(struct axi_dma_chip *chip)
+{
+       u32 val;
+
+       val = axi_dma_ioread32(chip, DMAC_CFG);
+       val &= ~DMAC_EN_MASK;
+       axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+static inline void axi_dma_enable(struct axi_dma_chip *chip)
+{
+       u32 val;
+
+       val = axi_dma_ioread32(chip, DMAC_CFG);
+       val |= DMAC_EN_MASK;
+       axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
+{
+       u32 val;
+
+       val = axi_dma_ioread32(chip, DMAC_CFG);
+       val &= ~INT_EN_MASK;
+       axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
+{
+       u32 val;
+
+       val = axi_dma_ioread32(chip, DMAC_CFG);
+       val |= INT_EN_MASK;
+       axi_dma_iowrite32(chip, DMAC_CFG, val);
+}
+
+static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
+{
+       u32 val;
+
+       if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
+               axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
+       } else {
+               val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
+               val &= ~irq_mask;
+               axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
+       }
+}
+
+static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
+{
+       axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
+}
+
+static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
+{
+       axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
+}
+
+static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
+{
+       axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
+}
+
+static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
+{
+       return axi_chan_ioread32(chan, CH_INTSTATUS);
+}
+
+static inline void axi_chan_disable(struct axi_dma_chan *chan)
+{
+       u32 val;
+
+       val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+       val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
+       val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+       axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+}
+
+static inline void axi_chan_enable(struct axi_dma_chan *chan)
+{
+       u32 val;
+
+       val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+       val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
+              BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
+       axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+}
+
+static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
+{
+       u32 val;
+
+       val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+
+       return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
+}
+
+static void axi_dma_hw_init(struct axi_dma_chip *chip)
+{
+       u32 i;
+
+       for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
+               axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
+               axi_chan_disable(&chip->dw->chan[i]);
+       }
+}
+
+static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
+                                  dma_addr_t dst, size_t len)
+{
+       u32 max_width = chan->chip->dw->hdata->m_data_width;
+
+       return __ffs(src | dst | len | BIT(max_width));
+}
+
+static inline const char *axi_chan_name(struct axi_dma_chan *chan)
+{
+       return dma_chan_name(&chan->vc.chan);
+}
+
+static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
+{
+       struct dw_axi_dma *dw = chan->chip->dw;
+       struct axi_dma_desc *desc;
+       dma_addr_t phys;
+
+       desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
+       if (unlikely(!desc)) {
+               dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
+                       axi_chan_name(chan));
+               return NULL;
+       }
+
+       atomic_inc(&chan->descs_allocated);
+       INIT_LIST_HEAD(&desc->xfer_list);
+       desc->vd.tx.phys = phys;
+       desc->chan = chan;
+
+       return desc;
+}
+
+static void axi_desc_put(struct axi_dma_desc *desc)
+{
+       struct axi_dma_chan *chan = desc->chan;
+       struct dw_axi_dma *dw = chan->chip->dw;
+       struct axi_dma_desc *child, *_next;
+       unsigned int descs_put = 0;
+
+       list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
+               list_del(&child->xfer_list);
+               dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
+               descs_put++;
+       }
+
+       dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
+       descs_put++;
+
+       atomic_sub(descs_put, &chan->descs_allocated);
+       dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
+               axi_chan_name(chan), descs_put,
+               atomic_read(&chan->descs_allocated));
+}
+
+static void vchan_desc_put(struct virt_dma_desc *vdesc)
+{
+       axi_desc_put(vd_to_axi_desc(vdesc));
+}
+
+static enum dma_status
+dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+                 struct dma_tx_state *txstate)
+{
+       struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+       enum dma_status ret;
+
+       ret = dma_cookie_status(dchan, cookie, txstate);
+
+       if (chan->is_paused && ret == DMA_IN_PROGRESS)
+               ret = DMA_PAUSED;
+
+       return ret;
+}
+
+static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
+{
+       desc->lli.llp = cpu_to_le64(adr);
+}
+
+static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
+{
+       axi_chan_iowrite64(chan, CH_LLP, adr);
+}
+
+/* Called in chan locked context */
+static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
+                                     struct axi_dma_desc *first)
+{
+       u32 priority = chan->chip->dw->hdata->priority[chan->id];
+       u32 reg, irq_mask;
+       u8 lms = 0; /* Select AXI0 master for LLI fetching */
+
+       if (unlikely(axi_chan_is_hw_enable(chan))) {
+               dev_err(chan2dev(chan), "%s is non-idle!\n",
+                       axi_chan_name(chan));
+
+               return;
+       }
+
+       axi_dma_enable(chan->chip);
+
+       reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
+              DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
+       axi_chan_iowrite32(chan, CH_CFG_L, reg);
+
+       reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
+              priority << CH_CFG_H_PRIORITY_POS |
+              DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
+              DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
+       axi_chan_iowrite32(chan, CH_CFG_H, reg);
+
+       write_chan_llp(chan, first->vd.tx.phys | lms);
+
+       irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
+       axi_chan_irq_sig_set(chan, irq_mask);
+
+       /* Generate 'suspend' status but don't generate interrupt */
+       irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
+       axi_chan_irq_set(chan, irq_mask);
+
+       axi_chan_enable(chan);
+}
+
+static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
+{
+       struct axi_dma_desc *desc;
+       struct virt_dma_desc *vd;
+
+       vd = vchan_next_desc(&chan->vc);
+       if (!vd)
+               return;
+
+       desc = vd_to_axi_desc(vd);
+       dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
+               vd->tx.cookie);
+       axi_chan_block_xfer_start(chan, desc);
+}
+
+static void dma_chan_issue_pending(struct dma_chan *dchan)
+{
+       struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+       if (vchan_issue_pending(&chan->vc))
+               axi_chan_start_first_queued(chan);
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
+{
+       struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+
+       /* ASSERT: channel is idle */
+       if (axi_chan_is_hw_enable(chan)) {
+               dev_err(chan2dev(chan), "%s is non-idle!\n",
+                       axi_chan_name(chan));
+               return -EBUSY;
+       }
+
+       dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
+
+       pm_runtime_get(chan->chip->dev);
+
+       return 0;
+}
+
+static void dma_chan_free_chan_resources(struct dma_chan *dchan)
+{
+       struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+
+       /* ASSERT: channel is idle */
+       if (axi_chan_is_hw_enable(chan))
+               dev_err(dchan2dev(dchan), "%s is non-idle!\n",
+                       axi_chan_name(chan));
+
+       axi_chan_disable(chan);
+       axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
+
+       vchan_free_chan_resources(&chan->vc);
+
+       dev_vdbg(dchan2dev(dchan),
+                "%s: free resources, descriptor still allocated: %u\n",
+                axi_chan_name(chan), atomic_read(&chan->descs_allocated));
+
+       pm_runtime_put(chan->chip->dev);
+}
+
+/*
+ * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
+ * as 1, it understands that the current block is the final block in the
+ * transfer and completes the DMA transfer operation at the end of current
+ * block transfer.
+ */
+static void set_desc_last(struct axi_dma_desc *desc)
+{
+       u32 val;
+
+       val = le32_to_cpu(desc->lli.ctl_hi);
+       val |= CH_CTL_H_LLI_LAST;
+       desc->lli.ctl_hi = cpu_to_le32(val);
+}
+
+static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
+{
+       desc->lli.sar = cpu_to_le64(adr);
+}
+
+static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
+{
+       desc->lli.dar = cpu_to_le64(adr);
+}
+
+static void set_desc_src_master(struct axi_dma_desc *desc)
+{
+       u32 val;
+
+       /* Select AXI0 for source master */
+       val = le32_to_cpu(desc->lli.ctl_lo);
+       val &= ~CH_CTL_L_SRC_MAST;
+       desc->lli.ctl_lo = cpu_to_le32(val);
+}
+
+static void set_desc_dest_master(struct axi_dma_desc *desc)
+{
+       u32 val;
+
+       /* Select AXI1 for destination master if available */
+       val = le32_to_cpu(desc->lli.ctl_lo);
+       if (desc->chan->chip->dw->hdata->nr_masters > 1)
+               val |= CH_CTL_L_DST_MAST;
+       else
+               val &= ~CH_CTL_L_DST_MAST;
+
+       desc->lli.ctl_lo = cpu_to_le32(val);
+}
+
+static struct dma_async_tx_descriptor *
+dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
+                        dma_addr_t src_adr, size_t len, unsigned long flags)
+{
+       struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
+       struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+       size_t block_ts, max_block_ts, xfer_len;
+       u32 xfer_width, reg;
+       u8 lms = 0; /* Select AXI0 master for LLI fetching */
+
+       dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
+               axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
+
+       max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
+
+       while (len) {
+               xfer_len = len;
+
+               /*
+                * Take care for the alignment.
+                * Actually source and destination widths can be different, but
+                * make them same to be simpler.
+                */
+               xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);
+
+               /*
+                * block_ts indicates the total number of data of width
+                * to be transferred in a DMA block transfer.
+                * BLOCK_TS register should be set to block_ts - 1
+                */
+               block_ts = xfer_len >> xfer_width;
+               if (block_ts > max_block_ts) {
+                       block_ts = max_block_ts;
+                       xfer_len = max_block_ts << xfer_width;
+               }
+
+               desc = axi_desc_get(chan);
+               if (unlikely(!desc))
+                       goto err_desc_get;
+
+               write_desc_sar(desc, src_adr);
+               write_desc_dar(desc, dst_adr);
+               desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);
+
+               reg = CH_CTL_H_LLI_VALID;
+               if (chan->chip->dw->hdata->restrict_axi_burst_len) {
+                       u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
+
+                       reg |= (CH_CTL_H_ARLEN_EN |
+                               burst_len << CH_CTL_H_ARLEN_POS |
+                               CH_CTL_H_AWLEN_EN |
+                               burst_len << CH_CTL_H_AWLEN_POS);
+               }
+               desc->lli.ctl_hi = cpu_to_le32(reg);
+
+               reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
+                      DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
+                      xfer_width << CH_CTL_L_DST_WIDTH_POS |
+                      xfer_width << CH_CTL_L_SRC_WIDTH_POS |
+                      DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
+                      DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
+               desc->lli.ctl_lo = cpu_to_le32(reg);
+
+               set_desc_src_master(desc);
+               set_desc_dest_master(desc);
+
+               /* Manage transfer list (xfer_list) */
+               if (!first) {
+                       first = desc;
+               } else {
+                       list_add_tail(&desc->xfer_list, &first->xfer_list);
+                       write_desc_llp(prev, desc->vd.tx.phys | lms);
+               }
+               prev = desc;
+
+               /* update the length and addresses for the next loop cycle */
+               len -= xfer_len;
+               dst_adr += xfer_len;
+               src_adr += xfer_len;
+       }
+
+       /* Total len of src/dest sg == 0, so no descriptors were allocated */
+       if (unlikely(!first))
+               return NULL;
+
+       /* Set end-of-link to the last link descriptor of list */
+       set_desc_last(desc);
+
+       return vchan_tx_prep(&chan->vc, &first->vd, flags);
+
+err_desc_get:
+       axi_desc_put(first);
+       return NULL;
+}
+
+/* Dump one hardware link-list item (LLI) of @desc for error diagnostics. */
+static void axi_chan_dump_lli(struct axi_dma_chan *chan,
+                             struct axi_dma_desc *desc)
+{
+       /* LLI fields are stored little-endian; convert before printing */
+       dev_err(dchan2dev(&chan->vc.chan),
+               "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
+               le64_to_cpu(desc->lli.sar),
+               le64_to_cpu(desc->lli.dar),
+               le64_to_cpu(desc->lli.llp),
+               le32_to_cpu(desc->lli.block_ts_lo),
+               le32_to_cpu(desc->lli.ctl_hi),
+               le32_to_cpu(desc->lli.ctl_lo));
+}
+
+/*
+ * Dump the whole descriptor chain: the head LLI followed by every LLI
+ * linked on its xfer_list.
+ */
+static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
+                                  struct axi_dma_desc *desc_head)
+{
+       struct axi_dma_desc *desc;
+
+       axi_chan_dump_lli(chan, desc_head);
+       list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
+               axi_chan_dump_lli(chan, desc);
+}
+
+/*
+ * Handle an error interrupt on @chan: stop the channel, complete and dump
+ * the offending descriptor, then try to restart with the next queued one.
+ *
+ * Fix vs. original: vchan_next_desc() returns NULL when the issued list is
+ * empty (e.g. a spurious/raced error IRQ after terminate_all), and the old
+ * code dereferenced that NULL via list_del(&vd->node). Bail out instead.
+ */
+static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
+{
+       struct virt_dma_desc *vd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+
+       axi_chan_disable(chan);
+
+       /* The bad descriptor currently is in the head of vc list */
+       vd = vchan_next_desc(&chan->vc);
+       if (!vd) {
+               dev_err(chan2dev(chan), "BUG: %s, error IRQ with no descriptors\n",
+                       axi_chan_name(chan));
+               goto out;
+       }
+       /* Remove the completed descriptor from issued list */
+       list_del(&vd->node);
+
+       /* WARN about bad descriptor */
+       dev_err(chan2dev(chan),
+               "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
+               axi_chan_name(chan), vd->tx.cookie, status);
+       axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
+
+       vchan_cookie_complete(vd);
+
+       /* Try to restart the controller */
+       axi_chan_start_first_queued(chan);
+
+out:
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+/*
+ * Handle a DMA-transfer-complete interrupt: complete the descriptor at the
+ * head of the issued list and kick off the next queued one.
+ *
+ * Fix vs. original: guard against vchan_next_desc() returning NULL (empty
+ * issued list, e.g. completion racing with terminate_all) before list_del()
+ * dereferences it.
+ */
+static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
+{
+       struct virt_dma_desc *vd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+       if (unlikely(axi_chan_is_hw_enable(chan))) {
+               dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
+                       axi_chan_name(chan));
+               axi_chan_disable(chan);
+       }
+
+       /* The completed descriptor currently is in the head of vc list */
+       vd = vchan_next_desc(&chan->vc);
+       if (!vd) {
+               dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+                       axi_chan_name(chan));
+               goto out;
+       }
+       /* Remove the completed descriptor from issued list before completing */
+       list_del(&vd->node);
+       vchan_cookie_complete(vd);
+
+       /* Submit queued descriptors after processing the completed ones */
+       axi_chan_start_first_queued(chan);
+
+out:
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+/*
+ * Top-level interrupt handler: walks every channel, reads and clears its
+ * IRQ status, and dispatches to the error or transfer-complete handler.
+ *
+ * NOTE(review): IRQ_HANDLED is returned unconditionally even when no channel
+ * had a pending status bit, although the line is requested with IRQF_SHARED
+ * in dw_probe() — confirm whether IRQ_NONE should be returned in that case.
+ */
+static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
+{
+       struct axi_dma_chip *chip = dev_id;
+       struct dw_axi_dma *dw = chip->dw;
+       struct axi_dma_chan *chan;
+
+       u32 status, i;
+
+       /* Disable DMAC interrupts. We'll enable them after processing channels */
+       axi_dma_irq_disable(chip);
+
+       /* Poll, clear and process every channel interrupt status */
+       for (i = 0; i < dw->hdata->nr_channels; i++) {
+               chan = &dw->chan[i];
+               status = axi_chan_irq_read(chan);
+               axi_chan_irq_clear(chan, status);
+
+               dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
+                       axi_chan_name(chan), i, status);
+
+               if (status & DWAXIDMAC_IRQ_ALL_ERR)
+                       axi_chan_handle_err(chan, status);
+               else if (status & DWAXIDMAC_IRQ_DMA_TRF)
+                       axi_chan_block_xfer_complete(chan);
+       }
+
+       /* Re-enable interrupts */
+       axi_dma_irq_enable(chip);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * dmaengine device_terminate_all callback: disable the channel in hardware
+ * and free every descriptor (allocated, submitted or issued) on the vchan.
+ */
+static int dma_chan_terminate_all(struct dma_chan *dchan)
+{
+       struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+
+       axi_chan_disable(chan);
+
+       /* Collect all pending descriptors onto a private list */
+       vchan_get_all_descriptors(&chan->vc, &head);
+
+       /*
+        * As vchan_dma_desc_free_list can access to desc_allocated list
+        * we need to call it in vc.lock context.
+        */
+       vchan_dma_desc_free_list(&chan->vc, &head);
+
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+       dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
+
+       return 0;
+}
+
+/*
+ * dmaengine device_pause callback: request channel suspend via DMAC_CHEN
+ * (SUSP bit plus its write-enable bit), then poll the channel IRQ status
+ * for DWAXIDMAC_IRQ_SUSPENDED.
+ *
+ * The poll busy-waits up to 20 * 2us with vc.lock held and IRQs off;
+ * returns -EAGAIN if the hardware never reported the suspended state.
+ */
+static int dma_chan_pause(struct dma_chan *dchan)
+{
+       struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+       unsigned long flags;
+       unsigned int timeout = 20; /* timeout iterations */
+       u32 val;
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+
+       /* SUSP_WE gates the write to the per-channel SUSP bit */
+       val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+       val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
+              BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
+       axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+
+       do  {
+               if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
+                       break;
+
+               udelay(2);
+       } while (--timeout);
+
+       axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);
+
+       /* NOTE(review): is_paused is set even on timeout — confirm intent */
+       chan->is_paused = true;
+
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+       return timeout ? 0 : -EAGAIN;
+}
+
+/* Called in chan locked context */
+static inline void axi_chan_resume(struct axi_dma_chan *chan)
+{
+       u32 val;
+
+       val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+       val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
+       val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
+       axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
+
+       chan->is_paused = false;
+}
+
+/*
+ * dmaengine device_resume callback: un-suspend the channel, but only if a
+ * previous pause actually took effect.
+ */
+static int dma_chan_resume(struct dma_chan *dchan)
+{
+       struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vc.lock, flags);
+
+       if (chan->is_paused)
+               axi_chan_resume(chan);
+
+       spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+       return 0;
+}
+
+/*
+ * Quiesce the controller for suspend: mask interrupts, disable the DMAC,
+ * then gate both functional clocks. Counterpart of axi_dma_resume().
+ */
+static int axi_dma_suspend(struct axi_dma_chip *chip)
+{
+       axi_dma_irq_disable(chip);
+       axi_dma_disable(chip);
+
+       clk_disable_unprepare(chip->core_clk);
+       clk_disable_unprepare(chip->cfgr_clk);
+
+       return 0;
+}
+
+/*
+ * Bring the controller back up: ungate both clocks, then enable the DMAC
+ * and its interrupts. Counterpart of axi_dma_suspend().
+ *
+ * Fix vs. original: if enabling core_clk fails, the already-enabled
+ * cfgr_clk was leaked (left prepared/enabled). Roll it back on error.
+ */
+static int axi_dma_resume(struct axi_dma_chip *chip)
+{
+       int ret;
+
+       ret = clk_prepare_enable(chip->cfgr_clk);
+       if (ret < 0)
+               return ret;
+
+       ret = clk_prepare_enable(chip->core_clk);
+       if (ret < 0)
+               goto err_disable_cfgr_clk;
+
+       axi_dma_enable(chip);
+       axi_dma_irq_enable(chip);
+
+       return 0;
+
+err_disable_cfgr_clk:
+       clk_disable_unprepare(chip->cfgr_clk);
+
+       return ret;
+}
+
+/* Runtime-PM suspend hook: delegate to the common suspend path. */
+static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
+{
+       struct axi_dma_chip *chip = dev_get_drvdata(dev);
+
+       return axi_dma_suspend(chip);
+}
+
+/* Runtime-PM resume hook: delegate to the common resume path. */
+static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
+{
+       struct axi_dma_chip *chip = dev_get_drvdata(dev);
+
+       return axi_dma_resume(chip);
+}
+
+/*
+ * Read and validate the hardware configuration from device properties
+ * (DT or ACPI) into chip->dw->hdata.
+ *
+ * Mandatory: "dma-channels", "snps,dma-masters", "snps,data-width",
+ * per-channel "snps,block-size" and "snps,priority" arrays.
+ * Optional: "snps,axi-max-burst-len".
+ *
+ * Returns 0 on success, -EINVAL on out-of-range values, or the
+ * device_property_read_*() error for missing mandatory properties.
+ */
+static int parse_device_properties(struct axi_dma_chip *chip)
+{
+       struct device *dev = chip->dev;
+       u32 tmp, carr[DMAC_MAX_CHANNELS];
+       int ret;
+
+       ret = device_property_read_u32(dev, "dma-channels", &tmp);
+       if (ret)
+               return ret;
+       if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
+               return -EINVAL;
+
+       chip->dw->hdata->nr_channels = tmp;
+
+       ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
+       if (ret)
+               return ret;
+       if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
+               return -EINVAL;
+
+       chip->dw->hdata->nr_masters = tmp;
+
+       ret = device_property_read_u32(dev, "snps,data-width", &tmp);
+       if (ret)
+               return ret;
+       if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
+               return -EINVAL;
+
+       chip->dw->hdata->m_data_width = tmp;
+
+       /* tmp is reused as the channel index from here on */
+       ret = device_property_read_u32_array(dev, "snps,block-size", carr,
+                                            chip->dw->hdata->nr_channels);
+       if (ret)
+               return ret;
+       for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
+               if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
+                       return -EINVAL;
+
+               chip->dw->hdata->block_size[tmp] = carr[tmp];
+       }
+
+       ret = device_property_read_u32_array(dev, "snps,priority", carr,
+                                            chip->dw->hdata->nr_channels);
+       if (ret)
+               return ret;
+       /* Priority value must be programmed within [0:nr_channels-1] range */
+       for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
+               if (carr[tmp] >= chip->dw->hdata->nr_channels)
+                       return -EINVAL;
+
+               chip->dw->hdata->priority[tmp] = carr[tmp];
+       }
+
+       /* axi-max-burst-len is optional property */
+       ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
+       if (!ret) {
+               if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
+                       return -EINVAL;
+               if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
+                       return -EINVAL;
+
+               /* Hardware encodes burst length as (len - 1) */
+               chip->dw->hdata->restrict_axi_burst_len = true;
+               chip->dw->hdata->axi_rw_burst_len = tmp - 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Platform probe: allocate driver state, map registers, acquire clocks,
+ * parse the hardware configuration, set up the per-channel vchans and the
+ * dmaengine device, power the controller up, and register it.
+ *
+ * NOTE(review): on dma_async_device_register() failure only Runtime PM is
+ * disabled; the clocks/IRQ enabled by axi_dma_resume() stay on and the
+ * noresume reference is not dropped — confirm whether a fuller unwind is
+ * needed here.
+ */
+static int dw_probe(struct platform_device *pdev)
+{
+       struct axi_dma_chip *chip;
+       struct resource *mem;
+       struct dw_axi_dma *dw;
+       struct dw_axi_dma_hcfg *hdata;
+       u32 i;
+       int ret;
+
+       chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
+       if (!dw)
+               return -ENOMEM;
+
+       hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
+       if (!hdata)
+               return -ENOMEM;
+
+       chip->dw = dw;
+       chip->dev = &pdev->dev;
+       chip->dw->hdata = hdata;
+
+       chip->irq = platform_get_irq(pdev, 0);
+       if (chip->irq < 0)
+               return chip->irq;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       chip->regs = devm_ioremap_resource(chip->dev, mem);
+       if (IS_ERR(chip->regs))
+               return PTR_ERR(chip->regs);
+
+       chip->core_clk = devm_clk_get(chip->dev, "core-clk");
+       if (IS_ERR(chip->core_clk))
+               return PTR_ERR(chip->core_clk);
+
+       chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
+       if (IS_ERR(chip->cfgr_clk))
+               return PTR_ERR(chip->cfgr_clk);
+
+       ret = parse_device_properties(chip);
+       if (ret)
+               return ret;
+
+       dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
+                               sizeof(*dw->chan), GFP_KERNEL);
+       if (!dw->chan)
+               return -ENOMEM;
+
+       ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
+                              IRQF_SHARED, KBUILD_MODNAME, chip);
+       if (ret)
+               return ret;
+
+       /* Lli address must be aligned to a 64-byte boundary */
+       dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
+                                        sizeof(struct axi_dma_desc), 64, 0);
+       if (!dw->desc_pool) {
+               dev_err(chip->dev, "No memory for descriptors dma pool\n");
+               return -ENOMEM;
+       }
+
+       /* Initialize one virtual DMA channel per hardware channel */
+       INIT_LIST_HEAD(&dw->dma.channels);
+       for (i = 0; i < hdata->nr_channels; i++) {
+               struct axi_dma_chan *chan = &dw->chan[i];
+
+               chan->chip = chip;
+               chan->id = i;
+               chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
+               atomic_set(&chan->descs_allocated, 0);
+
+               chan->vc.desc_free = vchan_desc_put;
+               vchan_init(&chan->vc, &dw->dma);
+       }
+
+       /* Set capabilities */
+       dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
+
+       /* DMA capabilities */
+       dw->dma.chancnt = hdata->nr_channels;
+       dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
+       dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
+       dw->dma.directions = BIT(DMA_MEM_TO_MEM);
+       dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+       dw->dma.dev = chip->dev;
+       dw->dma.device_tx_status = dma_chan_tx_status;
+       dw->dma.device_issue_pending = dma_chan_issue_pending;
+       dw->dma.device_terminate_all = dma_chan_terminate_all;
+       dw->dma.device_pause = dma_chan_pause;
+       dw->dma.device_resume = dma_chan_resume;
+
+       dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
+       dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
+
+       dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
+
+       platform_set_drvdata(pdev, chip);
+
+       pm_runtime_enable(chip->dev);
+
+       /*
+        * We can't just call pm_runtime_get here instead of
+        * pm_runtime_get_noresume + axi_dma_resume because we need
+        * driver to work also without Runtime PM.
+        */
+       pm_runtime_get_noresume(chip->dev);
+       ret = axi_dma_resume(chip);
+       if (ret < 0)
+               goto err_pm_disable;
+
+       axi_dma_hw_init(chip);
+
+       pm_runtime_put(chip->dev);
+
+       ret = dma_async_device_register(&dw->dma);
+       if (ret)
+               goto err_pm_disable;
+
+       dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
+                dw->hdata->nr_channels);
+
+       return 0;
+
+err_pm_disable:
+       pm_runtime_disable(chip->dev);
+
+       return ret;
+}
+
+/*
+ * Platform remove: stop the hardware (with clocks re-enabled so register
+ * writes reach it), tear down PM, free the IRQ, kill the per-channel
+ * tasklets and unregister the dmaengine device.
+ *
+ * NOTE(review): dma_async_device_unregister() runs after the IRQ is freed
+ * and the channel tasklets are killed — confirm no in-flight client can
+ * still reach the channels at that point.
+ */
+static int dw_remove(struct platform_device *pdev)
+{
+       struct axi_dma_chip *chip = platform_get_drvdata(pdev);
+       struct dw_axi_dma *dw = chip->dw;
+       struct axi_dma_chan *chan, *_chan;
+       u32 i;
+
+       /* Enable clk before accessing to registers */
+       clk_prepare_enable(chip->cfgr_clk);
+       clk_prepare_enable(chip->core_clk);
+       axi_dma_irq_disable(chip);
+       for (i = 0; i < dw->hdata->nr_channels; i++) {
+               axi_chan_disable(&chip->dw->chan[i]);
+               axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
+       }
+       axi_dma_disable(chip);
+
+       /* axi_dma_suspend() gates the clocks enabled above again */
+       pm_runtime_disable(chip->dev);
+       axi_dma_suspend(chip);
+
+       devm_free_irq(chip->dev, chip->irq, chip);
+
+       list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
+                       vc.chan.device_node) {
+               list_del(&chan->vc.chan.device_node);
+               tasklet_kill(&chan->vc.task);
+       }
+
+       dma_async_device_unregister(&dw->dma);
+
+       return 0;
+}
+
+/* Runtime PM only; system sleep is not wired up here */
+static const struct dev_pm_ops dw_axi_dma_pm_ops = {
+       SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
+};
+
+static const struct of_device_id dw_dma_of_id_table[] = {
+       { .compatible = "snps,axi-dma-1.01a" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
+
+static struct platform_driver dw_driver = {
+       .probe          = dw_probe,
+       .remove         = dw_remove,
+       .driver = {
+               .name   = KBUILD_MODNAME,
+               .of_match_table = of_match_ptr(dw_dma_of_id_table),
+               .pm = &dw_axi_dma_pm_ops,
+       },
+};
+module_platform_driver(dw_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
+MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
new file mode 100644 (file)
index 0000000..f8888dc
--- /dev/null
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier:  GPL-2.0
+// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
+
+/*
+ * Synopsys DesignWare AXI DMA Controller driver.
+ *
+ * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+ */
+
+#ifndef _AXI_DMA_PLATFORM_H
+#define _AXI_DMA_PLATFORM_H
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/types.h>
+
+#include "../virt-dma.h"
+
+/* Driver-imposed upper bounds on the probed hardware configuration */
+#define DMAC_MAX_CHANNELS      8
+#define DMAC_MAX_MASTERS       2
+#define DMAC_MAX_BLK_SIZE      0x200000
+
+/* Hardware configuration as parsed from device properties */
+struct dw_axi_dma_hcfg {
+       u32     nr_channels;                    /* "dma-channels" */
+       u32     nr_masters;                     /* "snps,dma-masters" */
+       u32     m_data_width;                   /* "snps,data-width" */
+       u32     block_size[DMAC_MAX_CHANNELS];  /* per-channel "snps,block-size" */
+       u32     priority[DMAC_MAX_CHANNELS];    /* per-channel "snps,priority" */
+       /* maximum supported axi burst length */
+       u32     axi_rw_burst_len;
+       bool    restrict_axi_burst_len;
+};
+
+/* One hardware DMA channel, wrapped in a virt-dma virtual channel */
+struct axi_dma_chan {
+       struct axi_dma_chip             *chip;
+       void __iomem                    *chan_regs;     /* per-channel register window */
+       u8                              id;             /* channel index */
+       atomic_t                        descs_allocated;
+
+       struct virt_dma_chan            vc;
+
+       /* these other elements are all protected by vc.lock */
+       bool                            is_paused;
+};
+
+/* The dmaengine device plus all channels and the descriptor pool */
+struct dw_axi_dma {
+       struct dma_device       dma;
+       struct dw_axi_dma_hcfg  *hdata;
+       struct dma_pool         *desc_pool;
+
+       /* channels */
+       struct axi_dma_chan     *chan;
+};
+
+/* Top-level per-device context: resources shared by all channels */
+struct axi_dma_chip {
+       struct device           *dev;
+       int                     irq;
+       void __iomem            *regs;          /* common register window */
+       struct clk              *core_clk;
+       struct clk              *cfgr_clk;
+       struct dw_axi_dma       *dw;
+};
+
+/*
+ * LLI == Linked List Item
+ *
+ * Hardware descriptor layout read by the DMAC; all fields little-endian.
+ * NOTE(review): "ststus_hi" looks like a typo of "status_hi" (cf. status_lo)
+ * — harmless while unused, but worth renaming.
+ */
+struct __packed axi_dma_lli {
+       __le64          sar;
+       __le64          dar;
+       __le32          block_ts_lo;
+       __le32          block_ts_hi;
+       __le64          llp;
+       __le32          ctl_lo;
+       __le32          ctl_hi;
+       __le32          sstat;
+       __le32          dstat;
+       __le32          status_lo;
+       __le32          ststus_hi;
+       __le32          reserved_lo;
+       __le32          reserved_hi;
+};
+
+/* Software descriptor: hardware LLI first (DMA-pool allocated, 64-byte
+ * aligned), followed by the virt-dma bookkeeping and chain linkage.
+ */
+struct axi_dma_desc {
+       struct axi_dma_lli              lli;
+
+       struct virt_dma_desc            vd;
+       struct axi_dma_chan             *chan;
+       struct list_head                xfer_list;
+};
+
+/* struct dma_chan -> underlying struct device (for dev_* logging) */
+static inline struct device *dchan2dev(struct dma_chan *dchan)
+{
+       return &dchan->dev->device;
+}
+
+/* struct axi_dma_chan -> underlying struct device (for dev_* logging) */
+static inline struct device *chan2dev(struct axi_dma_chan *chan)
+{
+       return &chan->vc.chan.dev->device;
+}
+
+/* virt-dma descriptor -> enclosing driver descriptor */
+static inline struct axi_dma_desc *vd_to_axi_desc(struct virt_dma_desc *vd)
+{
+       return container_of(vd, struct axi_dma_desc, vd);
+}
+
+/* virt-dma channel -> enclosing driver channel */
+static inline struct axi_dma_chan *vc_to_axi_dma_chan(struct virt_dma_chan *vc)
+{
+       return container_of(vc, struct axi_dma_chan, vc);
+}
+
+/* generic dma_chan -> driver channel (via the virt-dma channel) */
+static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
+{
+       return vc_to_axi_dma_chan(to_virt_chan(dchan));
+}
+
+
+#define COMMON_REG_LEN         0x100
+#define CHAN_REG_LEN           0x100
+
+/* Common registers offset */
+#define DMAC_ID                        0x000 /* R DMAC ID */
+#define DMAC_COMPVER           0x008 /* R DMAC Component Version */
+#define DMAC_CFG               0x010 /* R/W DMAC Configuration */
+#define DMAC_CHEN              0x018 /* R/W DMAC Channel Enable */
+#define DMAC_CHEN_L            0x018 /* R/W DMAC Channel Enable 00-31 */
+#define DMAC_CHEN_H            0x01C /* R/W DMAC Channel Enable 32-63 */
+#define DMAC_INTSTATUS         0x030 /* R DMAC Interrupt Status */
+#define DMAC_COMMON_INTCLEAR   0x038 /* W DMAC Interrupt Clear */
+#define DMAC_COMMON_INTSTATUS_ENA 0x040 /* R DMAC Interrupt Status Enable */
+#define DMAC_COMMON_INTSIGNAL_ENA 0x048 /* R/W DMAC Interrupt Signal Enable */
+#define DMAC_COMMON_INTSTATUS  0x050 /* R DMAC Interrupt Status */
+#define DMAC_RESET             0x058 /* R DMAC Reset Register1 */
+
+/* DMA channel registers offset */
+#define CH_SAR                 0x000 /* R/W Chan Source Address */
+#define CH_DAR                 0x008 /* R/W Chan Destination Address */
+#define CH_BLOCK_TS            0x010 /* R/W Chan Block Transfer Size */
+#define CH_CTL                 0x018 /* R/W Chan Control */
+#define CH_CTL_L               0x018 /* R/W Chan Control 00-31 */
+#define CH_CTL_H               0x01C /* R/W Chan Control 32-63 */
+#define CH_CFG                 0x020 /* R/W Chan Configuration */
+#define CH_CFG_L               0x020 /* R/W Chan Configuration 00-31 */
+#define CH_CFG_H               0x024 /* R/W Chan Configuration 32-63 */
+#define CH_LLP                 0x028 /* R/W Chan Linked List Pointer */
+#define CH_STATUS              0x030 /* R Chan Status */
+#define CH_SWHSSRC             0x038 /* R/W Chan SW Handshake Source */
+#define CH_SWHSDST             0x040 /* R/W Chan SW Handshake Destination */
+#define CH_BLK_TFR_RESUMEREQ   0x048 /* W Chan Block Transfer Resume Req */
+#define CH_AXI_ID              0x050 /* R/W Chan AXI ID */
+#define CH_AXI_QOS             0x058 /* R/W Chan AXI QOS */
+#define CH_SSTAT               0x060 /* R Chan Source Status */
+#define CH_DSTAT               0x068 /* R Chan Destination Status */
+#define CH_SSTATAR             0x070 /* R/W Chan Source Status Fetch Addr */
+#define CH_DSTATAR             0x078 /* R/W Chan Destination Status Fetch Addr */
+#define CH_INTSTATUS_ENA       0x080 /* R/W Chan Interrupt Status Enable */
+#define CH_INTSTATUS           0x088 /* R/W Chan Interrupt Status */
+#define CH_INTSIGNAL_ENA       0x090 /* R/W Chan Interrupt Signal Enable */
+#define CH_INTCLEAR            0x098 /* W Chan Interrupt Clear */
+
+
+/* DMAC_CFG */
+#define DMAC_EN_POS                    0
+#define DMAC_EN_MASK                   BIT(DMAC_EN_POS)
+
+#define INT_EN_POS                     1
+#define INT_EN_MASK                    BIT(INT_EN_POS)
+
+#define DMAC_CHAN_EN_SHIFT             0
+#define DMAC_CHAN_EN_WE_SHIFT          8
+
+#define DMAC_CHAN_SUSP_SHIFT           16
+#define DMAC_CHAN_SUSP_WE_SHIFT                24
+
+/* CH_CTL_H */
+#define CH_CTL_H_ARLEN_EN              BIT(6)
+#define CH_CTL_H_ARLEN_POS             7
+#define CH_CTL_H_AWLEN_EN              BIT(15)
+#define CH_CTL_H_AWLEN_POS             16
+
+enum {
+       DWAXIDMAC_ARWLEN_1              = 0,
+       DWAXIDMAC_ARWLEN_2              = 1,
+       DWAXIDMAC_ARWLEN_4              = 3,
+       DWAXIDMAC_ARWLEN_8              = 7,
+       DWAXIDMAC_ARWLEN_16             = 15,
+       DWAXIDMAC_ARWLEN_32             = 31,
+       DWAXIDMAC_ARWLEN_64             = 63,
+       DWAXIDMAC_ARWLEN_128            = 127,
+       DWAXIDMAC_ARWLEN_256            = 255,
+       DWAXIDMAC_ARWLEN_MIN            = DWAXIDMAC_ARWLEN_1,
+       DWAXIDMAC_ARWLEN_MAX            = DWAXIDMAC_ARWLEN_256
+};
+
+#define CH_CTL_H_LLI_LAST              BIT(30)
+#define CH_CTL_H_LLI_VALID             BIT(31)
+
+/* CH_CTL_L */
+#define CH_CTL_L_LAST_WRITE_EN         BIT(30)
+
+#define CH_CTL_L_DST_MSIZE_POS         18
+#define CH_CTL_L_SRC_MSIZE_POS         14
+
+enum {
+       DWAXIDMAC_BURST_TRANS_LEN_1     = 0,
+       DWAXIDMAC_BURST_TRANS_LEN_4,
+       DWAXIDMAC_BURST_TRANS_LEN_8,
+       DWAXIDMAC_BURST_TRANS_LEN_16,
+       DWAXIDMAC_BURST_TRANS_LEN_32,
+       DWAXIDMAC_BURST_TRANS_LEN_64,
+       DWAXIDMAC_BURST_TRANS_LEN_128,
+       DWAXIDMAC_BURST_TRANS_LEN_256,
+       DWAXIDMAC_BURST_TRANS_LEN_512,
+       DWAXIDMAC_BURST_TRANS_LEN_1024
+};
+
+#define CH_CTL_L_DST_WIDTH_POS         11
+#define CH_CTL_L_SRC_WIDTH_POS         8
+
+#define CH_CTL_L_DST_INC_POS           6
+#define CH_CTL_L_SRC_INC_POS           4
+enum {
+       DWAXIDMAC_CH_CTL_L_INC          = 0,
+       DWAXIDMAC_CH_CTL_L_NOINC
+};
+
+#define CH_CTL_L_DST_MAST              BIT(2)
+#define CH_CTL_L_SRC_MAST              BIT(0)
+
+/* CH_CFG_H */
+#define CH_CFG_H_PRIORITY_POS          17
+#define CH_CFG_H_HS_SEL_DST_POS                4
+#define CH_CFG_H_HS_SEL_SRC_POS                3
+enum {
+       DWAXIDMAC_HS_SEL_HW             = 0,
+       DWAXIDMAC_HS_SEL_SW
+};
+
+#define CH_CFG_H_TT_FC_POS             0
+enum {
+       DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC = 0,
+       DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC,
+       DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC,
+       DWAXIDMAC_TT_FC_PER_TO_PER_DMAC,
+       DWAXIDMAC_TT_FC_PER_TO_MEM_SRC,
+       DWAXIDMAC_TT_FC_PER_TO_PER_SRC,
+       DWAXIDMAC_TT_FC_MEM_TO_PER_DST,
+       DWAXIDMAC_TT_FC_PER_TO_PER_DST
+};
+
+/* CH_CFG_L */
+#define CH_CFG_L_DST_MULTBLK_TYPE_POS  2
+#define CH_CFG_L_SRC_MULTBLK_TYPE_POS  0
+enum {
+       DWAXIDMAC_MBLK_TYPE_CONTIGUOUS  = 0,
+       DWAXIDMAC_MBLK_TYPE_RELOAD,
+       DWAXIDMAC_MBLK_TYPE_SHADOW_REG,
+       DWAXIDMAC_MBLK_TYPE_LL
+};
+
+/**
+ * DW AXI DMA channel interrupts
+ *
+ * @DWAXIDMAC_IRQ_NONE: No interrupt bits set
+ * @DWAXIDMAC_IRQ_BLOCK_TRF: Block transfer complete
+ * @DWAXIDMAC_IRQ_DMA_TRF: Dma transfer complete
+ * @DWAXIDMAC_IRQ_SRC_TRAN: Source transaction complete
+ * @DWAXIDMAC_IRQ_DST_TRAN: Destination transaction complete
+ * @DWAXIDMAC_IRQ_SRC_DEC_ERR: Source decode error
+ * @DWAXIDMAC_IRQ_DST_DEC_ERR: Destination decode error
+ * @DWAXIDMAC_IRQ_SRC_SLV_ERR: Source slave error
+ * @DWAXIDMAC_IRQ_DST_SLV_ERR: Destination slave error
+ * @DWAXIDMAC_IRQ_LLI_RD_DEC_ERR: LLI read decode error
+ * @DWAXIDMAC_IRQ_LLI_WR_DEC_ERR: LLI write decode error
+ * @DWAXIDMAC_IRQ_LLI_RD_SLV_ERR: LLI read slave error
+ * @DWAXIDMAC_IRQ_LLI_WR_SLV_ERR: LLI write slave error
+ * @DWAXIDMAC_IRQ_INVALID_ERR: LLI invalid error or Shadow register error
+ * @DWAXIDMAC_IRQ_MULTIBLKTYPE_ERR: Slave Interface Multiblock type error
+ * @DWAXIDMAC_IRQ_DEC_ERR: Slave Interface decode error
+ * @DWAXIDMAC_IRQ_WR2RO_ERR: Slave Interface write to read only error
+ * @DWAXIDMAC_IRQ_RD2RWO_ERR: Slave Interface read to write only error
+ * @DWAXIDMAC_IRQ_WRONCHEN_ERR: Slave Interface write to channel error
+ * @DWAXIDMAC_IRQ_SHADOWREG_ERR: Slave Interface shadow reg error
+ * @DWAXIDMAC_IRQ_WRONHOLD_ERR: Slave Interface hold error
+ * @DWAXIDMAC_IRQ_LOCK_CLEARED: Lock Cleared Status
+ * @DWAXIDMAC_IRQ_SRC_SUSPENDED: Source Suspended Status
+ * @DWAXIDMAC_IRQ_SUSPENDED: Channel Suspended Status
+ * @DWAXIDMAC_IRQ_DISABLED: Channel Disabled Status
+ * @DWAXIDMAC_IRQ_ABORTED: Channel Aborted Status
+ * @DWAXIDMAC_IRQ_ALL_ERR: Bitmask of all error interrupts
+ * @DWAXIDMAC_IRQ_ALL: Bitmask of all interrupts
+ */
+enum {
+       DWAXIDMAC_IRQ_NONE              = 0,
+       DWAXIDMAC_IRQ_BLOCK_TRF         = BIT(0),
+       DWAXIDMAC_IRQ_DMA_TRF           = BIT(1),
+       DWAXIDMAC_IRQ_SRC_TRAN          = BIT(3),
+       DWAXIDMAC_IRQ_DST_TRAN          = BIT(4),
+       DWAXIDMAC_IRQ_SRC_DEC_ERR       = BIT(5),
+       DWAXIDMAC_IRQ_DST_DEC_ERR       = BIT(6),
+       DWAXIDMAC_IRQ_SRC_SLV_ERR       = BIT(7),
+       DWAXIDMAC_IRQ_DST_SLV_ERR       = BIT(8),
+       DWAXIDMAC_IRQ_LLI_RD_DEC_ERR    = BIT(9),
+       DWAXIDMAC_IRQ_LLI_WR_DEC_ERR    = BIT(10),
+       DWAXIDMAC_IRQ_LLI_RD_SLV_ERR    = BIT(11),
+       DWAXIDMAC_IRQ_LLI_WR_SLV_ERR    = BIT(12),
+       DWAXIDMAC_IRQ_INVALID_ERR       = BIT(13),
+       DWAXIDMAC_IRQ_MULTIBLKTYPE_ERR  = BIT(14),
+       DWAXIDMAC_IRQ_DEC_ERR           = BIT(16),
+       DWAXIDMAC_IRQ_WR2RO_ERR         = BIT(17),
+       DWAXIDMAC_IRQ_RD2RWO_ERR        = BIT(18),
+       DWAXIDMAC_IRQ_WRONCHEN_ERR      = BIT(19),
+       DWAXIDMAC_IRQ_SHADOWREG_ERR     = BIT(20),
+       DWAXIDMAC_IRQ_WRONHOLD_ERR      = BIT(21),
+       DWAXIDMAC_IRQ_LOCK_CLEARED      = BIT(27),
+       DWAXIDMAC_IRQ_SRC_SUSPENDED     = BIT(28),
+       DWAXIDMAC_IRQ_SUSPENDED         = BIT(29),
+       DWAXIDMAC_IRQ_DISABLED          = BIT(30),
+       DWAXIDMAC_IRQ_ABORTED           = BIT(31),
+       DWAXIDMAC_IRQ_ALL_ERR           = (GENMASK(21, 16) | GENMASK(14, 5)),
+       DWAXIDMAC_IRQ_ALL               = GENMASK(31, 0)
+};
+
+enum {
+       DWAXIDMAC_TRANS_WIDTH_8         = 0,
+       DWAXIDMAC_TRANS_WIDTH_16,
+       DWAXIDMAC_TRANS_WIDTH_32,
+       DWAXIDMAC_TRANS_WIDTH_64,
+       DWAXIDMAC_TRANS_WIDTH_128,
+       DWAXIDMAC_TRANS_WIDTH_256,
+       DWAXIDMAC_TRANS_WIDTH_512,
+       DWAXIDMAC_TRANS_WIDTH_MAX       = DWAXIDMAC_TRANS_WIDTH_512
+};
+
+#endif /* _AXI_DMA_PLATFORM_H */
index 948df1ab5f1a26bfa564f55db06b7e352d4152bb..85ea92fcea5400e7fb5676f8c79c772e3c89b907 100644 (file)
@@ -1876,6 +1876,11 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 
        if (memcpy_channels) {
                m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
+               if (!m_ddev) {
+                       dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
+                       memcpy_channels = NULL;
+                       goto ch_setup;
+               }
                ecc->dma_memcpy = m_ddev;
 
                dma_cap_zero(m_ddev->cap_mask);
@@ -1903,6 +1908,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
                dev_info(ecc->dev, "memcpy is disabled\n");
        }
 
+ch_setup:
        for (i = 0; i < ecc->num_channels; i++) {
                struct edma_chan *echan = &ecc->slave_chans[i];
                echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
index e7db24c67030d19a0db9c88d46ae867d11e4d0dd..ccd03c3cedfeda7c3508677e85017178d8ab2f63 100644 (file)
@@ -338,6 +338,7 @@ struct sdma_channel {
        unsigned int                    chn_real_count;
        struct tasklet_struct           tasklet;
        struct imx_dma_data             data;
+       bool                            enabled;
 };
 
 #define IMX_DMA_SG_LOOP                BIT(0)
@@ -596,7 +597,14 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
 
 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 {
+       unsigned long flags;
+       struct sdma_channel *sdmac = &sdma->channel[channel];
+
        writel(BIT(channel), sdma->regs + SDMA_H_START);
+
+       spin_lock_irqsave(&sdmac->lock, flags);
+       sdmac->enabled = true;
+       spin_unlock_irqrestore(&sdmac->lock, flags);
 }
 
 /*
@@ -685,6 +693,14 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
        struct sdma_buffer_descriptor *bd;
        int error = 0;
        enum dma_status old_status = sdmac->status;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sdmac->lock, flags);
+       if (!sdmac->enabled) {
+               spin_unlock_irqrestore(&sdmac->lock, flags);
+               return;
+       }
+       spin_unlock_irqrestore(&sdmac->lock, flags);
 
        /*
         * loop mode. Iterate over descriptors, re-setup them and
@@ -938,10 +954,15 @@ static int sdma_disable_channel(struct dma_chan *chan)
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
        int channel = sdmac->channel;
+       unsigned long flags;
 
        writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
        sdmac->status = DMA_ERROR;
 
+       spin_lock_irqsave(&sdmac->lock, flags);
+       sdmac->enabled = false;
+       spin_unlock_irqrestore(&sdmac->lock, flags);
+
        return 0;
 }
 
diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig
new file mode 100644 (file)
index 0000000..27bac0b
--- /dev/null
@@ -0,0 +1,13 @@
+
+config MTK_HSDMA
+       tristate "MediaTek High-Speed DMA controller support"
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       ---help---
+         Enable support for High-Speed DMA controller on MediaTek
+         SoCs.
+
+         This controller provides the channels which are dedicated to
+         memory-to-memory transfers to offload from the CPU through ring-
+         based descriptor management.
diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile
new file mode 100644 (file)
index 0000000..6e778f8
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
new file mode 100644 (file)
index 0000000..b7ec56a
--- /dev/null
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017-2018 MediaTek Inc.
+
+/*
+ * Driver for MediaTek High-Speed DMA Controller
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+
+#include "../virt-dma.h"
+
+#define MTK_HSDMA_USEC_POLL            20
+#define MTK_HSDMA_TIMEOUT_POLL         200000
+#define MTK_HSDMA_DMA_BUSWIDTHS                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+/* The default number of virtual channel */
+#define MTK_HSDMA_NR_VCHANS            3
+
+/* Only one physical channel supported */
+#define MTK_HSDMA_NR_MAX_PCHANS                1
+
+/* Macro for physical descriptor (PD) manipulation */
+/* The number of PDs, which must be a power of 2 */
+#define MTK_DMA_SIZE                   64
+#define MTK_HSDMA_NEXT_DESP_IDX(x, y)  (((x) + 1) & ((y) - 1))
+#define MTK_HSDMA_LAST_DESP_IDX(x, y)  (((x) - 1) & ((y) - 1))
+#define MTK_HSDMA_MAX_LEN              0x3f80
+#define MTK_HSDMA_ALIGN_SIZE           4
+#define MTK_HSDMA_PLEN_MASK            0x3fff
+#define MTK_HSDMA_DESC_PLEN(x)         (((x) & MTK_HSDMA_PLEN_MASK) << 16)
+#define MTK_HSDMA_DESC_PLEN_GET(x)     (((x) >> 16) & MTK_HSDMA_PLEN_MASK)
+
+/* Registers for underlying ring manipulation */
+#define MTK_HSDMA_TX_BASE              0x0
+#define MTK_HSDMA_TX_CNT               0x4
+#define MTK_HSDMA_TX_CPU               0x8
+#define MTK_HSDMA_TX_DMA               0xc
+#define MTK_HSDMA_RX_BASE              0x100
+#define MTK_HSDMA_RX_CNT               0x104
+#define MTK_HSDMA_RX_CPU               0x108
+#define MTK_HSDMA_RX_DMA               0x10c
+
+/* Registers for global setup */
+#define MTK_HSDMA_GLO                  0x204
+#define MTK_HSDMA_GLO_MULTI_DMA                BIT(10)
+#define MTK_HSDMA_TX_WB_DDONE          BIT(6)
+#define MTK_HSDMA_BURST_64BYTES                (0x2 << 4)
+#define MTK_HSDMA_GLO_RX_BUSY          BIT(3)
+#define MTK_HSDMA_GLO_RX_DMA           BIT(2)
+#define MTK_HSDMA_GLO_TX_BUSY          BIT(1)
+#define MTK_HSDMA_GLO_TX_DMA           BIT(0)
+#define MTK_HSDMA_GLO_DMA              (MTK_HSDMA_GLO_TX_DMA | \
+                                        MTK_HSDMA_GLO_RX_DMA)
+#define MTK_HSDMA_GLO_BUSY             (MTK_HSDMA_GLO_RX_BUSY | \
+                                        MTK_HSDMA_GLO_TX_BUSY)
+#define MTK_HSDMA_GLO_DEFAULT          (MTK_HSDMA_GLO_TX_DMA | \
+                                        MTK_HSDMA_GLO_RX_DMA | \
+                                        MTK_HSDMA_TX_WB_DDONE | \
+                                        MTK_HSDMA_BURST_64BYTES | \
+                                        MTK_HSDMA_GLO_MULTI_DMA)
+
+/* Registers for reset */
+#define MTK_HSDMA_RESET                        0x208
+#define MTK_HSDMA_RST_TX               BIT(0)
+#define MTK_HSDMA_RST_RX               BIT(16)
+
+/* Registers for interrupt control */
+#define MTK_HSDMA_DLYINT               0x20c
+#define MTK_HSDMA_RXDLY_INT_EN         BIT(15)
+
+/* Interrupt fires when the pending number's more than the specified */
+#define MTK_HSDMA_RXMAX_PINT(x)                (((x) & 0x7f) << 8)
+
+/* Interrupt fires when the pending time's more than the specified in 20 us */
+#define MTK_HSDMA_RXMAX_PTIME(x)       ((x) & 0x7f)
+#define MTK_HSDMA_DLYINT_DEFAULT       (MTK_HSDMA_RXDLY_INT_EN | \
+                                        MTK_HSDMA_RXMAX_PINT(20) | \
+                                        MTK_HSDMA_RXMAX_PTIME(20))
+#define MTK_HSDMA_INT_STATUS           0x220
+#define MTK_HSDMA_INT_ENABLE           0x228
+#define MTK_HSDMA_INT_RXDONE           BIT(16)
+
+enum mtk_hsdma_vdesc_flag {
+       MTK_HSDMA_VDESC_FINISHED        = 0x01,
+};
+
+#define IS_MTK_HSDMA_VDESC_FINISHED(x) ((x) == MTK_HSDMA_VDESC_FINISHED)
+
+/**
+ * struct mtk_hsdma_pdesc - This is the struct holding info describing physical
+ *                         descriptor (PD) and its placement must be kept at
+ *                         4-bytes alignment in little endian order.
+ * @desc[1-4]:             The control pad used to indicate hardware how to
+ *                         deal with the descriptor such as source and
+ *                         destination address and data length. The maximum
+ *                         data length each pdesc can handle is 0x3f80 bytes
+ */
+struct mtk_hsdma_pdesc {
+       __le32 desc1;
+       __le32 desc2;
+       __le32 desc3;
+       __le32 desc4;
+} __packed __aligned(4);
+
+/**
+ * struct mtk_hsdma_vdesc - This is the struct holding info describing virtual
+ *                         descriptor (VD)
+ * @vd:                            An instance for struct virt_dma_desc
+ * @len:                   The total data size device wants to move
+ * @residue:               The remaining data size device will move
+ * @dest:                  The destination address device wants to move to
+ * @src:                   The source address device wants to move from
+ */
+struct mtk_hsdma_vdesc {
+       struct virt_dma_desc vd;
+       size_t len;
+       size_t residue;
+       dma_addr_t dest;
+       dma_addr_t src;
+};
+
+/**
+ * struct mtk_hsdma_cb - This is the struct holding extra info required for RX
+ *                      ring to know what relevant VD the PD is being
+ *                      mapped to.
+ * @vd:                         Pointer to the relevant VD.
+ * @flag:               Flag indicating what action should be taken when VD
+ *                      is completed.
+ */
+struct mtk_hsdma_cb {
+       struct virt_dma_desc *vd;
+       enum mtk_hsdma_vdesc_flag flag;
+};
+
+/**
+ * struct mtk_hsdma_ring - This struct holds info describing underlying ring
+ *                        space
+ * @txd:                  The descriptor TX ring which describes DMA source
+ *                        information
+ * @rxd:                  The descriptor RX ring which describes DMA
+ *                        destination information
+ * @cb:                           The extra information pointed at by RX ring
+ * @tphys:                The physical addr of TX ring
+ * @rphys:                The physical addr of RX ring
+ * @cur_tptr:             Pointer to the next free descriptor used by the host
+ * @cur_rptr:             Pointer to the last done descriptor by the device
+ */
+struct mtk_hsdma_ring {
+       struct mtk_hsdma_pdesc *txd;
+       struct mtk_hsdma_pdesc *rxd;
+       struct mtk_hsdma_cb *cb;
+       dma_addr_t tphys;
+       dma_addr_t rphys;
+       u16 cur_tptr;
+       u16 cur_rptr;
+};
+
+/**
+ * struct mtk_hsdma_pchan - This is the struct holding info describing physical
+ *                        channel (PC)
+ * @ring:                 An instance for the underlying ring
+ * @sz_ring:              Total size allocated for the ring
+ * @nr_free:              Total number of free rooms in the ring. It would
+ *                        be accessed and updated frequently between IRQ
+ *                        context and user context to reflect whether ring
+ *                        can accept requests from VD.
+ */
+struct mtk_hsdma_pchan {
+       struct mtk_hsdma_ring ring;
+       size_t sz_ring;
+       atomic_t nr_free;
+};
+
+/**
+ * struct mtk_hsdma_vchan - This is the struct holding info describing virtual
+ *                        channel (VC)
+ * @vc:                           An instance for struct virt_dma_chan
+ * @issue_completion:     The wait for all issued descriptors completed
+ * @issue_synchronize:    Bool indicating channel synchronization starts
+ * @desc_hw_processing:           List those descriptors the hardware is processing,
+ *                        which is protected by vc.lock
+ */
+struct mtk_hsdma_vchan {
+       struct virt_dma_chan vc;
+       struct completion issue_completion;
+       bool issue_synchronize;
+       struct list_head desc_hw_processing;
+};
+
+/**
+ * struct mtk_hsdma_soc - This is the struct holding differences among SoCs
+ * @ddone:               Bit mask for DDONE
+ * @ls0:                 Bit mask for LS0
+ */
+struct mtk_hsdma_soc {
+       __le32 ddone;
+       __le32 ls0;
+};
+
+/**
+ * struct mtk_hsdma_device - This is the struct holding info describing HSDMA
+ *                          device
+ * @ddev:                   An instance for struct dma_device
+ * @base:                   The mapped register I/O base
+ * @clk:                    The clock that the device internally uses
+ * @irq:                    The IRQ that the device is using
+ * @dma_requests:           The number of VCs the device supports
+ * @vc:                             The pointer to all available VCs
+ * @pc:                             The pointer to the underlying PC
+ * @pc_refcnt:              Track how many VCs are using the PC
+ * @lock:                   Lock protecting against multiple VCs accessing the PC
+ * @soc:                    The pointer to area holding differences among
+ *                          various platforms
+ */
+struct mtk_hsdma_device {
+       struct dma_device ddev;
+       void __iomem *base;
+       struct clk *clk;
+       u32 irq;
+
+       u32 dma_requests;
+       struct mtk_hsdma_vchan *vc;
+       struct mtk_hsdma_pchan *pc;
+       refcount_t pc_refcnt;
+
+       /* Lock used to protect against multiple VCs access PC */
+       spinlock_t lock;
+
+       const struct mtk_hsdma_soc *soc;
+};
+
+static struct mtk_hsdma_device *to_hsdma_dev(struct dma_chan *chan)
+{
+       return container_of(chan->device, struct mtk_hsdma_device, ddev);
+}
+
+static inline struct mtk_hsdma_vchan *to_hsdma_vchan(struct dma_chan *chan)
+{
+       return container_of(chan, struct mtk_hsdma_vchan, vc.chan);
+}
+
+static struct mtk_hsdma_vdesc *to_hsdma_vdesc(struct virt_dma_desc *vd)
+{
+       return container_of(vd, struct mtk_hsdma_vdesc, vd);
+}
+
+static struct device *hsdma2dev(struct mtk_hsdma_device *hsdma)
+{
+       return hsdma->ddev.dev;
+}
+
+static u32 mtk_dma_read(struct mtk_hsdma_device *hsdma, u32 reg)
+{
+       return readl(hsdma->base + reg);
+}
+
+static void mtk_dma_write(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
+{
+       writel(val, hsdma->base + reg);
+}
+
+static void mtk_dma_rmw(struct mtk_hsdma_device *hsdma, u32 reg,
+                       u32 mask, u32 set)
+{
+       u32 val;
+
+       val = mtk_dma_read(hsdma, reg);
+       val &= ~mask;
+       val |= set;
+       mtk_dma_write(hsdma, reg, val);
+}
+
+static void mtk_dma_set(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
+{
+       mtk_dma_rmw(hsdma, reg, 0, val);
+}
+
+static void mtk_dma_clr(struct mtk_hsdma_device *hsdma, u32 reg, u32 val)
+{
+       mtk_dma_rmw(hsdma, reg, val, 0);
+}
+
+static void mtk_hsdma_vdesc_free(struct virt_dma_desc *vd)
+{
+       kfree(container_of(vd, struct mtk_hsdma_vdesc, vd));
+}
+
+static int mtk_hsdma_busy_wait(struct mtk_hsdma_device *hsdma)
+{
+       u32 status = 0;
+
+       return readl_poll_timeout(hsdma->base + MTK_HSDMA_GLO, status,
+                                 !(status & MTK_HSDMA_GLO_BUSY),
+                                 MTK_HSDMA_USEC_POLL,
+                                 MTK_HSDMA_TIMEOUT_POLL);
+}
+
+static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
+                                struct mtk_hsdma_pchan *pc)
+{
+       struct mtk_hsdma_ring *ring = &pc->ring;
+       int err;
+
+       memset(pc, 0, sizeof(*pc));
+
+       /*
+        * Allocate ring space where [0 ... MTK_DMA_SIZE - 1] is for TX ring
+        * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
+        */
+       pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
+       ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
+                                       &ring->tphys, GFP_NOWAIT);
+       if (!ring->txd)
+               return -ENOMEM;
+
+       ring->rxd = &ring->txd[MTK_DMA_SIZE];
+       ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd);
+       ring->cur_tptr = 0;
+       ring->cur_rptr = MTK_DMA_SIZE - 1;
+
+       ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT);
+       if (!ring->cb) {
+               err = -ENOMEM;
+               goto err_free_dma;
+       }
+
+       atomic_set(&pc->nr_free, MTK_DMA_SIZE - 1);
+
+       /* Disable HSDMA and wait for the completion */
+       mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
+       err = mtk_hsdma_busy_wait(hsdma);
+       if (err)
+               goto err_free_cb;
+
+       /* Reset */
+       mtk_dma_set(hsdma, MTK_HSDMA_RESET,
+                   MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);
+       mtk_dma_clr(hsdma, MTK_HSDMA_RESET,
+                   MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX);
+
+       /* Setup HSDMA initial pointer in the ring */
+       mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys);
+       mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, MTK_DMA_SIZE);
+       mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
+       mtk_dma_write(hsdma, MTK_HSDMA_TX_DMA, 0);
+       mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys);
+       mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, MTK_DMA_SIZE);
+       mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr);
+       mtk_dma_write(hsdma, MTK_HSDMA_RX_DMA, 0);
+
+       /* Enable HSDMA */
+       mtk_dma_set(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
+
+       /* Setup delayed interrupt */
+       mtk_dma_write(hsdma, MTK_HSDMA_DLYINT, MTK_HSDMA_DLYINT_DEFAULT);
+
+       /* Enable interrupt */
+       mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
+
+       return 0;
+
+err_free_cb:
+       kfree(ring->cb);
+
+err_free_dma:
+       dma_free_coherent(hsdma2dev(hsdma),
+                         pc->sz_ring, ring->txd, ring->tphys);
+       return err;
+}
+
+static void mtk_hsdma_free_pchan(struct mtk_hsdma_device *hsdma,
+                                struct mtk_hsdma_pchan *pc)
+{
+       struct mtk_hsdma_ring *ring = &pc->ring;
+
+       /* Disable HSDMA and then wait for the completion */
+       mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA);
+       mtk_hsdma_busy_wait(hsdma);
+
+       /* Reset pointer in the ring */
+       mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
+       mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, 0);
+       mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, 0);
+       mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, 0);
+       mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, 0);
+       mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, 0);
+       mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, MTK_DMA_SIZE - 1);
+
+       kfree(ring->cb);
+
+       dma_free_coherent(hsdma2dev(hsdma),
+                         pc->sz_ring, ring->txd, ring->tphys);
+}
+
+static int mtk_hsdma_issue_pending_vdesc(struct mtk_hsdma_device *hsdma,
+                                        struct mtk_hsdma_pchan *pc,
+                                        struct mtk_hsdma_vdesc *hvd)
+{
+       struct mtk_hsdma_ring *ring = &pc->ring;
+       struct mtk_hsdma_pdesc *txd, *rxd;
+       u16 reserved, prev, tlen, num_sgs;
+       unsigned long flags;
+
+       /* Protect against PC is accessed by multiple VCs simultaneously */
+       spin_lock_irqsave(&hsdma->lock, flags);
+
+       /*
+        * Reserve rooms, where pc->nr_free is used to track how many free
+        * rooms in the ring being updated in user and IRQ context.
+        */
+       num_sgs = DIV_ROUND_UP(hvd->len, MTK_HSDMA_MAX_LEN);
+       reserved = min_t(u16, num_sgs, atomic_read(&pc->nr_free));
+
+       if (!reserved) {
+               spin_unlock_irqrestore(&hsdma->lock, flags);
+               return -ENOSPC;
+       }
+
+       atomic_sub(reserved, &pc->nr_free);
+
+       while (reserved--) {
+               /* Limit size by PD capability for valid data moving */
+               tlen = (hvd->len > MTK_HSDMA_MAX_LEN) ?
+                      MTK_HSDMA_MAX_LEN : hvd->len;
+
+               /*
+                * Setup PDs using the remaining VD info mapped on those
+                * reserved rooms. And since RXD is shared memory between the
+                * host and the device allocated by dma_alloc_coherent call,
+                * the helper macro WRITE_ONCE can ensure the data written to
+                * RAM really happens.
+                */
+               txd = &ring->txd[ring->cur_tptr];
+               WRITE_ONCE(txd->desc1, hvd->src);
+               WRITE_ONCE(txd->desc2,
+                          hsdma->soc->ls0 | MTK_HSDMA_DESC_PLEN(tlen));
+
+               rxd = &ring->rxd[ring->cur_tptr];
+               WRITE_ONCE(rxd->desc1, hvd->dest);
+               WRITE_ONCE(rxd->desc2, MTK_HSDMA_DESC_PLEN(tlen));
+
+               /* Associate VD, the PD belonged to */
+               ring->cb[ring->cur_tptr].vd = &hvd->vd;
+
+               /* Move forward the pointer of TX ring */
+               ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr,
+                                                        MTK_DMA_SIZE);
+
+               /* Update VD with remaining data */
+               hvd->src  += tlen;
+               hvd->dest += tlen;
+               hvd->len  -= tlen;
+       }
+
+       /*
+        * Tagging flag for the last PD for VD will be responsible for
+        * completing VD.
+        */
+       if (!hvd->len) {
+               prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE);
+               ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED;
+       }
+
+       /* Ensure all changes indeed done before we're going on */
+       wmb();
+
+       /*
+        * Updating into hardware the pointer of TX ring lets HSDMA take
+        * action for those pending PDs.
+        */
+       mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);
+
+       spin_unlock_irqrestore(&hsdma->lock, flags);
+
+       return 0;
+}
+
+static void mtk_hsdma_issue_vchan_pending(struct mtk_hsdma_device *hsdma,
+                                         struct mtk_hsdma_vchan *hvc)
+{
+       struct virt_dma_desc *vd, *vd2;
+       int err;
+
+       lockdep_assert_held(&hvc->vc.lock);
+
+       list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) {
+               struct mtk_hsdma_vdesc *hvd;
+
+               hvd = to_hsdma_vdesc(vd);
+
+               /* Map VD into PC and all VCs shares a single PC */
+               err = mtk_hsdma_issue_pending_vdesc(hsdma, hsdma->pc, hvd);
+
+               /*
+                * Move VD from desc_issued to desc_hw_processing when entire
+                * VD is fit into available PDs. Otherwise, the uncompleted
+                * VDs would stay in list desc_issued and then restart the
+                * processing as soon as possible once underlying ring space
+                * got freed.
+                */
+               if (err == -ENOSPC || hvd->len > 0)
+                       break;
+
+               /*
+                * The extra list desc_hw_processing is used because
+                * hardware can't provide sufficient information allowing us
+                * to know what VDs are still working on the underlying ring.
+                * Through the additional list, it can help us to implement
+                * terminate_all, residue calculation and such thing needed
+                * to know detail descriptor status on the hardware.
+                */
+               list_move_tail(&vd->node, &hvc->desc_hw_processing);
+       }
+}
+
+static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma)
+{
+       struct mtk_hsdma_vchan *hvc;
+       struct mtk_hsdma_pdesc *rxd;
+       struct mtk_hsdma_vdesc *hvd;
+       struct mtk_hsdma_pchan *pc;
+       struct mtk_hsdma_cb *cb;
+       int i = MTK_DMA_SIZE;
+       __le32 desc2;
+       u32 status;
+       u16 next;
+
+       /* Read IRQ status */
+       status = mtk_dma_read(hsdma, MTK_HSDMA_INT_STATUS);
+       if (unlikely(!(status & MTK_HSDMA_INT_RXDONE)))
+               goto rx_done;
+
+       pc = hsdma->pc;
+
+       /*
+        * Using a fail-safe loop with iterations of up to MTK_DMA_SIZE to
+        * reclaim these finished descriptors: The most number of PDs the ISR
+        * can handle at one time shouldn't be more than MTK_DMA_SIZE so we
+        * take it as limited count instead of just using a dangerous infinite
+        * poll.
+        */
+       while (i--) {
+               next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr,
+                                              MTK_DMA_SIZE);
+               rxd = &pc->ring.rxd[next];
+
+               /*
+                * If MTK_HSDMA_DESC_DDONE is not specified, it means data
+                * moving for the PD is still ongoing.
+                */
+               desc2 = READ_ONCE(rxd->desc2);
+               if (!(desc2 & hsdma->soc->ddone))
+                       break;
+
+               cb = &pc->ring.cb[next];
+               if (unlikely(!cb->vd)) {
+                       dev_err(hsdma2dev(hsdma), "cb->vd cannot be null\n");
+                       break;
+               }
+
+               /* Update residue of VD the associated PD belonged to */
+               hvd = to_hsdma_vdesc(cb->vd);
+               hvd->residue -= MTK_HSDMA_DESC_PLEN_GET(rxd->desc2);
+
+               /* Complete VD until the relevant last PD is finished */
+               if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) {
+                       hvc = to_hsdma_vchan(cb->vd->tx.chan);
+
+                       spin_lock(&hvc->vc.lock);
+
+                       /* Remove VD from list desc_hw_processing */
+                       list_del(&cb->vd->node);
+
+                       /* Add VD into list desc_completed */
+                       vchan_cookie_complete(cb->vd);
+
+                       if (hvc->issue_synchronize &&
+                           list_empty(&hvc->desc_hw_processing)) {
+                               complete(&hvc->issue_completion);
+                               hvc->issue_synchronize = false;
+                       }
+                       spin_unlock(&hvc->vc.lock);
+
+                       cb->flag = 0;
+               }
+
+               cb->vd = 0;
+
+               /*
+                * Recycle the RXD with the helper WRITE_ONCE that can ensure
+                * data written into RAM really happens.
+                */
+               WRITE_ONCE(rxd->desc1, 0);
+               WRITE_ONCE(rxd->desc2, 0);
+               pc->ring.cur_rptr = next;
+
+               /* Release rooms */
+               atomic_inc(&pc->nr_free);
+       }
+
+       /* Ensure all changes indeed done before we're going on */
+       wmb();
+
+       /* Update CPU pointer for those completed PDs */
+       mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr);
+
+       /*
+        * Acking the pending IRQ allows hardware no longer to keep the used
+        * IRQ line in certain trigger state when software has completed all
+        * the finished physical descriptors.
+        */
+       if (atomic_read(&pc->nr_free) >= MTK_DMA_SIZE - 1)
+               mtk_dma_write(hsdma, MTK_HSDMA_INT_STATUS, status);
+
+       /* ASAP handles pending VDs in all VCs after freeing some rooms */
+       for (i = 0; i < hsdma->dma_requests; i++) {
+               hvc = &hsdma->vc[i];
+               spin_lock(&hvc->vc.lock);
+               mtk_hsdma_issue_vchan_pending(hsdma, hvc);
+               spin_unlock(&hvc->vc.lock);
+       }
+
+rx_done:
+       /* All completed PDs are cleaned up, so enable interrupt again */
+       mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
+}
+
+static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
+{
+       struct mtk_hsdma_device *hsdma = devid;
+
+       /*
+        * Disable interrupt until all completed PDs are cleaned up in
+        * mtk_hsdma_free_rooms call.
+        */
+       mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);
+
+       mtk_hsdma_free_rooms_in_ring(hsdma);
+
+       return IRQ_HANDLED;
+}
+
+static struct virt_dma_desc *mtk_hsdma_find_active_desc(struct dma_chan *c,
+                                                       dma_cookie_t cookie)
+{
+       struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
+       struct virt_dma_desc *vd;
+
+       list_for_each_entry(vd, &hvc->desc_hw_processing, node)
+               if (vd->tx.cookie == cookie)
+                       return vd;
+
+       list_for_each_entry(vd, &hvc->vc.desc_issued, node)
+               if (vd->tx.cookie == cookie)
+                       return vd;
+
+       return NULL;
+}
+
+static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
+                                          dma_cookie_t cookie,
+                                          struct dma_tx_state *txstate)
+{
+       struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
+       struct mtk_hsdma_vdesc *hvd;
+       struct virt_dma_desc *vd;
+       enum dma_status ret;
+       unsigned long flags;
+       size_t bytes = 0;
+
+       ret = dma_cookie_status(c, cookie, txstate);
+       if (ret == DMA_COMPLETE || !txstate)
+               return ret;
+
+       spin_lock_irqsave(&hvc->vc.lock, flags);
+       vd = mtk_hsdma_find_active_desc(c, cookie);
+       spin_unlock_irqrestore(&hvc->vc.lock, flags);
+
+       if (vd) {
+               hvd = to_hsdma_vdesc(vd);
+               bytes = hvd->residue;
+       }
+
+       dma_set_residue(txstate, bytes);
+
+       return ret;
+}
+
+static void mtk_hsdma_issue_pending(struct dma_chan *c)
+{
+       struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
+       struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
+       unsigned long flags;
+
+       spin_lock_irqsave(&hvc->vc.lock, flags);
+
+       if (vchan_issue_pending(&hvc->vc))
+               mtk_hsdma_issue_vchan_pending(hsdma, hvc);
+
+       spin_unlock_irqrestore(&hvc->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *
+mtk_hsdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest,
+                         dma_addr_t src, size_t len, unsigned long flags)
+{
+       struct mtk_hsdma_vdesc *hvd;
+
+       hvd = kzalloc(sizeof(*hvd), GFP_NOWAIT);
+       if (!hvd)
+               return NULL;
+
+       hvd->len = len;
+       hvd->residue = len;
+       hvd->src = src;
+       hvd->dest = dest;
+
+       return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags);
+}
+
+static int mtk_hsdma_free_inactive_desc(struct dma_chan *c)
+{
+       struct virt_dma_chan *vc = to_virt_chan(c);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&vc->lock, flags);
+       list_splice_tail_init(&vc->desc_allocated, &head);
+       list_splice_tail_init(&vc->desc_submitted, &head);
+       list_splice_tail_init(&vc->desc_issued, &head);
+       spin_unlock_irqrestore(&vc->lock, flags);
+
+       /* At this point, we don't expect users to put descriptors into the VC again */
+       vchan_dma_desc_free_list(vc, &head);
+
+       return 0;
+}
+
+static void mtk_hsdma_free_active_desc(struct dma_chan *c)
+{
+       struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
+       bool sync_needed = false;
+
+       /*
+        * Once issue_synchronize is being set, which means once the hardware
+        * consumes all descriptors for the channel in the ring, the
+        * synchronization must be notified immediately once it is completed.
+        */
+       spin_lock(&hvc->vc.lock);
+       if (!list_empty(&hvc->desc_hw_processing)) {
+               hvc->issue_synchronize = true;
+               sync_needed = true;
+       }
+       spin_unlock(&hvc->vc.lock);
+
+       if (sync_needed)
+               wait_for_completion(&hvc->issue_completion);
+       /*
+        * At this point, we expect that all remaining descriptors in the ring
+        * for the channel have all been processed.
+        */
+       WARN_ONCE(!list_empty(&hvc->desc_hw_processing),
+                 "Desc pending still in list desc_hw_processing\n");
+
+       /* Free all descriptors in list desc_completed */
+       vchan_synchronize(&hvc->vc);
+
+       WARN_ONCE(!list_empty(&hvc->vc.desc_completed),
+                 "Desc pending still in list desc_completed\n");
+}
+
+static int mtk_hsdma_terminate_all(struct dma_chan *c)
+{
+       /*
+        * Free pending descriptors not processed yet by hardware that have
+        * previously been submitted to the channel.
+        */
+       mtk_hsdma_free_inactive_desc(c);
+
+       /*
+        * However, the DMA engine doesn't provide any way to stop these
+        * descriptors being processed currently by hardware. The only way is
+        * to just wait until these descriptors are all processed completely
+        * through mtk_hsdma_free_active_desc call.
+        */
+       mtk_hsdma_free_active_desc(c);
+
+       return 0;
+}
+
+static int mtk_hsdma_alloc_chan_resources(struct dma_chan *c)
+{
+       struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
+       int err;
+
+       /*
+        * Since HSDMA has only one PC, the resource for PC is being allocated
+        * when the first VC is being created and the other VCs would run on
+        * the same PC.
+        */
+       if (!refcount_read(&hsdma->pc_refcnt)) {
+               err = mtk_hsdma_alloc_pchan(hsdma, hsdma->pc);
+               if (err)
+                       return err;
+               /*
+                * refcount_inc would complain increment on 0; use-after-free.
+                * Thus, we need to explicitly set it as 1 initially.
+                */
+               refcount_set(&hsdma->pc_refcnt, 1);
+       } else {
+               refcount_inc(&hsdma->pc_refcnt);
+       }
+
+       return 0;
+}
+
+static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
+{
+       struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
+
+       /* Free all descriptors in all lists on the VC */
+       mtk_hsdma_terminate_all(c);
+
+       /* The resource for PC is not freed until all the VCs are destroyed */
+       if (!refcount_dec_and_test(&hsdma->pc_refcnt))
+               return;
+
+       mtk_hsdma_free_pchan(hsdma, hsdma->pc);
+}
+
+static int mtk_hsdma_hw_init(struct mtk_hsdma_device *hsdma)
+{
+       int err;
+
+       pm_runtime_enable(hsdma2dev(hsdma));
+       pm_runtime_get_sync(hsdma2dev(hsdma));
+
+       err = clk_prepare_enable(hsdma->clk);
+       if (err)
+               return err;
+
+       mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
+       mtk_dma_write(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DEFAULT);
+
+       return 0;
+}
+
+static int mtk_hsdma_hw_deinit(struct mtk_hsdma_device *hsdma)
+{
+       mtk_dma_write(hsdma, MTK_HSDMA_GLO, 0);
+
+       clk_disable_unprepare(hsdma->clk);
+
+       pm_runtime_put_sync(hsdma2dev(hsdma));
+       pm_runtime_disable(hsdma2dev(hsdma));
+
+       return 0;
+}
+
+static const struct mtk_hsdma_soc mt7623_soc = {
+       .ddone = BIT(31),
+       .ls0 = BIT(30),
+};
+
+static const struct mtk_hsdma_soc mt7622_soc = {
+       .ddone = BIT(15),
+       .ls0 = BIT(14),
+};
+
+static const struct of_device_id mtk_hsdma_match[] = {
+       { .compatible = "mediatek,mt7623-hsdma", .data = &mt7623_soc},
+       { .compatible = "mediatek,mt7622-hsdma", .data = &mt7622_soc},
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_hsdma_match);
+
+/*
+ * mtk_hsdma_probe - probe routine for the MediaTek HSDMA controller
+ *
+ * Maps the register space, obtains the "hsdma" clock and interrupt,
+ * describes the DMA_MEMCPY capability and virtual channels to the
+ * dmaengine core, registers the OF translation, powers the hardware up
+ * and installs the interrupt handler.
+ *
+ * Fix: if devm_request_irq() fails, the hardware has already been
+ * initialized and the OF controller registered, so both must be undone
+ * (the original code leaked them by jumping straight to err_unregister).
+ */
+static int mtk_hsdma_probe(struct platform_device *pdev)
+{
+       struct mtk_hsdma_device *hsdma;
+       struct mtk_hsdma_vchan *vc;
+       struct dma_device *dd;
+       struct resource *res;
+       int i, err;
+
+       hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
+       if (!hsdma)
+               return -ENOMEM;
+
+       dd = &hsdma->ddev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       hsdma->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(hsdma->base))
+               return PTR_ERR(hsdma->base);
+
+       hsdma->soc = of_device_get_match_data(&pdev->dev);
+       if (!hsdma->soc) {
+               dev_err(&pdev->dev, "No device match found\n");
+               return -ENODEV;
+       }
+
+       hsdma->clk = devm_clk_get(&pdev->dev, "hsdma");
+       if (IS_ERR(hsdma->clk)) {
+               dev_err(&pdev->dev, "No clock for %s\n",
+                       dev_name(&pdev->dev));
+               return PTR_ERR(hsdma->clk);
+       }
+
+       /*
+        * NOTE(review): platform_get_irq() is the preferred way to fetch
+        * the interrupt line — confirm before converting.
+        */
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "No irq resource for %s\n",
+                       dev_name(&pdev->dev));
+               return -EINVAL;
+       }
+       hsdma->irq = res->start;
+
+       refcount_set(&hsdma->pc_refcnt, 0);
+       spin_lock_init(&hsdma->lock);
+
+       /* The controller only supports memory-to-memory copies */
+       dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+
+       dd->copy_align = MTK_HSDMA_ALIGN_SIZE;
+       dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources;
+       dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
+       dd->device_tx_status = mtk_hsdma_tx_status;
+       dd->device_issue_pending = mtk_hsdma_issue_pending;
+       dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
+       dd->device_terminate_all = mtk_hsdma_terminate_all;
+       dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
+       dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS;
+       dd->directions = BIT(DMA_MEM_TO_MEM);
+       dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+       dd->dev = &pdev->dev;
+       INIT_LIST_HEAD(&dd->channels);
+
+       /* Number of VCs comes from "dma-requests" in DT, with a default */
+       hsdma->dma_requests = MTK_HSDMA_NR_VCHANS;
+       if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+                                                     "dma-requests",
+                                                     &hsdma->dma_requests)) {
+               dev_info(&pdev->dev,
+                        "Using %u as missing dma-requests property\n",
+                        MTK_HSDMA_NR_VCHANS);
+       }
+
+       hsdma->pc = devm_kcalloc(&pdev->dev, MTK_HSDMA_NR_MAX_PCHANS,
+                                sizeof(*hsdma->pc), GFP_KERNEL);
+       if (!hsdma->pc)
+               return -ENOMEM;
+
+       hsdma->vc = devm_kcalloc(&pdev->dev, hsdma->dma_requests,
+                                sizeof(*hsdma->vc), GFP_KERNEL);
+       if (!hsdma->vc)
+               return -ENOMEM;
+
+       for (i = 0; i < hsdma->dma_requests; i++) {
+               vc = &hsdma->vc[i];
+               vc->vc.desc_free = mtk_hsdma_vdesc_free;
+               vchan_init(&vc->vc, dd);
+               init_completion(&vc->issue_completion);
+               INIT_LIST_HEAD(&vc->desc_hw_processing);
+       }
+
+       err = dma_async_device_register(dd);
+       if (err)
+               return err;
+
+       err = of_dma_controller_register(pdev->dev.of_node,
+                                        of_dma_xlate_by_chan_id, hsdma);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "MediaTek HSDMA OF registration failed %d\n", err);
+               goto err_unregister;
+       }
+
+       mtk_hsdma_hw_init(hsdma);
+
+       err = devm_request_irq(&pdev->dev, hsdma->irq,
+                              mtk_hsdma_irq, 0,
+                              dev_name(&pdev->dev), hsdma);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "request_irq failed with err %d\n", err);
+               /* HW is up and OF xlate registered: undo both */
+               goto err_free;
+       }
+
+       platform_set_drvdata(pdev, hsdma);
+
+       dev_info(&pdev->dev, "MediaTek HSDMA driver registered\n");
+
+       return 0;
+
+err_free:
+       mtk_hsdma_hw_deinit(hsdma);
+       of_dma_controller_free(pdev->dev.of_node);
+err_unregister:
+       dma_async_device_unregister(dd);
+
+       return err;
+}
+
+/*
+ * mtk_hsdma_remove - tear down the controller on device removal
+ *
+ * Kills the per-VC tasklets, masks and quiesces the interrupt, powers
+ * the hardware down, then unregisters from the dmaengine core and OF.
+ * The IRQ itself is devm-managed and released by the driver core.
+ */
+static int mtk_hsdma_remove(struct platform_device *pdev)
+{
+       struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev);
+       struct mtk_hsdma_vchan *vc;
+       int i;
+
+       /* Kill VC task */
+       for (i = 0; i < hsdma->dma_requests; i++) {
+               vc = &hsdma->vc[i];
+
+               list_del(&vc->vc.chan.device_node);
+               tasklet_kill(&vc->vc.task);
+       }
+
+       /* Disable DMA interrupt */
+       mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);
+
+       /* Waits for any pending IRQ handlers to complete */
+       synchronize_irq(hsdma->irq);
+
+       /* Disable hardware */
+       mtk_hsdma_hw_deinit(hsdma);
+
+       dma_async_device_unregister(&hsdma->ddev);
+       of_dma_controller_free(pdev->dev.of_node);
+
+       return 0;
+}
+
+/* Platform driver glue; probe/remove are matched via mtk_hsdma_match */
+static struct platform_driver mtk_hsdma_driver = {
+       .probe          = mtk_hsdma_probe,
+       .remove         = mtk_hsdma_remove,
+       .driver = {
+               .name           = KBUILD_MODNAME,
+               .of_match_table = mtk_hsdma_match,
+       },
+};
+module_platform_driver(mtk_hsdma_driver);
+
+MODULE_DESCRIPTION("MediaTek High-Speed DMA Controller Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
index d7327fd5f445667a30f2e691c321a46ad03e862c..de1fd59fe13699b5efb84500be5aff699fe1a1d9 100644 (file)
@@ -1510,7 +1510,7 @@ static void pl330_dotask(unsigned long data)
 /* Returns 1 if state was updated, 0 otherwise */
 static int pl330_update(struct pl330_dmac *pl330)
 {
-       struct dma_pl330_desc *descdone, *tmp;
+       struct dma_pl330_desc *descdone;
        unsigned long flags;
        void __iomem *regs;
        u32 val;
@@ -1588,7 +1588,9 @@ static int pl330_update(struct pl330_dmac *pl330)
        }
 
        /* Now that we are in no hurry, do the callbacks */
-       list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
+       while (!list_empty(&pl330->req_done)) {
+               descdone = list_first_entry(&pl330->req_done,
+                                           struct dma_pl330_desc, rqd);
                list_del(&descdone->rqd);
                spin_unlock_irqrestore(&pl330->lock, flags);
                dma_pl330_rqcb(descdone, PL330_ERR_NONE);
index d076940e0c69ada23dc451367df9d71c88649dc7..d29275b97e8453a1fd7731fe4564bf0a61b9460c 100644 (file)
@@ -393,6 +393,7 @@ struct bam_device {
        struct device_dma_parameters dma_parms;
        struct bam_chan *channels;
        u32 num_channels;
+       u32 num_ees;
 
        /* execution environment ID, from DT */
        u32 ee;
@@ -934,12 +935,15 @@ static void bam_apply_new_config(struct bam_chan *bchan,
        struct bam_device *bdev = bchan->bdev;
        u32 maxburst;
 
-       if (dir == DMA_DEV_TO_MEM)
-               maxburst = bchan->slave.src_maxburst;
-       else
-               maxburst = bchan->slave.dst_maxburst;
+       if (!bdev->controlled_remotely) {
+               if (dir == DMA_DEV_TO_MEM)
+                       maxburst = bchan->slave.src_maxburst;
+               else
+                       maxburst = bchan->slave.dst_maxburst;
 
-       writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+               writel_relaxed(maxburst,
+                              bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+       }
 
        bchan->reconfigure = 0;
 }
@@ -1128,15 +1132,19 @@ static int bam_init(struct bam_device *bdev)
        u32 val;
 
        /* read revision and configuration information */
-       val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
-       val &= NUM_EES_MASK;
+       if (!bdev->num_ees) {
+               val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
+               bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
+       }
 
        /* check that configured EE is within range */
-       if (bdev->ee >= val)
+       if (bdev->ee >= bdev->num_ees)
                return -EINVAL;
 
-       val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
-       bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+       if (!bdev->num_channels) {
+               val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
+               bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+       }
 
        if (bdev->controlled_remotely)
                return 0;
@@ -1232,9 +1240,25 @@ static int bam_dma_probe(struct platform_device *pdev)
        bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
                                                "qcom,controlled-remotely");
 
+       if (bdev->controlled_remotely) {
+               ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
+                                          &bdev->num_channels);
+               if (ret)
+                       dev_err(bdev->dev, "num-channels unspecified in dt\n");
+
+               ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
+                                          &bdev->num_ees);
+               if (ret)
+                       dev_err(bdev->dev, "num-ees unspecified in dt\n");
+       }
+
        bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
-       if (IS_ERR(bdev->bamclk))
-               return PTR_ERR(bdev->bamclk);
+       if (IS_ERR(bdev->bamclk)) {
+               if (!bdev->controlled_remotely)
+                       return PTR_ERR(bdev->bamclk);
+
+               bdev->bamclk = NULL;
+       }
 
        ret = clk_prepare_enable(bdev->bamclk);
        if (ret) {
@@ -1309,6 +1333,11 @@ static int bam_dma_probe(struct platform_device *pdev)
        if (ret)
                goto err_unregister_dma;
 
+       if (bdev->controlled_remotely) {
+               pm_runtime_disable(&pdev->dev);
+               return 0;
+       }
+
        pm_runtime_irq_safe(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
        pm_runtime_use_autosuspend(&pdev->dev);
@@ -1392,7 +1421,8 @@ static int __maybe_unused bam_dma_suspend(struct device *dev)
 {
        struct bam_device *bdev = dev_get_drvdata(dev);
 
-       pm_runtime_force_suspend(dev);
+       if (!bdev->controlled_remotely)
+               pm_runtime_force_suspend(dev);
 
        clk_unprepare(bdev->bamclk);
 
@@ -1408,7 +1438,8 @@ static int __maybe_unused bam_dma_resume(struct device *dev)
        if (ret)
                return ret;
 
-       pm_runtime_force_resume(dev);
+       if (!bdev->controlled_remotely)
+               pm_runtime_force_resume(dev);
 
        return 0;
 }
index d0cacdb0713eca47360e4f5ceedc8dc6428145bb..2a2ccd9c78e4cc1f8e7b7a72b22613530a0c1afe 100644 (file)
@@ -1301,8 +1301,17 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
         * If the cookie doesn't correspond to the currently running transfer
         * then the descriptor hasn't been processed yet, and the residue is
         * equal to the full descriptor size.
+        * Also, a client driver is possible to call this function before
+        * rcar_dmac_isr_channel_thread() runs. In this case, the "desc.running"
+        * will be the next descriptor, and the done list will appear. So, if
+        * the argument cookie matches the done list's cookie, we can assume
+        * the residue is zero.
         */
        if (cookie != desc->async_tx.cookie) {
+               list_for_each_entry(desc, &chan->desc.done, node) {
+                       if (cookie == desc->async_tx.cookie)
+                               return 0;
+               }
                list_for_each_entry(desc, &chan->desc.pending, node) {
                        if (cookie == desc->async_tx.cookie)
                                return desc->size;
@@ -1677,8 +1686,8 @@ static const struct dev_pm_ops rcar_dmac_pm = {
         *   - Wait for the current transfer to complete and stop the device,
         *   - Resume transfers, if any.
         */
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                     pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
                           NULL)
 };
index 786fc8fcc38ed6a2f2442a18a36bc4ab6ad4c273..8c5807362a257422ea89c805f92dc411bf8ded8e 100644 (file)
@@ -5,6 +5,7 @@
  *
  * Copyright (C) M'boumba Cedric Madianga 2015
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
  *
  * License terms:  GNU General Public License (GPL), version 2
  */
 #define STM32_DMA_LIFCR                        0x0008 /* DMA Low Int Flag Clear Reg */
 #define STM32_DMA_HIFCR                        0x000c /* DMA High Int Flag Clear Reg */
 #define STM32_DMA_TCI                  BIT(5) /* Transfer Complete Interrupt */
+#define STM32_DMA_HTI                  BIT(4) /* Half Transfer Interrupt */
 #define STM32_DMA_TEI                  BIT(3) /* Transfer Error Interrupt */
 #define STM32_DMA_DMEI                 BIT(2) /* Direct Mode Error Interrupt */
 #define STM32_DMA_FEI                  BIT(0) /* FIFO Error Interrupt */
+#define STM32_DMA_MASKI                        (STM32_DMA_TCI \
+                                        | STM32_DMA_TEI \
+                                        | STM32_DMA_DMEI \
+                                        | STM32_DMA_FEI)
 
 /* DMA Stream x Configuration Register */
 #define STM32_DMA_SCR(x)               (0x0010 + 0x18 * (x)) /* x = 0..7 */
@@ -60,7 +66,8 @@
 #define STM32_DMA_SCR_PINC             BIT(9) /* Peripheral increment mode */
 #define STM32_DMA_SCR_CIRC             BIT(8) /* Circular mode */
 #define STM32_DMA_SCR_PFCTRL           BIT(5) /* Peripheral Flow Controller */
-#define STM32_DMA_SCR_TCIE             BIT(4) /* Transfer Cplete Int Enable*/
+#define STM32_DMA_SCR_TCIE             BIT(4) /* Transfer Complete Int Enable
+                                               */
 #define STM32_DMA_SCR_TEIE             BIT(2) /* Transfer Error Int Enable */
 #define STM32_DMA_SCR_DMEIE            BIT(1) /* Direct Mode Err Int Enable */
 #define STM32_DMA_SCR_EN               BIT(0) /* Stream Enable */
 #define STM32_DMA_FIFO_THRESHOLD_FULL                  0x03
 
 #define STM32_DMA_MAX_DATA_ITEMS       0xffff
+/*
+ * Valid transfer starts from @0 to @0xFFFE leading to unaligned scatter
+ * gather at boundary. Thus it's safer to round down this value on FIFO
+ * size (16 Bytes)
+ */
+#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS       \
+       ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
 #define STM32_DMA_MAX_CHANNELS         0x08
 #define STM32_DMA_MAX_REQUEST_ID       0x08
 #define STM32_DMA_MAX_DATA_PARAM       0x03
+#define STM32_DMA_FIFO_SIZE            16      /* FIFO is 16 bytes */
+#define STM32_DMA_MIN_BURST            4
 #define STM32_DMA_MAX_BURST            16
 
+/* DMA Features */
+#define STM32_DMA_THRESHOLD_FTR_MASK   GENMASK(1, 0)
+#define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK)
+
 enum stm32_dma_width {
        STM32_DMA_BYTE,
        STM32_DMA_HALF_WORD,
@@ -129,11 +149,18 @@ enum stm32_dma_burst_size {
        STM32_DMA_BURST_INCR16,
 };
 
+/**
+ * struct stm32_dma_cfg - STM32 DMA custom configuration
+ * @channel_id: channel ID
+ * @request_line: DMA request
+ * @stream_config: 32bit mask specifying the DMA channel configuration
+ * @features: 32bit mask specifying the DMA Feature list
+ */
 struct stm32_dma_cfg {
        u32 channel_id;
        u32 request_line;
        u32 stream_config;
-       u32 threshold;
+       u32 features;
 };
 
 struct stm32_dma_chan_reg {
@@ -171,6 +198,9 @@ struct stm32_dma_chan {
        u32 next_sg;
        struct dma_slave_config dma_sconfig;
        struct stm32_dma_chan_reg chan_reg;
+       u32 threshold;
+       u32 mem_burst;
+       u32 mem_width;
 };
 
 struct stm32_dma_device {
@@ -235,6 +265,85 @@ static int stm32_dma_get_width(struct stm32_dma_chan *chan,
        }
 }
 
+/*
+ * stm32_dma_get_max_width - widest usable memory bus width for a buffer
+ * @buf_len: transfer length in bytes
+ * @threshold: FIFO threshold selector
+ *
+ * Start from 4 bytes when the FIFO threshold is FULL, otherwise from
+ * 2 bytes, then halve the width until it both fits within and evenly
+ * divides @buf_len (never going below 1 byte).
+ */
+static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
+                                                      u32 threshold)
+{
+       enum dma_slave_buswidth max_width;
+
+       if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
+               max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       else
+               max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+       while ((buf_len < max_width  || buf_len % max_width) &&
+              max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
+               max_width = max_width >> 1;
+
+       return max_width;
+}
+
+/*
+ * stm32_dma_fifo_threshold_is_allowed - validate a burst/threshold pair
+ * @burst: burst length in beats (0 means single transfers)
+ * @threshold: FIFO threshold selector
+ * @width: bus width in bytes
+ *
+ * Returns true when @width is defined and either no burst is requested
+ * or the FIFO content at @threshold splits into whole bursts.
+ */
+static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
+                                               enum dma_slave_buswidth width)
+{
+       u32 remaining;
+
+       if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+               if (burst != 0) {
+                       /*
+                        * If number of beats fit in several whole bursts
+                        * this configuration is allowed.
+                        */
+                       remaining = ((STM32_DMA_FIFO_SIZE / width) *
+                                    (threshold + 1) / 4) % burst;
+
+                       if (remaining == 0)
+                               return true;
+               } else {
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+/*
+ * stm32_dma_is_burst_possible - can bursting be used for this buffer?
+ * @buf_len: transfer length in bytes
+ * @threshold: FIFO threshold selector
+ *
+ * Bursting requires the buffer to hold at least one full burst for the
+ * configured threshold (a full burst at FULL, half of one at HALFFULL);
+ * any other threshold value disables bursting.
+ */
+static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
+{
+       switch (threshold) {
+       case STM32_DMA_FIFO_THRESHOLD_FULL:
+               return buf_len >= STM32_DMA_MAX_BURST;
+       case STM32_DMA_FIFO_THRESHOLD_HALFFULL:
+               return buf_len >= STM32_DMA_MAX_BURST / 2;
+       default:
+               return false;
+       }
+}
+
+/*
+ * stm32_dma_get_best_burst - largest valid burst not above @max_burst
+ * @buf_len: transfer length in bytes
+ * @max_burst: upper bound for the burst length in beats
+ * @threshold: FIFO threshold selector
+ * @width: bus width in bytes
+ *
+ * Halves the candidate burst until it fits @buf_len and passes
+ * stm32_dma_fifo_threshold_is_allowed(); returns 0 (no burst) when
+ * @max_burst is 1, bursting is impossible for @buf_len/@threshold, or
+ * no candidate above STM32_DMA_MIN_BURST qualifies.
+ */
+static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
+                                   enum dma_slave_buswidth width)
+{
+       u32 best_burst = max_burst;
+
+       if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
+               return 0;
+
+       while ((buf_len < best_burst * width && best_burst > 1) ||
+              !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
+                                                   width)) {
+               if (best_burst > STM32_DMA_MIN_BURST)
+                       best_burst = best_burst >> 1;
+               else
+                       best_burst = 0;
+       }
+
+       return best_burst;
+}
+
 static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
 {
        switch (maxburst) {
@@ -254,12 +363,12 @@ static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
 }
 
 static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
-                                     u32 src_maxburst, u32 dst_maxburst)
+                                     u32 src_burst, u32 dst_burst)
 {
        chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
        chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;
 
-       if ((!src_maxburst) && (!dst_maxburst)) {
+       if (!src_burst && !dst_burst) {
                /* Using direct mode */
                chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
        } else {
@@ -300,7 +409,7 @@ static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
 
        flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));
 
-       return flags;
+       return flags & STM32_DMA_MASKI;
 }
 
 static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
@@ -315,6 +424,7 @@ static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
         * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
         * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
         */
+       flags &= STM32_DMA_MASKI;
        dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));
 
        if (chan->id & 4)
@@ -429,6 +539,8 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
        dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n", sfcr);
 }
 
+static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
+
 static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 {
        struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
@@ -471,6 +583,9 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
        if (status)
                stm32_dma_irq_clear(chan, status);
 
+       if (chan->desc->cyclic)
+               stm32_dma_configure_next_sg(chan);
+
        stm32_dma_dump_reg(chan);
 
        /* Start DMA */
@@ -541,13 +656,29 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
        status = stm32_dma_irq_status(chan);
        scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
 
-       if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
+       if (status & STM32_DMA_TCI) {
                stm32_dma_irq_clear(chan, STM32_DMA_TCI);
-               stm32_dma_handle_chan_done(chan);
-
-       } else {
+               if (scr & STM32_DMA_SCR_TCIE)
+                       stm32_dma_handle_chan_done(chan);
+               status &= ~STM32_DMA_TCI;
+       }
+       if (status & STM32_DMA_HTI) {
+               stm32_dma_irq_clear(chan, STM32_DMA_HTI);
+               status &= ~STM32_DMA_HTI;
+       }
+       if (status & STM32_DMA_FEI) {
+               stm32_dma_irq_clear(chan, STM32_DMA_FEI);
+               status &= ~STM32_DMA_FEI;
+               if (!(scr & STM32_DMA_SCR_EN))
+                       dev_err(chan2dev(chan), "FIFO Error\n");
+               else
+                       dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+       }
+       if (status) {
                stm32_dma_irq_clear(chan, status);
                dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
+               if (!(scr & STM32_DMA_SCR_EN))
+                       dev_err(chan2dev(chan), "chan disabled by HW\n");
        }
 
        spin_unlock(&chan->vchan.lock);
@@ -564,45 +695,59 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
        if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
                dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
                stm32_dma_start_transfer(chan);
-               if (chan->desc->cyclic)
-                       stm32_dma_configure_next_sg(chan);
+
        }
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }
 
 static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
                                    enum dma_transfer_direction direction,
-                                   enum dma_slave_buswidth *buswidth)
+                                   enum dma_slave_buswidth *buswidth,
+                                   u32 buf_len)
 {
        enum dma_slave_buswidth src_addr_width, dst_addr_width;
        int src_bus_width, dst_bus_width;
        int src_burst_size, dst_burst_size;
-       u32 src_maxburst, dst_maxburst;
-       u32 dma_scr = 0;
+       u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
+       u32 dma_scr, threshold;
 
        src_addr_width = chan->dma_sconfig.src_addr_width;
        dst_addr_width = chan->dma_sconfig.dst_addr_width;
        src_maxburst = chan->dma_sconfig.src_maxburst;
        dst_maxburst = chan->dma_sconfig.dst_maxburst;
+       threshold = chan->threshold;
 
        switch (direction) {
        case DMA_MEM_TO_DEV:
+               /* Set device data size */
                dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
                if (dst_bus_width < 0)
                        return dst_bus_width;
 
-               dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst);
+               /* Set device burst size */
+               dst_best_burst = stm32_dma_get_best_burst(buf_len,
+                                                         dst_maxburst,
+                                                         threshold,
+                                                         dst_addr_width);
+
+               dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
                if (dst_burst_size < 0)
                        return dst_burst_size;
 
-               if (!src_addr_width)
-                       src_addr_width = dst_addr_width;
-
+               /* Set memory data size */
+               src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+               chan->mem_width = src_addr_width;
                src_bus_width = stm32_dma_get_width(chan, src_addr_width);
                if (src_bus_width < 0)
                        return src_bus_width;
 
-               src_burst_size = stm32_dma_get_burst(chan, src_maxburst);
+               /* Set memory burst size */
+               src_maxburst = STM32_DMA_MAX_BURST;
+               src_best_burst = stm32_dma_get_best_burst(buf_len,
+                                                         src_maxburst,
+                                                         threshold,
+                                                         src_addr_width);
+               src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
                if (src_burst_size < 0)
                        return src_burst_size;
 
@@ -612,27 +757,46 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
                        STM32_DMA_SCR_PBURST(dst_burst_size) |
                        STM32_DMA_SCR_MBURST(src_burst_size);
 
+               /* Set FIFO threshold */
+               chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
+               chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+
+               /* Set peripheral address */
                chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
                *buswidth = dst_addr_width;
                break;
 
        case DMA_DEV_TO_MEM:
+               /* Set device data size */
                src_bus_width = stm32_dma_get_width(chan, src_addr_width);
                if (src_bus_width < 0)
                        return src_bus_width;
 
-               src_burst_size = stm32_dma_get_burst(chan, src_maxburst);
+               /* Set device burst size */
+               src_best_burst = stm32_dma_get_best_burst(buf_len,
+                                                         src_maxburst,
+                                                         threshold,
+                                                         src_addr_width);
+               chan->mem_burst = src_best_burst;
+               src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
                if (src_burst_size < 0)
                        return src_burst_size;
 
-               if (!dst_addr_width)
-                       dst_addr_width = src_addr_width;
-
+               /* Set memory data size */
+               dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+               chan->mem_width = dst_addr_width;
                dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
                if (dst_bus_width < 0)
                        return dst_bus_width;
 
-               dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst);
+               /* Set memory burst size */
+               dst_maxburst = STM32_DMA_MAX_BURST;
+               dst_best_burst = stm32_dma_get_best_burst(buf_len,
+                                                         dst_maxburst,
+                                                         threshold,
+                                                         dst_addr_width);
+               chan->mem_burst = dst_best_burst;
+               dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
                if (dst_burst_size < 0)
                        return dst_burst_size;
 
@@ -642,6 +806,11 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
                        STM32_DMA_SCR_PBURST(src_burst_size) |
                        STM32_DMA_SCR_MBURST(dst_burst_size);
 
+               /* Set FIFO threshold */
+               chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
+               chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+
+               /* Set peripheral address */
                chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
                *buswidth = chan->dma_sconfig.src_addr_width;
                break;
@@ -651,8 +820,9 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
                return -EINVAL;
        }
 
-       stm32_dma_set_fifo_config(chan, src_maxburst, dst_maxburst);
+       stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);
 
+       /* Set DMA control register */
        chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
                        STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
                        STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
@@ -692,10 +862,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
        if (!desc)
                return NULL;
 
-       ret = stm32_dma_set_xfer_param(chan, direction, &buswidth);
-       if (ret < 0)
-               goto err;
-
        /* Set peripheral flow controller */
        if (chan->dma_sconfig.device_fc)
                chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
@@ -703,10 +869,15 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
                chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
 
        for_each_sg(sgl, sg, sg_len, i) {
+               ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
+                                              sg_dma_len(sg));
+               if (ret < 0)
+                       goto err;
+
                desc->sg_req[i].len = sg_dma_len(sg);
 
                nb_data_items = desc->sg_req[i].len / buswidth;
-               if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) {
+               if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
                        dev_err(chan2dev(chan), "nb items not supported\n");
                        goto err;
                }
@@ -767,12 +938,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
                return NULL;
        }
 
-       ret = stm32_dma_set_xfer_param(chan, direction, &buswidth);
+       ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
        if (ret < 0)
                return NULL;
 
        nb_data_items = period_len / buswidth;
-       if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) {
+       if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
                dev_err(chan2dev(chan), "number of items not supported\n");
                return NULL;
        }
@@ -816,35 +987,45 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
        dma_addr_t src, size_t len, unsigned long flags)
 {
        struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       u32 num_sgs;
+       enum dma_slave_buswidth max_width;
        struct stm32_dma_desc *desc;
        size_t xfer_count, offset;
+       u32 num_sgs, best_burst, dma_burst, threshold;
        int i;
 
-       num_sgs = DIV_ROUND_UP(len, STM32_DMA_MAX_DATA_ITEMS);
+       num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
        desc = stm32_dma_alloc_desc(num_sgs);
        if (!desc)
                return NULL;
 
+       threshold = chan->threshold;
+
        for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
                xfer_count = min_t(size_t, len - offset,
-                                  STM32_DMA_MAX_DATA_ITEMS);
+                                  STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
 
-               desc->sg_req[i].len = xfer_count;
+               /* Compute best burst size */
+               max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+               best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
+                                                     threshold, max_width);
+               dma_burst = stm32_dma_get_burst(chan, best_burst);
 
                stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
                desc->sg_req[i].chan_reg.dma_scr =
                        STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
+                       STM32_DMA_SCR_PBURST(dma_burst) |
+                       STM32_DMA_SCR_MBURST(dma_burst) |
                        STM32_DMA_SCR_MINC |
                        STM32_DMA_SCR_PINC |
                        STM32_DMA_SCR_TCIE |
                        STM32_DMA_SCR_TEIE;
-               desc->sg_req[i].chan_reg.dma_sfcr = STM32_DMA_SFCR_DMDIS |
-                       STM32_DMA_SFCR_FTH(STM32_DMA_FIFO_THRESHOLD_FULL) |
-                       STM32_DMA_SFCR_FEIE;
+               desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
+               desc->sg_req[i].chan_reg.dma_sfcr |=
+                       STM32_DMA_SFCR_FTH(threshold);
                desc->sg_req[i].chan_reg.dma_spar = src + offset;
                desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
                desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
+               desc->sg_req[i].len = xfer_count;
        }
 
        desc->num_sgs = num_sgs;
@@ -869,6 +1050,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
                                     struct stm32_dma_desc *desc,
                                     u32 next_sg)
 {
+       u32 modulo, burst_size;
        u32 residue = 0;
        int i;
 
@@ -876,8 +1058,10 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
         * In cyclic mode, for the last period, residue = remaining bytes from
         * NDTR
         */
-       if (chan->desc->cyclic && next_sg == 0)
-               return stm32_dma_get_remaining_bytes(chan);
+       if (chan->desc->cyclic && next_sg == 0) {
+               residue = stm32_dma_get_remaining_bytes(chan);
+               goto end;
+       }
 
        /*
         * For all other periods in cyclic mode, and in sg mode,
@@ -888,6 +1072,15 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
                residue += desc->sg_req[i].len;
        residue += stm32_dma_get_remaining_bytes(chan);
 
+end:
+       if (!chan->mem_burst)
+               return residue;
+
+       burst_size = chan->mem_burst * chan->mem_width;
+       modulo = residue % burst_size;
+       if (modulo)
+               residue = residue - modulo + burst_size;
+
        return residue;
 }
 
@@ -902,7 +1095,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
        u32 residue = 0;
 
        status = dma_cookie_status(c, cookie, state);
-       if ((status == DMA_COMPLETE) || (!state))
+       if (status == DMA_COMPLETE || !state)
                return status;
 
        spin_lock_irqsave(&chan->vchan.lock, flags);
@@ -966,7 +1159,7 @@ static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
 }
 
 static void stm32_dma_set_config(struct stm32_dma_chan *chan,
-                         struct stm32_dma_cfg *cfg)
+                                struct stm32_dma_cfg *cfg)
 {
        stm32_dma_clear_reg(&chan->chan_reg);
 
@@ -976,7 +1169,7 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
        /* Enable Interrupts  */
        chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;
 
-       chan->chan_reg.dma_sfcr = cfg->threshold & STM32_DMA_SFCR_FTH_MASK;
+       chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
 }
 
 static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
@@ -996,10 +1189,10 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
        cfg.channel_id = dma_spec->args[0];
        cfg.request_line = dma_spec->args[1];
        cfg.stream_config = dma_spec->args[2];
-       cfg.threshold = dma_spec->args[3];
+       cfg.features = dma_spec->args[3];
 
-       if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) ||
-           (cfg.request_line >= STM32_DMA_MAX_REQUEST_ID)) {
+       if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
+           cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
                dev_err(dev, "Bad channel and/or request id\n");
                return NULL;
        }
index f838764993eb366d8813052835e58c6a60f57aca..861be5cab1dff22b5ae2892eab265585a0c2de29 100644 (file)
@@ -470,7 +470,11 @@ typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
                                const struct dmaengine_result *result);
 
 struct dmaengine_unmap_data {
+#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+       u16 map_cnt;
+#else
        u8 map_cnt;
+#endif
        u8 to_cnt;
        u8 from_cnt;
        u8 bidi_cnt;