dmaengine: stm32: New directory for STM32 DMA controllers drivers
author     Amelie Delaunay <amelie.delaunay@foss.st.com>
           Fri, 31 May 2024 15:07:02 +0000 (17:07 +0200)
committer  Vinod Koul <vkoul@kernel.org>
           Tue, 11 Jun 2024 18:25:33 +0000 (23:55 +0530)
Gather the STM32 DMA controllers under drivers/dma/stm32/

Signed-off-by: Amelie Delaunay <amelie.delaunay@foss.st.com>
Link: https://lore.kernel.org/r/20240531150712.2503554-3-amelie.delaunay@foss.st.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/stm32-dma.c [deleted file]
drivers/dma/stm32-dmamux.c [deleted file]
drivers/dma/stm32-mdma.c [deleted file]
drivers/dma/stm32/Kconfig [new file with mode: 0644]
drivers/dma/stm32/Makefile [new file with mode: 0644]
drivers/dma/stm32/stm32-dma.c [new file with mode: 0644]
drivers/dma/stm32/stm32-dmamux.c [new file with mode: 0644]
drivers/dma/stm32/stm32-mdma.c [new file with mode: 0644]

index 002a5ec806207ca2614615b37eb1e38303292728..32b4256ef87400c0b91bf23bd7769c110b70b374 100644
@@ -568,38 +568,6 @@ config ST_FDMA
          Say Y here if you have such a chipset.
          If unsure, say N.
 
-config STM32_DMA
-       bool "STMicroelectronics STM32 DMA support"
-       depends on ARCH_STM32 || COMPILE_TEST
-       select DMA_ENGINE
-       select DMA_VIRTUAL_CHANNELS
-       help
-         Enable support for the on-chip DMA controller on STMicroelectronics
-         STM32 MCUs.
-         If you have a board based on such a MCU and wish to use DMA say Y
-         here.
-
-config STM32_DMAMUX
-       bool "STMicroelectronics STM32 dma multiplexer support"
-       depends on STM32_DMA || COMPILE_TEST
-       help
-         Enable support for the on-chip DMA multiplexer on STMicroelectronics
-         STM32 MCUs.
-         If you have a board based on such a MCU and wish to use DMAMUX say Y
-         here.
-
-config STM32_MDMA
-       bool "STMicroelectronics STM32 master dma support"
-       depends on ARCH_STM32 || COMPILE_TEST
-       depends on OF
-       select DMA_ENGINE
-       select DMA_VIRTUAL_CHANNELS
-       help
-         Enable support for the on-chip MDMA controller on STMicroelectronics
-         STM32 platforms.
-         If you have a board based on STM32 SoC and wish to use the master DMA
-         say Y here.
-
 config SPRD_DMA
        tristate "Spreadtrum DMA support"
        depends on ARCH_SPRD || COMPILE_TEST
@@ -772,6 +740,8 @@ source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
 
 source "drivers/dma/lgm/Kconfig"
 
+source "drivers/dma/stm32/Kconfig"
+
 # clients
 comment "DMA Clients"
        depends on DMA_ENGINE
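
The new drivers/dma/stm32/Kconfig sourced above is not shown in this view. A plausible sketch of it, assuming the three entries removed above are carried over essentially verbatim (help text abbreviated here):

# SPDX-License-Identifier: GPL-2.0-only
config STM32_DMA
	bool "STMicroelectronics STM32 DMA support"
	depends on ARCH_STM32 || COMPILE_TEST
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
	  Enable support for the on-chip DMA controller on STMicroelectronics
	  STM32 MCUs.

config STM32_DMAMUX
	bool "STMicroelectronics STM32 dma multiplexer support"
	depends on STM32_DMA || COMPILE_TEST
	help
	  Enable support for the on-chip DMA multiplexer on STMicroelectronics
	  STM32 MCUs.

config STM32_MDMA
	bool "STMicroelectronics STM32 master dma support"
	depends on ARCH_STM32 || COMPILE_TEST
	depends on OF
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
	  Enable support for the on-chip MDMA controller on STMicroelectronics
	  STM32 platforms.
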
index 802ca916f05f509f9e7645a8c6c8b5f43ae80325..374ea98faf43e259d23af5e5aaae7f16ecb61243 100644
@@ -70,9 +70,6 @@ obj-$(CONFIG_PXA_DMA) += pxa_dma.o
 obj-$(CONFIG_RENESAS_DMA) += sh/
 obj-$(CONFIG_SF_PDMA) += sf-pdma/
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
-obj-$(CONFIG_STM32_DMA) += stm32-dma.o
-obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
-obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
 obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
@@ -88,5 +85,6 @@ obj-$(CONFIG_INTEL_LDMA) += lgm/
 
 obj-y += mediatek/
 obj-y += qcom/
+obj-y += stm32/
 obj-y += ti/
 obj-y += xilinx/
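
Correspondingly, the new drivers/dma/stm32/Makefile pulled in by the obj-y rule above would, assuming the three removed rules move over unchanged, look like:

# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_STM32_DMA) += stm32-dma.o
obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
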
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
deleted file mode 100644
index 90857d0..0000000
--- a/drivers/dma/stm32-dma.c
+++ /dev/null
@@ -1,1782 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for STM32 DMA controller
- *
- * Inspired by dma-jz4740.c and tegra20-apb-dma.c
- *
- * Copyright (C) M'boumba Cedric Madianga 2015
- * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
- *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
- */
-
-#include <linux/bitfield.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/iopoll.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_dma.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/reset.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-#include "virt-dma.h"
-
-#define STM32_DMA_LISR                 0x0000 /* DMA Low Int Status Reg */
-#define STM32_DMA_HISR                 0x0004 /* DMA High Int Status Reg */
-#define STM32_DMA_ISR(n)               (((n) & 4) ? STM32_DMA_HISR : STM32_DMA_LISR)
-#define STM32_DMA_LIFCR                        0x0008 /* DMA Low Int Flag Clear Reg */
-#define STM32_DMA_HIFCR                        0x000c /* DMA High Int Flag Clear Reg */
-#define STM32_DMA_IFCR(n)              (((n) & 4) ? STM32_DMA_HIFCR : STM32_DMA_LIFCR)
-#define STM32_DMA_TCI                  BIT(5) /* Transfer Complete Interrupt */
-#define STM32_DMA_HTI                  BIT(4) /* Half Transfer Interrupt */
-#define STM32_DMA_TEI                  BIT(3) /* Transfer Error Interrupt */
-#define STM32_DMA_DMEI                 BIT(2) /* Direct Mode Error Interrupt */
-#define STM32_DMA_FEI                  BIT(0) /* FIFO Error Interrupt */
-#define STM32_DMA_MASKI                        (STM32_DMA_TCI \
-                                        | STM32_DMA_TEI \
-                                        | STM32_DMA_DMEI \
-                                        | STM32_DMA_FEI)
-/*
- * If (chan->id % 4) is 2 or 3, left shift the mask by 16 bits;
- * if (chan->id % 4) is 1 or 3, additionally left shift the mask by 6 bits.
- */
-#define STM32_DMA_FLAGS_SHIFT(n)       ({ typeof(n) (_n) = (n); \
-                                          (((_n) & 2) << 3) | (((_n) & 1) * 6); })
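/*
 * Worked example, derived from the macro above: STM32_DMA_FLAGS_SHIFT(n)
 * = ((n & 2) << 3) | ((n & 1) * 6), so (chan->id % 4) = 0, 1, 2, 3 yields
 * shifts of 0, 6, 16 and 22 bits respectively. Channel 5, for instance,
 * uses HISR/HIFCR (via STM32_DMA_ISR/IFCR) with its flags at bits 6..11.
 */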
-
-/* DMA Stream x Configuration Register */
-#define STM32_DMA_SCR(x)               (0x0010 + 0x18 * (x)) /* x = 0..7 */
-#define STM32_DMA_SCR_REQ_MASK         GENMASK(27, 25)
-#define STM32_DMA_SCR_MBURST_MASK      GENMASK(24, 23)
-#define STM32_DMA_SCR_PBURST_MASK      GENMASK(22, 21)
-#define STM32_DMA_SCR_PL_MASK          GENMASK(17, 16)
-#define STM32_DMA_SCR_MSIZE_MASK       GENMASK(14, 13)
-#define STM32_DMA_SCR_PSIZE_MASK       GENMASK(12, 11)
-#define STM32_DMA_SCR_DIR_MASK         GENMASK(7, 6)
-#define STM32_DMA_SCR_TRBUFF           BIT(20) /* Bufferable transfer for USART/UART */
-#define STM32_DMA_SCR_CT               BIT(19) /* Target in double buffer */
-#define STM32_DMA_SCR_DBM              BIT(18) /* Double Buffer Mode */
-#define STM32_DMA_SCR_PINCOS           BIT(15) /* Peripheral inc offset size */
-#define STM32_DMA_SCR_MINC             BIT(10) /* Memory increment mode */
-#define STM32_DMA_SCR_PINC             BIT(9) /* Peripheral increment mode */
-#define STM32_DMA_SCR_CIRC             BIT(8) /* Circular mode */
-#define STM32_DMA_SCR_PFCTRL           BIT(5) /* Peripheral Flow Controller */
-#define STM32_DMA_SCR_TCIE             BIT(4) /* Transfer Complete Int Enable */
-#define STM32_DMA_SCR_TEIE             BIT(2) /* Transfer Error Int Enable */
-#define STM32_DMA_SCR_DMEIE            BIT(1) /* Direct Mode Err Int Enable */
-#define STM32_DMA_SCR_EN               BIT(0) /* Stream Enable */
-#define STM32_DMA_SCR_CFG_MASK         (STM32_DMA_SCR_PINC \
-                                       | STM32_DMA_SCR_MINC \
-                                       | STM32_DMA_SCR_PINCOS \
-                                       | STM32_DMA_SCR_PL_MASK)
-#define STM32_DMA_SCR_IRQ_MASK         (STM32_DMA_SCR_TCIE \
-                                       | STM32_DMA_SCR_TEIE \
-                                       | STM32_DMA_SCR_DMEIE)
-
-/* DMA Stream x number of data register */
-#define STM32_DMA_SNDTR(x)             (0x0014 + 0x18 * (x))
-
-/* DMA stream peripheral address register */
-#define STM32_DMA_SPAR(x)              (0x0018 + 0x18 * (x))
-
-/* DMA stream x memory 0 address register */
-#define STM32_DMA_SM0AR(x)             (0x001c + 0x18 * (x))
-
-/* DMA stream x memory 1 address register */
-#define STM32_DMA_SM1AR(x)             (0x0020 + 0x18 * (x))
-
-/* DMA stream x FIFO control register */
-#define STM32_DMA_SFCR(x)              (0x0024 + 0x18 * (x))
-#define STM32_DMA_SFCR_FTH_MASK                GENMASK(1, 0)
-#define STM32_DMA_SFCR_FEIE            BIT(7) /* FIFO error interrupt enable */
-#define STM32_DMA_SFCR_DMDIS           BIT(2) /* Direct mode disable */
-#define STM32_DMA_SFCR_MASK            (STM32_DMA_SFCR_FEIE \
-                                       | STM32_DMA_SFCR_DMDIS)
-
-/* DMA direction */
-#define STM32_DMA_DEV_TO_MEM           0x00
-#define STM32_DMA_MEM_TO_DEV           0x01
-#define STM32_DMA_MEM_TO_MEM           0x02
-
-/* DMA priority level */
-#define STM32_DMA_PRIORITY_LOW         0x00
-#define STM32_DMA_PRIORITY_MEDIUM      0x01
-#define STM32_DMA_PRIORITY_HIGH                0x02
-#define STM32_DMA_PRIORITY_VERY_HIGH   0x03
-
-/* DMA FIFO threshold selection */
-#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL          0x00
-#define STM32_DMA_FIFO_THRESHOLD_HALFFULL              0x01
-#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL         0x02
-#define STM32_DMA_FIFO_THRESHOLD_FULL                  0x03
-#define STM32_DMA_FIFO_THRESHOLD_NONE                  0x04
-
-#define STM32_DMA_MAX_DATA_ITEMS       0xffff
-/*
- * A maximal transfer spans addresses @0 to @0xFFFE, so the next scatter-
- * gather chunk would start at the unaligned address @0xFFFF. Thus it is
- * safer to round this value down to the FIFO size (16 bytes), i.e.
- * ALIGN_DOWN(0xFFFF, 16) = 0xFFF0 (65520) data items.
- */
-#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS       \
-       ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
-#define STM32_DMA_MAX_CHANNELS         0x08
-#define STM32_DMA_MAX_REQUEST_ID       0x08
-#define STM32_DMA_MAX_DATA_PARAM       0x03
-#define STM32_DMA_FIFO_SIZE            16      /* FIFO is 16 bytes */
-#define STM32_DMA_MIN_BURST            4
-#define STM32_DMA_MAX_BURST            16
-
-/* DMA Features */
-#define STM32_DMA_THRESHOLD_FTR_MASK   GENMASK(1, 0)
-#define STM32_DMA_DIRECT_MODE_MASK     BIT(2)
-#define STM32_DMA_ALT_ACK_MODE_MASK    BIT(4)
-#define STM32_DMA_MDMA_STREAM_ID_MASK  GENMASK(19, 16)
-
-enum stm32_dma_width {
-       STM32_DMA_BYTE,
-       STM32_DMA_HALF_WORD,
-       STM32_DMA_WORD,
-};
-
-enum stm32_dma_burst_size {
-       STM32_DMA_BURST_SINGLE,
-       STM32_DMA_BURST_INCR4,
-       STM32_DMA_BURST_INCR8,
-       STM32_DMA_BURST_INCR16,
-};
-
-/**
- * struct stm32_dma_cfg - STM32 DMA custom configuration
- * @channel_id: channel ID
- * @request_line: DMA request
- * @stream_config: 32bit mask specifying the DMA channel configuration
- * @features: 32bit mask specifying the DMA Feature list
- */
-struct stm32_dma_cfg {
-       u32 channel_id;
-       u32 request_line;
-       u32 stream_config;
-       u32 features;
-};
-
-struct stm32_dma_chan_reg {
-       u32 dma_lisr;
-       u32 dma_hisr;
-       u32 dma_lifcr;
-       u32 dma_hifcr;
-       u32 dma_scr;
-       u32 dma_sndtr;
-       u32 dma_spar;
-       u32 dma_sm0ar;
-       u32 dma_sm1ar;
-       u32 dma_sfcr;
-};
-
-struct stm32_dma_sg_req {
-       u32 len;
-       struct stm32_dma_chan_reg chan_reg;
-};
-
-struct stm32_dma_desc {
-       struct virt_dma_desc vdesc;
-       bool cyclic;
-       u32 num_sgs;
-       struct stm32_dma_sg_req sg_req[] __counted_by(num_sgs);
-};
-
-/**
- * struct stm32_dma_mdma_config - STM32 DMA MDMA configuration
- * @stream_id: DMA request to trigger STM32 MDMA transfer
- * @ifcr: DMA interrupt flag clear register address,
- *        used by STM32 MDMA to clear DMA Transfer Complete flag
- * @tcf: DMA Transfer Complete flag
- */
-struct stm32_dma_mdma_config {
-       u32 stream_id;
-       u32 ifcr;
-       u32 tcf;
-};
-
-struct stm32_dma_chan {
-       struct virt_dma_chan vchan;
-       bool config_init;
-       bool busy;
-       u32 id;
-       u32 irq;
-       struct stm32_dma_desc *desc;
-       u32 next_sg;
-       struct dma_slave_config dma_sconfig;
-       struct stm32_dma_chan_reg chan_reg;
-       u32 threshold;
-       u32 mem_burst;
-       u32 mem_width;
-       enum dma_status status;
-       bool trig_mdma;
-       struct stm32_dma_mdma_config mdma_config;
-};
-
-struct stm32_dma_device {
-       struct dma_device ddev;
-       void __iomem *base;
-       struct clk *clk;
-       bool mem2mem;
-       struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
-};
-
-static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
-{
-       return container_of(chan->vchan.chan.device, struct stm32_dma_device,
-                           ddev);
-}
-
-static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
-{
-       return container_of(c, struct stm32_dma_chan, vchan.chan);
-}
-
-static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
-{
-       return container_of(vdesc, struct stm32_dma_desc, vdesc);
-}
-
-static struct device *chan2dev(struct stm32_dma_chan *chan)
-{
-       return &chan->vchan.chan.dev->device;
-}
-
-static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
-{
-       return readl_relaxed(dmadev->base + reg);
-}
-
-static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
-{
-       writel_relaxed(val, dmadev->base + reg);
-}
-
-static int stm32_dma_get_width(struct stm32_dma_chan *chan,
-                              enum dma_slave_buswidth width)
-{
-       switch (width) {
-       case DMA_SLAVE_BUSWIDTH_1_BYTE:
-               return STM32_DMA_BYTE;
-       case DMA_SLAVE_BUSWIDTH_2_BYTES:
-               return STM32_DMA_HALF_WORD;
-       case DMA_SLAVE_BUSWIDTH_4_BYTES:
-               return STM32_DMA_WORD;
-       default:
-               dev_err(chan2dev(chan), "Dma bus width not supported\n");
-               return -EINVAL;
-       }
-}
-
-static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
-                                                      dma_addr_t buf_addr,
-                                                      u32 threshold)
-{
-       enum dma_slave_buswidth max_width;
-
-       if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
-               max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       else
-               max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-
-       while ((buf_len < max_width  || buf_len % max_width) &&
-              max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
-               max_width = max_width >> 1;
-
-       if (buf_addr & (max_width - 1))
-               max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-
-       return max_width;
-}
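/*
 * Worked example for the width selection above: with a full FIFO
 * threshold, a 6-byte buffer and an aligned address, max_width starts at
 * 4 bytes, is halved to 2 because 6 % 4 != 0, and stays at 2 since
 * 6 % 2 == 0; an odd buffer address would force it down to 1 byte.
 */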
-
-static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
-                                               enum dma_slave_buswidth width)
-{
-       u32 remaining;
-
-       if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
-               return false;
-
-       if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
-               if (burst != 0) {
-                       /*
-                        * If the number of beats fits into a whole number
-                        * of bursts, this configuration is allowed.
-                        */
-                       remaining = ((STM32_DMA_FIFO_SIZE / width) *
-                                    (threshold + 1) / 4) % burst;
-
-                       if (remaining == 0)
-                               return true;
-               } else {
-                       return true;
-               }
-       }
-
-       return false;
-}
-
-static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
-{
-       /* In direct mode (FIFO bypassed), bursts are not possible */
-       if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
-               return false;
-
-       /*
-        * The buffer or period length has to be aligned on the FIFO depth.
-        * Otherwise, bytes may be left stuck in the FIFO at the end of the
-        * buffer or period.
-        */
-       return ((buf_len % ((threshold + 1) * 4)) == 0);
-}
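/*
 * Worked example for the two helpers above, assuming a half-full FIFO
 * threshold (STM32_DMA_FIFO_THRESHOLD_HALFFULL = 1) and a 2-byte width:
 * the FIFO trigger level is (1 + 1) * 4 = 8 bytes, so buf_len must be a
 * multiple of 8 for bursts to be possible, and a 4-beat burst is allowed
 * since ((16 / 2) * (1 + 1) / 4) % 4 == 0.
 */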
-
-static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
-                                   enum dma_slave_buswidth width)
-{
-       u32 best_burst = max_burst;
-
-       if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
-               return 0;
-
-       while ((buf_len < best_burst * width && best_burst > 1) ||
-              !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
-                                                   width)) {
-               if (best_burst > STM32_DMA_MIN_BURST)
-                       best_burst = best_burst >> 1;
-               else
-                       best_burst = 0;
-       }
-
-       return best_burst;
-}
-
-static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
-{
-       switch (maxburst) {
-       case 0:
-       case 1:
-               return STM32_DMA_BURST_SINGLE;
-       case 4:
-               return STM32_DMA_BURST_INCR4;
-       case 8:
-               return STM32_DMA_BURST_INCR8;
-       case 16:
-               return STM32_DMA_BURST_INCR16;
-       default:
-               dev_err(chan2dev(chan), "Dma burst size not supported\n");
-               return -EINVAL;
-       }
-}
-
-static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
-                                     u32 src_burst, u32 dst_burst)
-{
-       chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
-       chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;
-
-       if (!src_burst && !dst_burst) {
-               /* Using direct mode */
-               chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
-       } else {
-               /* Using FIFO mode */
-               chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
-       }
-}
-
-static int stm32_dma_slave_config(struct dma_chan *c,
-                                 struct dma_slave_config *config)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-
-       memcpy(&chan->dma_sconfig, config, sizeof(*config));
-
-       /* Check if user is requesting DMA to trigger STM32 MDMA */
-       if (config->peripheral_size) {
-               config->peripheral_config = &chan->mdma_config;
-               config->peripheral_size = sizeof(chan->mdma_config);
-               chan->trig_mdma = true;
-       }
-
-       chan->config_init = true;
-
-       return 0;
-}
-
-static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
-{
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       u32 flags, dma_isr;
-
-       /*
-        * Read "flags" from DMA_xISR register corresponding to the selected
-        * DMA channel at the correct bit offset inside that register.
-        */
-
-       dma_isr = stm32_dma_read(dmadev, STM32_DMA_ISR(chan->id));
-       flags = dma_isr >> STM32_DMA_FLAGS_SHIFT(chan->id);
-
-       return flags & STM32_DMA_MASKI;
-}
-
-static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
-{
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       u32 dma_ifcr;
-
-       /*
-        * Write "flags" to the DMA_xIFCR register corresponding to the selected
-        * DMA channel at the correct bit offset inside that register.
-        */
-       flags &= STM32_DMA_MASKI;
-       dma_ifcr = flags << STM32_DMA_FLAGS_SHIFT(chan->id);
-
-       stm32_dma_write(dmadev, STM32_DMA_IFCR(chan->id), dma_ifcr);
-}
-
-static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
-{
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       u32 dma_scr, id, reg;
-
-       id = chan->id;
-       reg = STM32_DMA_SCR(id);
-       dma_scr = stm32_dma_read(dmadev, reg);
-
-       if (dma_scr & STM32_DMA_SCR_EN) {
-               dma_scr &= ~STM32_DMA_SCR_EN;
-               stm32_dma_write(dmadev, reg, dma_scr);
-
-               return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
-                                       dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
-                                       10, 1000000);
-       }
-
-       return 0;
-}
-
-static void stm32_dma_stop(struct stm32_dma_chan *chan)
-{
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       u32 dma_scr, dma_sfcr, status;
-       int ret;
-
-       /* Disable interrupts */
-       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
-       dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
-       stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
-       dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
-       dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
-       stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);
-
-       /* Disable DMA */
-       ret = stm32_dma_disable_chan(chan);
-       if (ret < 0)
-               return;
-
-       /* Clear interrupt status if it is there */
-       status = stm32_dma_irq_status(chan);
-       if (status) {
-               dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
-                       __func__, status);
-               stm32_dma_irq_clear(chan, status);
-       }
-
-       chan->busy = false;
-       chan->status = DMA_COMPLETE;
-}
-
-static int stm32_dma_terminate_all(struct dma_chan *c)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       unsigned long flags;
-       LIST_HEAD(head);
-
-       spin_lock_irqsave(&chan->vchan.lock, flags);
-
-       if (chan->desc) {
-               dma_cookie_complete(&chan->desc->vdesc.tx);
-               vchan_terminate_vdesc(&chan->desc->vdesc);
-               if (chan->busy)
-                       stm32_dma_stop(chan);
-               chan->desc = NULL;
-       }
-
-       vchan_get_all_descriptors(&chan->vchan, &head);
-       spin_unlock_irqrestore(&chan->vchan.lock, flags);
-       vchan_dma_desc_free_list(&chan->vchan, &head);
-
-       return 0;
-}
-
-static void stm32_dma_synchronize(struct dma_chan *c)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-
-       vchan_synchronize(&chan->vchan);
-}
-
-static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
-{
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
-       u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
-       u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
-       u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
-       u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
-       u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
-
-       dev_dbg(chan2dev(chan), "SCR:   0x%08x\n", scr);
-       dev_dbg(chan2dev(chan), "NDTR:  0x%08x\n", ndtr);
-       dev_dbg(chan2dev(chan), "SPAR:  0x%08x\n", spar);
-       dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
-       dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
-       dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n", sfcr);
-}
-
-static void stm32_dma_sg_inc(struct stm32_dma_chan *chan)
-{
-       chan->next_sg++;
-       if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs))
-               chan->next_sg = 0;
-}
-
-static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
-
-static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
-{
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       struct virt_dma_desc *vdesc;
-       struct stm32_dma_sg_req *sg_req;
-       struct stm32_dma_chan_reg *reg;
-       u32 status;
-       int ret;
-
-       ret = stm32_dma_disable_chan(chan);
-       if (ret < 0)
-               return;
-
-       if (!chan->desc) {
-               vdesc = vchan_next_desc(&chan->vchan);
-               if (!vdesc)
-                       return;
-
-               list_del(&vdesc->node);
-
-               chan->desc = to_stm32_dma_desc(vdesc);
-               chan->next_sg = 0;
-       }
-
-       if (chan->next_sg == chan->desc->num_sgs)
-               chan->next_sg = 0;
-
-       sg_req = &chan->desc->sg_req[chan->next_sg];
-       reg = &sg_req->chan_reg;
-
-       /* When DMA triggers STM32 MDMA, DMA Transfer Complete is managed by STM32 MDMA */
-       if (chan->trig_mdma && chan->dma_sconfig.direction != DMA_MEM_TO_DEV)
-               reg->dma_scr &= ~STM32_DMA_SCR_TCIE;
-
-       reg->dma_scr &= ~STM32_DMA_SCR_EN;
-       stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
-       stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
-       stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
-       stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
-       stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
-       stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);
-
-       stm32_dma_sg_inc(chan);
-
-       /* Clear interrupt status if it is there */
-       status = stm32_dma_irq_status(chan);
-       if (status)
-               stm32_dma_irq_clear(chan, status);
-
-       if (chan->desc->cyclic)
-               stm32_dma_configure_next_sg(chan);
-
-       stm32_dma_dump_reg(chan);
-
-       /* Start DMA */
-       chan->busy = true;
-       chan->status = DMA_IN_PROGRESS;
-       reg->dma_scr |= STM32_DMA_SCR_EN;
-       stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
-
-       dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
-}
-
-static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
-{
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       struct stm32_dma_sg_req *sg_req;
-       u32 dma_scr, dma_sm0ar, dma_sm1ar, id;
-
-       id = chan->id;
-       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
-
-       sg_req = &chan->desc->sg_req[chan->next_sg];
-
-       if (dma_scr & STM32_DMA_SCR_CT) {
-               dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
-               stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
-               dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
-                       stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
-       } else {
-               dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
-               stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
-               dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
-                       stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
-       }
-}
-
-static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
-{
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       u32 dma_scr;
-
-       /*
-        * Read and store current remaining data items and peripheral/memory addresses to be
-        * updated on resume
-        */
-       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
-       /*
-        * The transfer can be paused between a previous resume and the reconfiguration on
-        * transfer complete. If the transfer is cyclic and CIRC and DBM have been deactivated
-        * for the resume, they need to be set again here in the SCR backup to ensure a correct
-        * reconfiguration on transfer complete.
-        */
-       if (chan->desc && chan->desc->cyclic) {
-               if (chan->desc->num_sgs == 1)
-                       dma_scr |= STM32_DMA_SCR_CIRC;
-               else
-                       dma_scr |= STM32_DMA_SCR_DBM;
-       }
-       chan->chan_reg.dma_scr = dma_scr;
-
-       /*
-        * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, otherwise
-        * on resume NDTR autoreload value will be wrong (lower than the initial period length)
-        */
-       if (chan->desc && chan->desc->cyclic) {
-               dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC);
-               stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
-       }
-
-       chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
-
-       chan->status = DMA_PAUSED;
-
-       dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
-}
-
-static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
-{
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       struct stm32_dma_sg_req *sg_req;
-       u32 dma_scr, status, id;
-
-       id = chan->id;
-       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
-
-       /* Clear interrupt status if it is there */
-       status = stm32_dma_irq_status(chan);
-       if (status)
-               stm32_dma_irq_clear(chan, status);
-
-       if (!chan->next_sg)
-               sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
-       else
-               sg_req = &chan->desc->sg_req[chan->next_sg - 1];
-
-       /* Reconfigure NDTR with the initial value */
-       stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr);
-
-       /* Restore SPAR */
-       stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar);
-
-       /* Restore SM0AR/SM1AR regardless of DBM/CT, as they may have been modified */
-       stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar);
-       stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar);
-
-       /* Reactivate CIRC/DBM if needed */
-       if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) {
-               dma_scr |= STM32_DMA_SCR_DBM;
-               /* Restore CT */
-               if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT)
-                       dma_scr &= ~STM32_DMA_SCR_CT;
-               else
-                       dma_scr |= STM32_DMA_SCR_CT;
-       } else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) {
-               dma_scr |= STM32_DMA_SCR_CIRC;
-       }
-       stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
-
-       stm32_dma_configure_next_sg(chan);
-
-       stm32_dma_dump_reg(chan);
-
-       dma_scr |= STM32_DMA_SCR_EN;
-       stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
-
-       dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
-}
-
-static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
-{
-       if (!chan->desc)
-               return;
-
-       if (chan->desc->cyclic) {
-               vchan_cyclic_callback(&chan->desc->vdesc);
-               if (chan->trig_mdma)
-                       return;
-               stm32_dma_sg_inc(chan);
-               /* cyclic while CIRC/DBM disabled => post-resume reconfiguration needed */
-               if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
-                       stm32_dma_post_resume_reconfigure(chan);
-               else if (scr & STM32_DMA_SCR_DBM)
-                       stm32_dma_configure_next_sg(chan);
-       } else {
-               chan->busy = false;
-               chan->status = DMA_COMPLETE;
-               if (chan->next_sg == chan->desc->num_sgs) {
-                       vchan_cookie_complete(&chan->desc->vdesc);
-                       chan->desc = NULL;
-               }
-               stm32_dma_start_transfer(chan);
-       }
-}
-
-static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
-{
-       struct stm32_dma_chan *chan = devid;
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       u32 status, scr, sfcr;
-
-       spin_lock(&chan->vchan.lock);
-
-       status = stm32_dma_irq_status(chan);
-       scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
-       sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
-
-       if (status & STM32_DMA_FEI) {
-               stm32_dma_irq_clear(chan, STM32_DMA_FEI);
-               status &= ~STM32_DMA_FEI;
-               if (sfcr & STM32_DMA_SFCR_FEIE) {
-                       if (!(scr & STM32_DMA_SCR_EN) &&
-                           !(status & STM32_DMA_TCI))
-                               dev_err(chan2dev(chan), "FIFO Error\n");
-                       else
-                               dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
-               }
-       }
-       if (status & STM32_DMA_DMEI) {
-               stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
-               status &= ~STM32_DMA_DMEI;
-               if (sfcr & STM32_DMA_SCR_DMEIE)
-                       dev_dbg(chan2dev(chan), "Direct mode overrun\n");
-       }
-
-       if (status & STM32_DMA_TCI) {
-               stm32_dma_irq_clear(chan, STM32_DMA_TCI);
-               if (scr & STM32_DMA_SCR_TCIE) {
-                       if (chan->status != DMA_PAUSED)
-                               stm32_dma_handle_chan_done(chan, scr);
-               }
-               status &= ~STM32_DMA_TCI;
-       }
-
-       if (status & STM32_DMA_HTI) {
-               stm32_dma_irq_clear(chan, STM32_DMA_HTI);
-               status &= ~STM32_DMA_HTI;
-       }
-
-       if (status) {
-               stm32_dma_irq_clear(chan, status);
-               dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
-               if (!(scr & STM32_DMA_SCR_EN))
-                       dev_err(chan2dev(chan), "chan disabled by HW\n");
-       }
-
-       spin_unlock(&chan->vchan.lock);
-
-       return IRQ_HANDLED;
-}
-
-static void stm32_dma_issue_pending(struct dma_chan *c)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->vchan.lock, flags);
-       if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
-               dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
-               stm32_dma_start_transfer(chan);
-       }
-       spin_unlock_irqrestore(&chan->vchan.lock, flags);
-}
-
-static int stm32_dma_pause(struct dma_chan *c)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       unsigned long flags;
-       int ret;
-
-       if (chan->status != DMA_IN_PROGRESS)
-               return -EPERM;
-
-       spin_lock_irqsave(&chan->vchan.lock, flags);
-
-       ret = stm32_dma_disable_chan(chan);
-       if (!ret)
-               stm32_dma_handle_chan_paused(chan);
-
-       spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
-       return ret;
-}
-
-static int stm32_dma_resume(struct dma_chan *c)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       struct stm32_dma_chan_reg chan_reg = chan->chan_reg;
-       u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar;
-       struct stm32_dma_sg_req *sg_req;
-       unsigned long flags;
-
-       if (chan->status != DMA_PAUSED)
-               return -EPERM;
-
-       scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
-       if (WARN_ON(scr & STM32_DMA_SCR_EN))
-               return -EPERM;
-
-       spin_lock_irqsave(&chan->vchan.lock, flags);
-
-       /* sg_req[prev_sg] contains the original ndtr, sm0ar and sm1ar before pausing the transfer */
-       if (!chan->next_sg)
-               sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
-       else
-               sg_req = &chan->desc->sg_req[chan->next_sg - 1];
-
-       ndtr = sg_req->chan_reg.dma_sndtr;
-       offset = (ndtr - chan_reg.dma_sndtr);
-       offset <<= FIELD_GET(STM32_DMA_SCR_PSIZE_MASK, chan_reg.dma_scr);
-       spar = sg_req->chan_reg.dma_spar;
-       sm0ar = sg_req->chan_reg.dma_sm0ar;
-       sm1ar = sg_req->chan_reg.dma_sm1ar;
-
-       /*
-        * The peripheral and/or memory addresses have to be updated in order to adjust the
-        * address pointers. Need to check increment.
-        */
-       if (chan_reg.dma_scr & STM32_DMA_SCR_PINC)
-               stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset);
-       else
-               stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar);
-
-       if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC))
-               offset = 0;
-
-       /*
-        * In case of DBM, the current target could be SM1AR.
-        * Need to temporarily deactivate CIRC/DBM to finish the current transfer, so
-        * SM0AR becomes the current target and must be updated with SM1AR + offset if CT=1.
-        */
-       if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT))
-               stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset);
-       else
-               stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset);
-
-       /* NDTR must be restored otherwise internal HW counter won't be correctly reset */
-       stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr);
-
-       /*
-        * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt,
-        * otherwise NDTR autoreload value will be wrong (lower than the initial period length)
-        */
-       if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))
-               chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM);
-
-       if (chan_reg.dma_scr & STM32_DMA_SCR_DBM)
-               stm32_dma_configure_next_sg(chan);
-
-       stm32_dma_dump_reg(chan);
-
-       /* The stream may then be re-enabled to restart transfer from the point it was stopped */
-       chan->status = DMA_IN_PROGRESS;
-       chan_reg.dma_scr |= STM32_DMA_SCR_EN;
-       stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr);
-
-       spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
-       dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
-
-       return 0;
-}
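/*
 * Worked example for the offset computation in stm32_dma_resume() above:
 * if the paused sg started with NDTR = 480 data items, 200 items remain
 * and PSIZE = 1 (half-words), the stream has advanced by
 * (480 - 200) << 1 = 560 bytes, which is added to SPAR and SM0AR/SM1AR
 * when the corresponding increment mode is set.
 */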
-
-static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
-                                   enum dma_transfer_direction direction,
-                                   enum dma_slave_buswidth *buswidth,
-                                   u32 buf_len, dma_addr_t buf_addr)
-{
-       enum dma_slave_buswidth src_addr_width, dst_addr_width;
-       int src_bus_width, dst_bus_width;
-       int src_burst_size, dst_burst_size;
-       u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
-       u32 dma_scr, fifoth;
-
-       src_addr_width = chan->dma_sconfig.src_addr_width;
-       dst_addr_width = chan->dma_sconfig.dst_addr_width;
-       src_maxburst = chan->dma_sconfig.src_maxburst;
-       dst_maxburst = chan->dma_sconfig.dst_maxburst;
-       fifoth = chan->threshold;
-
-       switch (direction) {
-       case DMA_MEM_TO_DEV:
-               /* Set device data size */
-               dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
-               if (dst_bus_width < 0)
-                       return dst_bus_width;
-
-               /* Set device burst size */
-               dst_best_burst = stm32_dma_get_best_burst(buf_len,
-                                                         dst_maxburst,
-                                                         fifoth,
-                                                         dst_addr_width);
-
-               dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
-               if (dst_burst_size < 0)
-                       return dst_burst_size;
-
-               /* Set memory data size */
-               src_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
-                                                        fifoth);
-               chan->mem_width = src_addr_width;
-               src_bus_width = stm32_dma_get_width(chan, src_addr_width);
-               if (src_bus_width < 0)
-                       return src_bus_width;
-
-               /*
-                * Set memory burst size - burst not possible if address is not aligned on
-                * the address boundary equal to the size of the transfer
-                */
-               if (buf_addr & (buf_len - 1))
-                       src_maxburst = 1;
-               else
-                       src_maxburst = STM32_DMA_MAX_BURST;
-               src_best_burst = stm32_dma_get_best_burst(buf_len,
-                                                         src_maxburst,
-                                                         fifoth,
-                                                         src_addr_width);
-               src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
-               if (src_burst_size < 0)
-                       return src_burst_size;
-
-               dma_scr = FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_MEM_TO_DEV) |
-                       FIELD_PREP(STM32_DMA_SCR_PSIZE_MASK, dst_bus_width) |
-                       FIELD_PREP(STM32_DMA_SCR_MSIZE_MASK, src_bus_width) |
-                       FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, dst_burst_size) |
-                       FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, src_burst_size);
-
-               /* Set FIFO threshold */
-               chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
-               if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
-                       chan->chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, fifoth);
-
-               /* Set peripheral address */
-               chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
-               *buswidth = dst_addr_width;
-               break;
-
-       case DMA_DEV_TO_MEM:
-               /* Set device data size */
-               src_bus_width = stm32_dma_get_width(chan, src_addr_width);
-               if (src_bus_width < 0)
-                       return src_bus_width;
-
-               /* Set device burst size */
-               src_best_burst = stm32_dma_get_best_burst(buf_len,
-                                                         src_maxburst,
-                                                         fifoth,
-                                                         src_addr_width);
-               chan->mem_burst = src_best_burst;
-               src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
-               if (src_burst_size < 0)
-                       return src_burst_size;
-
-               /* Set memory data size */
-               dst_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
-                                                        fifoth);
-               chan->mem_width = dst_addr_width;
-               dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
-               if (dst_bus_width < 0)
-                       return dst_bus_width;
-
-               /*
-                * Set memory burst size - burst not possible if address is not aligned on
-                * the address boundary equal to the size of the transfer
-                */
-               if (buf_addr & (buf_len - 1))
-                       dst_maxburst = 1;
-               else
-                       dst_maxburst = STM32_DMA_MAX_BURST;
-               dst_best_burst = stm32_dma_get_best_burst(buf_len,
-                                                         dst_maxburst,
-                                                         fifoth,
-                                                         dst_addr_width);
-               chan->mem_burst = dst_best_burst;
-               dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
-               if (dst_burst_size < 0)
-                       return dst_burst_size;
-
-               dma_scr = FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_DEV_TO_MEM) |
-                       FIELD_PREP(STM32_DMA_SCR_PSIZE_MASK, src_bus_width) |
-                       FIELD_PREP(STM32_DMA_SCR_MSIZE_MASK, dst_bus_width) |
-                       FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, src_burst_size) |
-                       FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, dst_burst_size);
-
-               /* Set FIFO threshold */
-               chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
-               if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
-                       chan->chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, fifoth);
-
-               /* Set peripheral address */
-               chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
-               *buswidth = chan->dma_sconfig.src_addr_width;
-               break;
-
-       default:
-               dev_err(chan2dev(chan), "Dma direction is not supported\n");
-               return -EINVAL;
-       }
-
-       stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);
-
-       /* Set DMA control register */
-       chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
-                       STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
-                       STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
-       chan->chan_reg.dma_scr |= dma_scr;
-
-       return 0;
-}
-
-static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
-{
-       memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
-}
-
-static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
-       struct dma_chan *c, struct scatterlist *sgl,
-       u32 sg_len, enum dma_transfer_direction direction,
-       unsigned long flags, void *context)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       struct stm32_dma_desc *desc;
-       struct scatterlist *sg;
-       enum dma_slave_buswidth buswidth;
-       u32 nb_data_items;
-       int i, ret;
-
-       if (!chan->config_init) {
-               dev_err(chan2dev(chan), "dma channel is not configured\n");
-               return NULL;
-       }
-
-       if (sg_len < 1) {
-               dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
-               return NULL;
-       }
-
-       desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
-       if (!desc)
-               return NULL;
-       desc->num_sgs = sg_len;
-
-       /* Set peripheral flow controller */
-       if (chan->dma_sconfig.device_fc)
-               chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
-       else
-               chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
-
-       /* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
-       if (chan->trig_mdma && sg_len > 1) {
-               chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
-               chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
-       }
-
-       for_each_sg(sgl, sg, sg_len, i) {
-               ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
-                                              sg_dma_len(sg),
-                                              sg_dma_address(sg));
-               if (ret < 0)
-                       goto err;
-
-               desc->sg_req[i].len = sg_dma_len(sg);
-
-               nb_data_items = desc->sg_req[i].len / buswidth;
-               if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
-                       dev_err(chan2dev(chan), "nb items not supported\n");
-                       goto err;
-               }
-
-               stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
-               desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
-               desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
-               desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
-               desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
-               desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
-               if (chan->trig_mdma)
-                       desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg);
-               desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
-       }
-       desc->cyclic = false;
-
-       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-
-err:
-       kfree(desc);
-       return NULL;
-}
-
-static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
-       struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
-       size_t period_len, enum dma_transfer_direction direction,
-       unsigned long flags)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       struct stm32_dma_desc *desc;
-       enum dma_slave_buswidth buswidth;
-       u32 num_periods, nb_data_items;
-       int i, ret;
-
-       if (!buf_len || !period_len) {
-               dev_err(chan2dev(chan), "Invalid buffer/period len\n");
-               return NULL;
-       }
-
-       if (!chan->config_init) {
-               dev_err(chan2dev(chan), "dma channel is not configured\n");
-               return NULL;
-       }
-
-       if (buf_len % period_len) {
-               dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
-               return NULL;
-       }
-
-       /*
-        * More requests can be queued as long as the DMA has not been
-        * started; the driver will loop over all of them. Once the DMA is
-        * started, new requests can be queued only after terminating the
-        * DMA.
-        */
-       if (chan->busy) {
-               dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
-               return NULL;
-       }
-
-       ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len,
-                                      buf_addr);
-       if (ret < 0)
-               return NULL;
-
-       nb_data_items = period_len / buswidth;
-       if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
-               dev_err(chan2dev(chan), "number of items not supported\n");
-               return NULL;
-       }
-
-       /* Enable Circular mode or double buffer mode */
-       if (buf_len == period_len) {
-               chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
-       } else {
-               chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
-               chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
-       }
-
-       /* Clear periph ctrl if client set it */
-       chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
-
-       num_periods = buf_len / period_len;
-
-       desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
-       if (!desc)
-               return NULL;
-       desc->num_sgs = num_periods;
-
-       for (i = 0; i < num_periods; i++) {
-               desc->sg_req[i].len = period_len;
-
-               stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
-               desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
-               desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
-               desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
-               desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
-               desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
-               if (chan->trig_mdma)
-                       desc->sg_req[i].chan_reg.dma_sm1ar += period_len;
-               desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
-               if (!chan->trig_mdma)
-                       buf_addr += period_len;
-       }
-       desc->cyclic = true;
-
-       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-}
-
-static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
-       struct dma_chan *c, dma_addr_t dest,
-       dma_addr_t src, size_t len, unsigned long flags)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       enum dma_slave_buswidth max_width;
-       struct stm32_dma_desc *desc;
-       size_t xfer_count, offset;
-       u32 num_sgs, best_burst, threshold;
-       int dma_burst, i;
-
-       num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
-       desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
-       if (!desc)
-               return NULL;
-       desc->num_sgs = num_sgs;
-
-       threshold = chan->threshold;
-
-       for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
-               xfer_count = min_t(size_t, len - offset,
-                                  STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
-
-               /* Compute best burst size */
-               max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-               best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
-                                                     threshold, max_width);
-               dma_burst = stm32_dma_get_burst(chan, best_burst);
-               if (dma_burst < 0) {
-                       kfree(desc);
-                       return NULL;
-               }
-
-               stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
-               desc->sg_req[i].chan_reg.dma_scr =
-                       FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_MEM_TO_MEM) |
-                       FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, dma_burst) |
-                       FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, dma_burst) |
-                       STM32_DMA_SCR_MINC |
-                       STM32_DMA_SCR_PINC |
-                       STM32_DMA_SCR_TCIE |
-                       STM32_DMA_SCR_TEIE;
-               desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
-               desc->sg_req[i].chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, threshold);
-               desc->sg_req[i].chan_reg.dma_spar = src + offset;
-               desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
-               desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
-               desc->sg_req[i].len = xfer_count;
-       }
-       desc->cyclic = false;
-
-       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-}
-
-static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
-{
-       u32 dma_scr, width, ndtr;
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-
-       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
-       width = FIELD_GET(STM32_DMA_SCR_PSIZE_MASK, dma_scr);
-       ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
-
-       return ndtr << width;
-}
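/*
 * Worked example: the residue is NDTR scaled by the peripheral data
 * size, so with PSIZE = 2 (32-bit words) and SxNDTR = 100, there are
 * 100 << 2 = 400 bytes left in the current transfer.
 */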
-
-/**
- * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
- * @chan: dma channel
- *
- * This function, called while IRQs are disabled, checks that the hardware has
- * not switched to the next transfer in double buffer mode. The test is done by
- * comparing the next_sg memory address with the hardware-related register
- * (based on the CT bit value).
- *
- * Returns true if the expected current transfer is still running or if double
- * buffer mode is not activated.
- */
-static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
-{
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       struct stm32_dma_sg_req *sg_req;
-       u32 dma_scr, dma_smar, id, period_len;
-
-       id = chan->id;
-       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
-
-       /* In cyclic CIRC but not DBM, CT is not used */
-       if (!(dma_scr & STM32_DMA_SCR_DBM))
-               return true;
-
-       sg_req = &chan->desc->sg_req[chan->next_sg];
-       period_len = sg_req->len;
-
-       /* DBM - take care of a previous pause/resume not yet post reconfigured */
-       if (dma_scr & STM32_DMA_SCR_CT) {
-               dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
-               /*
-                * If the transfer has been paused/resumed,
-                * SM0AR is in the range of [SM0AR:SM0AR+period_len]
-                */
-               return (dma_smar >= sg_req->chan_reg.dma_sm0ar &&
-                       dma_smar < sg_req->chan_reg.dma_sm0ar + period_len);
-       }
-
-       dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
-       /*
-        * If the transfer has been paused/resumed,
-        * SM1AR is in the range of [SM1AR:SM1AR+period_len]
-        */
-       return (dma_smar >= sg_req->chan_reg.dma_sm1ar &&
-               dma_smar < sg_req->chan_reg.dma_sm1ar + period_len);
-}
-
-static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
-                                    struct stm32_dma_desc *desc,
-                                    u32 next_sg)
-{
-       u32 modulo, burst_size;
-       u32 residue;
-       u32 n_sg = next_sg;
-       struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
-       int i;
-
-       /*
-        * Calculating the residue means computing the descriptor
-        * information:
-        * - the sg_req currently transferred
-        * - the hardware remaining position in this sg (NDTR bits field).
-        *
-        * A race condition may occur if the DMA is running in cyclic or double
-        * buffer mode, since the DMA registers are automatically reloaded at
-        * the end of a period transfer. The hardware may have switched to the
-        * next transfer (CT bit updated) just before the position (SxNDTR reg)
-        * is read.
-        * In this case the SxNDTR reg may (or may not) correspond to the new
-        * transfer position, rather than the expected one.
-        * The strategy implemented in the stm32 driver is to:
-        *  - read the SxNDTR register
-        *  - crosscheck that the hardware is still in the current transfer.
-        * In case of a switch, we can assume that the DMA is at the beginning
-        * of the next transfer, so we approximate the residue accordingly, by
-        * pointing to the beginning of the next transfer.
-        *
-        * This race condition does not apply to non-cyclic mode, as double
-        * buffer mode is not used. In that situation the registers are updated
-        * by software.
-        */
-
-       residue = stm32_dma_get_remaining_bytes(chan);
-
-       if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) {
-               n_sg++;
-               if (n_sg == chan->desc->num_sgs)
-                       n_sg = 0;
-               if (!chan->trig_mdma)
-                       residue = sg_req->len;
-       }
-
-       /*
-        * In cyclic mode, for the last period, residue = remaining bytes
-        * from NDTR.
-        * For all other periods in cyclic mode, and in sg mode,
-        * residue = remaining bytes from NDTR + remaining
-        * periods/sg still to be transferred.
-        */
-       if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0)
-               for (i = n_sg; i < desc->num_sgs; i++)
-                       residue += desc->sg_req[i].len;
-
-       if (!chan->mem_burst)
-               return residue;
-
-       burst_size = chan->mem_burst * chan->mem_width;
-       modulo = residue % burst_size;
-       if (modulo)
-               residue = residue - modulo + burst_size;
-
-       return residue;
-}
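-
-/*
- * Worked example (illustrative numbers, not taken from the driver): for a
- * cyclic transfer of three 4096-byte periods with next_sg = 1 and no period
- * switch detected, an NDTR readout of 1024 one-byte items gives
- * residue = 1024 + 2 * 4096 = 9216 bytes, before any burst-size round-up.
- */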
-
-static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
-                                          dma_cookie_t cookie,
-                                          struct dma_tx_state *state)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       struct virt_dma_desc *vdesc;
-       enum dma_status status;
-       unsigned long flags;
-       u32 residue = 0;
-
-       status = dma_cookie_status(c, cookie, state);
-       if (status == DMA_COMPLETE)
-               return status;
-
-       status = chan->status;
-
-       if (!state)
-               return status;
-
-       spin_lock_irqsave(&chan->vchan.lock, flags);
-       vdesc = vchan_find_desc(&chan->vchan, cookie);
-       if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
-               residue = stm32_dma_desc_residue(chan, chan->desc,
-                                                chan->next_sg);
-       else if (vdesc)
-               residue = stm32_dma_desc_residue(chan,
-                                                to_stm32_dma_desc(vdesc), 0);
-       dma_set_residue(state, residue);
-
-       spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
-       return status;
-}
-
-static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       int ret;
-
-       chan->config_init = false;
-
-       ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
-       if (ret < 0)
-               return ret;
-
-       ret = stm32_dma_disable_chan(chan);
-       if (ret < 0)
-               pm_runtime_put(dmadev->ddev.dev);
-
-       return ret;
-}
-
-static void stm32_dma_free_chan_resources(struct dma_chan *c)
-{
-       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       unsigned long flags;
-
-       dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);
-
-       if (chan->busy) {
-               spin_lock_irqsave(&chan->vchan.lock, flags);
-               stm32_dma_stop(chan);
-               chan->desc = NULL;
-               spin_unlock_irqrestore(&chan->vchan.lock, flags);
-       }
-
-       pm_runtime_put(dmadev->ddev.dev);
-
-       vchan_free_chan_resources(to_virt_chan(c));
-       stm32_dma_clear_reg(&chan->chan_reg);
-       chan->threshold = 0;
-}
-
-static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
-{
-       kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
-}
-
-static void stm32_dma_set_config(struct stm32_dma_chan *chan,
-                                struct stm32_dma_cfg *cfg)
-{
-       stm32_dma_clear_reg(&chan->chan_reg);
-
-       chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
-       chan->chan_reg.dma_scr |= FIELD_PREP(STM32_DMA_SCR_REQ_MASK, cfg->request_line);
-
-       /* Enable interrupts */
-       chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;
-
-       chan->threshold = FIELD_GET(STM32_DMA_THRESHOLD_FTR_MASK, cfg->features);
-       if (FIELD_GET(STM32_DMA_DIRECT_MODE_MASK, cfg->features))
-               chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
-       if (FIELD_GET(STM32_DMA_ALT_ACK_MODE_MASK, cfg->features))
-               chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;
-       chan->mdma_config.stream_id = FIELD_GET(STM32_DMA_MDMA_STREAM_ID_MASK, cfg->features);
-}
-
-static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
-                                          struct of_dma *ofdma)
-{
-       struct stm32_dma_device *dmadev = ofdma->of_dma_data;
-       struct device *dev = dmadev->ddev.dev;
-       struct stm32_dma_cfg cfg;
-       struct stm32_dma_chan *chan;
-       struct dma_chan *c;
-
-       if (dma_spec->args_count < 4) {
-               dev_err(dev, "Bad number of cells\n");
-               return NULL;
-       }
-
-       cfg.channel_id = dma_spec->args[0];
-       cfg.request_line = dma_spec->args[1];
-       cfg.stream_config = dma_spec->args[2];
-       cfg.features = dma_spec->args[3];
-
-       if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
-           cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
-               dev_err(dev, "Bad channel and/or request id\n");
-               return NULL;
-       }
-
-       chan = &dmadev->chan[cfg.channel_id];
-
-       c = dma_get_slave_channel(&chan->vchan.chan);
-       if (!c) {
-               dev_err(dev, "No more channels available\n");
-               return NULL;
-       }
-
-       stm32_dma_set_config(chan, &cfg);
-
-       return c;
-}
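-
-/*
- * For illustration, a client node references this controller with a
- * four-cell specifier, e.g. dmas = <&dma1 2 4 0x10400 0x3>; (hypothetical
- * values): args[0] = 2 selects the channel, args[1] = 4 the request line,
- * args[2] = 0x10400 the DMA_SCR stream configuration and args[3] = 0x3 the
- * feature bitfield decoded by stm32_dma_set_config().
- */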
-
-static const struct of_device_id stm32_dma_of_match[] = {
-       { .compatible = "st,stm32-dma", },
-       { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, stm32_dma_of_match);
-
-static int stm32_dma_probe(struct platform_device *pdev)
-{
-       struct stm32_dma_chan *chan;
-       struct stm32_dma_device *dmadev;
-       struct dma_device *dd;
-       struct resource *res;
-       struct reset_control *rst;
-       int i, ret;
-
-       dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
-       if (!dmadev)
-               return -ENOMEM;
-
-       dd = &dmadev->ddev;
-
-       dmadev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
-       if (IS_ERR(dmadev->base))
-               return PTR_ERR(dmadev->base);
-
-       dmadev->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(dmadev->clk))
-               return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");
-
-       ret = clk_prepare_enable(dmadev->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
-               return ret;
-       }
-
-       dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
-                                               "st,mem2mem");
-
-       rst = devm_reset_control_get(&pdev->dev, NULL);
-       if (IS_ERR(rst)) {
-               ret = PTR_ERR(rst);
-               if (ret == -EPROBE_DEFER)
-                       goto clk_free;
-       } else {
-               reset_control_assert(rst);
-               udelay(2);
-               reset_control_deassert(rst);
-       }
-
-       dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
-
-       dma_cap_set(DMA_SLAVE, dd->cap_mask);
-       dma_cap_set(DMA_PRIVATE, dd->cap_mask);
-       dma_cap_set(DMA_CYCLIC, dd->cap_mask);
-       dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
-       dd->device_free_chan_resources = stm32_dma_free_chan_resources;
-       dd->device_tx_status = stm32_dma_tx_status;
-       dd->device_issue_pending = stm32_dma_issue_pending;
-       dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
-       dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
-       dd->device_config = stm32_dma_slave_config;
-       dd->device_pause = stm32_dma_pause;
-       dd->device_resume = stm32_dma_resume;
-       dd->device_terminate_all = stm32_dma_terminate_all;
-       dd->device_synchronize = stm32_dma_synchronize;
-       dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
-               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
-               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-       dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
-               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
-               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
-       dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-       dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
-       dd->max_burst = STM32_DMA_MAX_BURST;
-       dd->max_sg_burst = STM32_DMA_ALIGNED_MAX_DATA_ITEMS;
-       dd->descriptor_reuse = true;
-       dd->dev = &pdev->dev;
-       INIT_LIST_HEAD(&dd->channels);
-
-       if (dmadev->mem2mem) {
-               dma_cap_set(DMA_MEMCPY, dd->cap_mask);
-               dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
-               dd->directions |= BIT(DMA_MEM_TO_MEM);
-       }
-
-       for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
-               chan = &dmadev->chan[i];
-               chan->id = i;
-               chan->vchan.desc_free = stm32_dma_desc_free;
-               vchan_init(&chan->vchan, dd);
-
-               chan->mdma_config.ifcr = res->start;
-               chan->mdma_config.ifcr += STM32_DMA_IFCR(chan->id);
-
-               chan->mdma_config.tcf = STM32_DMA_TCI;
-               chan->mdma_config.tcf <<= STM32_DMA_FLAGS_SHIFT(chan->id);
-       }
-
-       ret = dma_async_device_register(dd);
-       if (ret)
-               goto clk_free;
-
-       for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
-               chan = &dmadev->chan[i];
-               ret = platform_get_irq(pdev, i);
-               if (ret < 0)
-                       goto err_unregister;
-               chan->irq = ret;
-
-               ret = devm_request_irq(&pdev->dev, chan->irq,
-                                      stm32_dma_chan_irq, 0,
-                                      dev_name(chan2dev(chan)), chan);
-               if (ret) {
-                       dev_err(&pdev->dev,
-                               "request_irq failed with err %d channel %d\n",
-                               ret, i);
-                       goto err_unregister;
-               }
-       }
-
-       ret = of_dma_controller_register(pdev->dev.of_node,
-                                        stm32_dma_of_xlate, dmadev);
-       if (ret < 0) {
-               dev_err(&pdev->dev,
-                       "STM32 DMA DMA OF registration failed %d\n", ret);
-               goto err_unregister;
-       }
-
-       platform_set_drvdata(pdev, dmadev);
-
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_get_noresume(&pdev->dev);
-       pm_runtime_put(&pdev->dev);
-
-       dev_info(&pdev->dev, "STM32 DMA driver registered\n");
-
-       return 0;
-
-err_unregister:
-       dma_async_device_unregister(dd);
-clk_free:
-       clk_disable_unprepare(dmadev->clk);
-
-       return ret;
-}
-
-#ifdef CONFIG_PM
-static int stm32_dma_runtime_suspend(struct device *dev)
-{
-       struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
-
-       clk_disable_unprepare(dmadev->clk);
-
-       return 0;
-}
-
-static int stm32_dma_runtime_resume(struct device *dev)
-{
-       struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
-       int ret;
-
-       ret = clk_prepare_enable(dmadev->clk);
-       if (ret) {
-               dev_err(dev, "failed to prepare_enable clock\n");
-               return ret;
-       }
-
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int stm32_dma_pm_suspend(struct device *dev)
-{
-       struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
-       int id, ret, scr;
-
-       ret = pm_runtime_resume_and_get(dev);
-       if (ret < 0)
-               return ret;
-
-       for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
-               scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
-               if (scr & STM32_DMA_SCR_EN) {
-                       dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
-                       return -EBUSY;
-               }
-       }
-
-       pm_runtime_put_sync(dev);
-
-       pm_runtime_force_suspend(dev);
-
-       return 0;
-}
-
-static int stm32_dma_pm_resume(struct device *dev)
-{
-       return pm_runtime_force_resume(dev);
-}
-#endif
-
-static const struct dev_pm_ops stm32_dma_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume)
-       SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
-                          stm32_dma_runtime_resume, NULL)
-};
-
-static struct platform_driver stm32_dma_driver = {
-       .driver = {
-               .name = "stm32-dma",
-               .of_match_table = stm32_dma_of_match,
-               .pm = &stm32_dma_pm_ops,
-       },
-       .probe = stm32_dma_probe,
-};
-
-static int __init stm32_dma_init(void)
-{
-       return platform_driver_register(&stm32_dma_driver);
-}
-subsys_initcall(stm32_dma_init);
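
A rough sketch of how a client drives this engine through the generic
dmaengine API (the "rx" channel name, dev/sgl/nents and the error label below
are illustrative assumptions, not part of this driver). The residue printed
at the end is the value computed by stm32_dma_tx_status() above:

	struct dma_async_tx_descriptor *desc;
	struct dma_tx_state state;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "rx");	/* name from the DT "dmas" entry */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* dmaengine_slave_config(chan, &cfg) is assumed to have been done */
	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		goto err_release;

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* Later: how many bytes are still in flight? */
	if (dmaengine_tx_status(chan, cookie, &state) == DMA_IN_PROGRESS)
		dev_dbg(dev, "residue: %u bytes\n", state.residue);
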
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
deleted file mode 100644 (file)
index 8d77e2a..0000000
+++ /dev/null
@@ -1,402 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * Copyright (C) STMicroelectronics SA 2017
- * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
- *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
- *
- * DMA Router driver for STM32 DMA MUX
- *
- * Based on TI DMA Crossbar driver
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_dma.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/reset.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#define STM32_DMAMUX_CCR(x)            (0x4 * (x))
-#define STM32_DMAMUX_MAX_DMA_REQUESTS  32
-#define STM32_DMAMUX_MAX_REQUESTS      255
-
-struct stm32_dmamux {
-       u32 master;
-       u32 request;
-       u32 chan_id;
-};
-
-struct stm32_dmamux_data {
-       struct dma_router dmarouter;
-       struct clk *clk;
-       void __iomem *iomem;
-       u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
-       u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
-       spinlock_t lock; /* Protects register access */
-       DECLARE_BITMAP(dma_inuse, STM32_DMAMUX_MAX_DMA_REQUESTS); /* Used DMA channel */
-       u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to backup CCR register
-                                                * in suspend
-                                                */
-       u32 dma_reqs[]; /* Number of DMA requests per DMA master.
-                        *  [0] holds the number of DMA masters.
-                        *  To be kept at the very end of this structure
-                        */
-};
-
-static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
-{
-       return readl_relaxed(iomem + reg);
-}
-
-static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
-{
-       writel_relaxed(val, iomem + reg);
-}
-
-static void stm32_dmamux_free(struct device *dev, void *route_data)
-{
-       struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
-       struct stm32_dmamux *mux = route_data;
-       unsigned long flags;
-
-       /* Clear dma request */
-       spin_lock_irqsave(&dmamux->lock, flags);
-
-       stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
-       clear_bit(mux->chan_id, dmamux->dma_inuse);
-
-       pm_runtime_put_sync(dev);
-
-       spin_unlock_irqrestore(&dmamux->lock, flags);
-
-       dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
-               mux->request, mux->master, mux->chan_id);
-
-       kfree(mux);
-}
-
-static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
-                                        struct of_dma *ofdma)
-{
-       struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
-       struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
-       struct stm32_dmamux *mux;
-       u32 i, min, max;
-       int ret;
-       unsigned long flags;
-
-       if (dma_spec->args_count != 3) {
-               dev_err(&pdev->dev, "invalid number of dma mux args\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (dma_spec->args[0] > dmamux->dmamux_requests) {
-               dev_err(&pdev->dev, "invalid mux request number: %d\n",
-                       dma_spec->args[0]);
-               return ERR_PTR(-EINVAL);
-       }
-
-       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
-       if (!mux)
-               return ERR_PTR(-ENOMEM);
-
-       spin_lock_irqsave(&dmamux->lock, flags);
-       mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
-                                          dmamux->dma_requests);
-
-       if (mux->chan_id == dmamux->dma_requests) {
-               spin_unlock_irqrestore(&dmamux->lock, flags);
-               dev_err(&pdev->dev, "Run out of free DMA requests\n");
-               ret = -ENOMEM;
-               goto error_chan_id;
-       }
-       set_bit(mux->chan_id, dmamux->dma_inuse);
-       spin_unlock_irqrestore(&dmamux->lock, flags);
-
-       /* Look for DMA Master */
-       for (i = 1, min = 0, max = dmamux->dma_reqs[i];
-            i <= dmamux->dma_reqs[0];
-            min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
-               if (mux->chan_id < max)
-                       break;
-       mux->master = i - 1;
-
-       /* The of_node_put() will be done in the of_dma_router_xlate() function */
-       dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
-       if (!dma_spec->np) {
-               dev_err(&pdev->dev, "can't get dma master\n");
-               ret = -EINVAL;
-               goto error;
-       }
-
-       /* Set dma request */
-       spin_lock_irqsave(&dmamux->lock, flags);
-       ret = pm_runtime_resume_and_get(&pdev->dev);
-       if (ret < 0) {
-               spin_unlock_irqrestore(&dmamux->lock, flags);
-               goto error;
-       }
-       spin_unlock_irqrestore(&dmamux->lock, flags);
-
-       mux->request = dma_spec->args[0];
-
-       /* Craft the DMA spec */
-       dma_spec->args[3] = dma_spec->args[2] | mux->chan_id << 16;
-       dma_spec->args[2] = dma_spec->args[1];
-       dma_spec->args[1] = 0;
-       dma_spec->args[0] = mux->chan_id - min;
-       dma_spec->args_count = 4;
-
-       stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
-                          mux->request);
-       dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
-               mux->request, mux->master, mux->chan_id);
-
-       return mux;
-
-error:
-       clear_bit(mux->chan_id, dmamux->dma_inuse);
-
-error_chan_id:
-       kfree(mux);
-       return ERR_PTR(ret);
-}
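-
-/*
- * Worked example (hypothetical layout): with two masters of 8 requests each
- * (dma_reqs = { 2, 8, 8 }), a free chan_id of 10 falls within [8..15], so
- * the lookup selects master 1 with min = 8. The crafted four-cell spec then
- * carries args[0] = 10 - 8 = 2 (local channel on the second master),
- * args[1] = 0 (request line, driven by the DMAMUX), args[2] = the client's
- * stream configuration and args[3] = the client's features with chan_id
- * encoded from bit 16.
- */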
-
-static const struct of_device_id stm32_stm32dma_master_match[] __maybe_unused = {
-       { .compatible = "st,stm32-dma", },
-       {},
-};
-
-static int stm32_dmamux_probe(struct platform_device *pdev)
-{
-       struct device_node *node = pdev->dev.of_node;
-       const struct of_device_id *match;
-       struct device_node *dma_node;
-       struct stm32_dmamux_data *stm32_dmamux;
-       void __iomem *iomem;
-       struct reset_control *rst;
-       int i, count, ret;
-       u32 dma_req;
-
-       if (!node)
-               return -ENODEV;
-
-       count = device_property_count_u32(&pdev->dev, "dma-masters");
-       if (count < 0) {
-               dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
-               return -ENODEV;
-       }
-
-       stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
-                                   sizeof(u32) * (count + 1), GFP_KERNEL);
-       if (!stm32_dmamux)
-               return -ENOMEM;
-
-       dma_req = 0;
-       for (i = 1; i <= count; i++) {
-               dma_node = of_parse_phandle(node, "dma-masters", i - 1);
-
-               match = of_match_node(stm32_stm32dma_master_match, dma_node);
-               if (!match) {
-                       dev_err(&pdev->dev, "DMA master is not supported\n");
-                       of_node_put(dma_node);
-                       return -EINVAL;
-               }
-
-               if (of_property_read_u32(dma_node, "dma-requests",
-                                        &stm32_dmamux->dma_reqs[i])) {
-                       dev_info(&pdev->dev,
-                                "Missing MUX output information, using %u.\n",
-                                STM32_DMAMUX_MAX_DMA_REQUESTS);
-                       stm32_dmamux->dma_reqs[i] =
-                               STM32_DMAMUX_MAX_DMA_REQUESTS;
-               }
-               dma_req += stm32_dmamux->dma_reqs[i];
-               of_node_put(dma_node);
-       }
-
-       if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
-               dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
-               return -ENODEV;
-       }
-
-       stm32_dmamux->dma_requests = dma_req;
-       stm32_dmamux->dma_reqs[0] = count;
-
-       if (device_property_read_u32(&pdev->dev, "dma-requests",
-                                    &stm32_dmamux->dmamux_requests)) {
-               stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
-               dev_warn(&pdev->dev, "DMAMUX defaulting to %u requests\n",
-                        stm32_dmamux->dmamux_requests);
-       }
-       pm_runtime_get_noresume(&pdev->dev);
-
-       iomem = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(iomem))
-               return PTR_ERR(iomem);
-
-       spin_lock_init(&stm32_dmamux->lock);
-
-       stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(stm32_dmamux->clk))
-               return dev_err_probe(&pdev->dev, PTR_ERR(stm32_dmamux->clk),
-                                    "Missing clock controller\n");
-
-       ret = clk_prepare_enable(stm32_dmamux->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
-               return ret;
-       }
-
-       rst = devm_reset_control_get(&pdev->dev, NULL);
-       if (IS_ERR(rst)) {
-               ret = PTR_ERR(rst);
-               if (ret == -EPROBE_DEFER)
-                       goto err_clk;
-       } else if (count > 1) { /* Don't reset if there is only one dma-master */
-               reset_control_assert(rst);
-               udelay(2);
-               reset_control_deassert(rst);
-       }
-
-       stm32_dmamux->iomem = iomem;
-       stm32_dmamux->dmarouter.dev = &pdev->dev;
-       stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;
-
-       platform_set_drvdata(pdev, stm32_dmamux);
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
-
-       pm_runtime_get_noresume(&pdev->dev);
-
-       /* Reset the dmamux */
-       for (i = 0; i < stm32_dmamux->dma_requests; i++)
-               stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);
-
-       pm_runtime_put(&pdev->dev);
-
-       ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
-                                    &stm32_dmamux->dmarouter);
-       if (ret)
-               goto pm_disable;
-
-       return 0;
-
-pm_disable:
-       pm_runtime_disable(&pdev->dev);
-err_clk:
-       clk_disable_unprepare(stm32_dmamux->clk);
-
-       return ret;
-}
-
-#ifdef CONFIG_PM
-static int stm32_dmamux_runtime_suspend(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
-
-       clk_disable_unprepare(stm32_dmamux->clk);
-
-       return 0;
-}
-
-static int stm32_dmamux_runtime_resume(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
-       int ret;
-
-       ret = clk_prepare_enable(stm32_dmamux->clk);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to prepare_enable clock\n");
-               return ret;
-       }
-
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int stm32_dmamux_suspend(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
-       int i, ret;
-
-       ret = pm_runtime_resume_and_get(dev);
-       if (ret < 0)
-               return ret;
-
-       for (i = 0; i < stm32_dmamux->dma_requests; i++)
-               stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
-                                                        STM32_DMAMUX_CCR(i));
-
-       pm_runtime_put_sync(dev);
-
-       pm_runtime_force_suspend(dev);
-
-       return 0;
-}
-
-static int stm32_dmamux_resume(struct device *dev)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-       struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
-       int i, ret;
-
-       ret = pm_runtime_force_resume(dev);
-       if (ret < 0)
-               return ret;
-
-       ret = pm_runtime_resume_and_get(dev);
-       if (ret < 0)
-               return ret;
-
-       for (i = 0; i < stm32_dmamux->dma_requests; i++)
-               stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
-                                  stm32_dmamux->ccr[i]);
-
-       pm_runtime_put_sync(dev);
-
-       return 0;
-}
-#endif
-
-static const struct dev_pm_ops stm32_dmamux_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
-       SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
-                          stm32_dmamux_runtime_resume, NULL)
-};
-
-static const struct of_device_id stm32_dmamux_match[] = {
-       { .compatible = "st,stm32h7-dmamux" },
-       {},
-};
-
-static struct platform_driver stm32_dmamux_driver = {
-       .probe  = stm32_dmamux_probe,
-       .driver = {
-               .name = "stm32-dmamux",
-               .of_match_table = stm32_dmamux_match,
-               .pm = &stm32_dmamux_pm_ops,
-       },
-};
-
-static int __init stm32_dmamux_init(void)
-{
-       return platform_driver_register(&stm32_dmamux_driver);
-}
-arch_initcall(stm32_dmamux_init);
-
-MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
-MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
-MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
deleted file mode 100644 (file)
index 6505081..0000000
+++ /dev/null
@@ -1,1829 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * Copyright (C) STMicroelectronics SA 2017
- * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
- *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
- *
- * Driver for STM32 MDMA controller
- *
- * Inspired by stm32-dma.c and dma-jz4780.c
- */
-
-#include <linux/bitfield.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/iopoll.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/log2.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_dma.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/reset.h>
-#include <linux/slab.h>
-
-#include "virt-dma.h"
-
-#define STM32_MDMA_GISR0               0x0000 /* MDMA Global Int Status Reg */
-
-/* MDMA Channel x interrupt/status register */
-#define STM32_MDMA_CISR(x)             (0x40 + 0x40 * (x)) /* x = 0..62 */
-#define STM32_MDMA_CISR_CRQA           BIT(16)
-#define STM32_MDMA_CISR_TCIF           BIT(4)
-#define STM32_MDMA_CISR_BTIF           BIT(3)
-#define STM32_MDMA_CISR_BRTIF          BIT(2)
-#define STM32_MDMA_CISR_CTCIF          BIT(1)
-#define STM32_MDMA_CISR_TEIF           BIT(0)
-
-/* MDMA Channel x interrupt flag clear register */
-#define STM32_MDMA_CIFCR(x)            (0x44 + 0x40 * (x))
-#define STM32_MDMA_CIFCR_CLTCIF                BIT(4)
-#define STM32_MDMA_CIFCR_CBTIF         BIT(3)
-#define STM32_MDMA_CIFCR_CBRTIF                BIT(2)
-#define STM32_MDMA_CIFCR_CCTCIF                BIT(1)
-#define STM32_MDMA_CIFCR_CTEIF         BIT(0)
-#define STM32_MDMA_CIFCR_CLEAR_ALL     (STM32_MDMA_CIFCR_CLTCIF \
-                                       | STM32_MDMA_CIFCR_CBTIF \
-                                       | STM32_MDMA_CIFCR_CBRTIF \
-                                       | STM32_MDMA_CIFCR_CCTCIF \
-                                       | STM32_MDMA_CIFCR_CTEIF)
-
-/* MDMA Channel x error status register */
-#define STM32_MDMA_CESR(x)             (0x48 + 0x40 * (x))
-#define STM32_MDMA_CESR_BSE            BIT(11)
-#define STM32_MDMA_CESR_ASR            BIT(10)
-#define STM32_MDMA_CESR_TEMD           BIT(9)
-#define STM32_MDMA_CESR_TELD           BIT(8)
-#define STM32_MDMA_CESR_TED            BIT(7)
-#define STM32_MDMA_CESR_TEA_MASK       GENMASK(6, 0)
-
-/* MDMA Channel x control register */
-#define STM32_MDMA_CCR(x)              (0x4C + 0x40 * (x))
-#define STM32_MDMA_CCR_SWRQ            BIT(16)
-#define STM32_MDMA_CCR_WEX             BIT(14)
-#define STM32_MDMA_CCR_HEX             BIT(13)
-#define STM32_MDMA_CCR_BEX             BIT(12)
-#define STM32_MDMA_CCR_SM              BIT(8)
-#define STM32_MDMA_CCR_PL_MASK         GENMASK(7, 6)
-#define STM32_MDMA_CCR_PL(n)           FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n))
-#define STM32_MDMA_CCR_TCIE            BIT(5)
-#define STM32_MDMA_CCR_BTIE            BIT(4)
-#define STM32_MDMA_CCR_BRTIE           BIT(3)
-#define STM32_MDMA_CCR_CTCIE           BIT(2)
-#define STM32_MDMA_CCR_TEIE            BIT(1)
-#define STM32_MDMA_CCR_EN              BIT(0)
-#define STM32_MDMA_CCR_IRQ_MASK                (STM32_MDMA_CCR_TCIE \
-                                       | STM32_MDMA_CCR_BTIE \
-                                       | STM32_MDMA_CCR_BRTIE \
-                                       | STM32_MDMA_CCR_CTCIE \
-                                       | STM32_MDMA_CCR_TEIE)
-
-/* MDMA Channel x transfer configuration register */
-#define STM32_MDMA_CTCR(x)             (0x50 + 0x40 * (x))
-#define STM32_MDMA_CTCR_BWM            BIT(31)
-#define STM32_MDMA_CTCR_SWRM           BIT(30)
-#define STM32_MDMA_CTCR_TRGM_MSK       GENMASK(29, 28)
-#define STM32_MDMA_CTCR_TRGM(n)                FIELD_PREP(STM32_MDMA_CTCR_TRGM_MSK, (n))
-#define STM32_MDMA_CTCR_TRGM_GET(n)    FIELD_GET(STM32_MDMA_CTCR_TRGM_MSK, (n))
-#define STM32_MDMA_CTCR_PAM_MASK       GENMASK(27, 26)
-#define STM32_MDMA_CTCR_PAM(n)         FIELD_PREP(STM32_MDMA_CTCR_PAM_MASK, (n))
-#define STM32_MDMA_CTCR_PKE            BIT(25)
-#define STM32_MDMA_CTCR_TLEN_MSK       GENMASK(24, 18)
-#define STM32_MDMA_CTCR_TLEN(n)                FIELD_PREP(STM32_MDMA_CTCR_TLEN_MSK, (n))
-#define STM32_MDMA_CTCR_TLEN_GET(n)    FIELD_GET(STM32_MDMA_CTCR_TLEN_MSK, (n))
-#define STM32_MDMA_CTCR_LEN2_MSK       GENMASK(25, 18)
-#define STM32_MDMA_CTCR_LEN2(n)                FIELD_PREP(STM32_MDMA_CTCR_LEN2_MSK, (n))
-#define STM32_MDMA_CTCR_LEN2_GET(n)    FIELD_GET(STM32_MDMA_CTCR_LEN2_MSK, (n))
-#define STM32_MDMA_CTCR_DBURST_MASK    GENMASK(17, 15)
-#define STM32_MDMA_CTCR_DBURST(n)      FIELD_PREP(STM32_MDMA_CTCR_DBURST_MASK, (n))
-#define STM32_MDMA_CTCR_SBURST_MASK    GENMASK(14, 12)
-#define STM32_MDMA_CTCR_SBURST(n)      FIELD_PREP(STM32_MDMA_CTCR_SBURST_MASK, (n))
-#define STM32_MDMA_CTCR_DINCOS_MASK    GENMASK(11, 10)
-#define STM32_MDMA_CTCR_DINCOS(n)      FIELD_PREP(STM32_MDMA_CTCR_DINCOS_MASK, (n))
-#define STM32_MDMA_CTCR_SINCOS_MASK    GENMASK(9, 8)
-#define STM32_MDMA_CTCR_SINCOS(n)      FIELD_PREP(STM32_MDMA_CTCR_SINCOS_MASK, (n))
-#define STM32_MDMA_CTCR_DSIZE_MASK     GENMASK(7, 6)
-#define STM32_MDMA_CTCR_DSIZE(n)       FIELD_PREP(STM32_MDMA_CTCR_DSIZE_MASK, (n))
-#define STM32_MDMA_CTCR_SSIZE_MASK     GENMASK(5, 4)
-#define STM32_MDMA_CTCR_SSIZE(n)       FIELD_PREP(STM32_MDMA_CTCR_SSIZE_MASK, (n))
-#define STM32_MDMA_CTCR_DINC_MASK      GENMASK(3, 2)
-#define STM32_MDMA_CTCR_DINC(n)                FIELD_PREP(STM32_MDMA_CTCR_DINC_MASK, (n))
-#define STM32_MDMA_CTCR_SINC_MASK      GENMASK(1, 0)
-#define STM32_MDMA_CTCR_SINC(n)                FIELD_PREP(STM32_MDMA_CTCR_SINC_MASK, (n))
-#define STM32_MDMA_CTCR_CFG_MASK       (STM32_MDMA_CTCR_SINC_MASK \
-                                       | STM32_MDMA_CTCR_DINC_MASK \
-                                       | STM32_MDMA_CTCR_SINCOS_MASK \
-                                       | STM32_MDMA_CTCR_DINCOS_MASK \
-                                       | STM32_MDMA_CTCR_LEN2_MSK \
-                                       | STM32_MDMA_CTCR_TRGM_MSK)
-
-/* MDMA Channel x block number of data register */
-#define STM32_MDMA_CBNDTR(x)           (0x54 + 0x40 * (x))
-#define STM32_MDMA_CBNDTR_BRC_MK       GENMASK(31, 20)
-#define STM32_MDMA_CBNDTR_BRC(n)       FIELD_PREP(STM32_MDMA_CBNDTR_BRC_MK, (n))
-#define STM32_MDMA_CBNDTR_BRC_GET(n)   FIELD_GET(STM32_MDMA_CBNDTR_BRC_MK, (n))
-
-#define STM32_MDMA_CBNDTR_BRDUM                BIT(19)
-#define STM32_MDMA_CBNDTR_BRSUM                BIT(18)
-#define STM32_MDMA_CBNDTR_BNDT_MASK    GENMASK(16, 0)
-#define STM32_MDMA_CBNDTR_BNDT(n)      FIELD_PREP(STM32_MDMA_CBNDTR_BNDT_MASK, (n))
-
-/* MDMA Channel x source address register */
-#define STM32_MDMA_CSAR(x)             (0x58 + 0x40 * (x))
-
-/* MDMA Channel x destination address register */
-#define STM32_MDMA_CDAR(x)             (0x5C + 0x40 * (x))
-
-/* MDMA Channel x block repeat address update register */
-#define STM32_MDMA_CBRUR(x)            (0x60 + 0x40 * (x))
-#define STM32_MDMA_CBRUR_DUV_MASK      GENMASK(31, 16)
-#define STM32_MDMA_CBRUR_DUV(n)                FIELD_PREP(STM32_MDMA_CBRUR_DUV_MASK, (n))
-#define STM32_MDMA_CBRUR_SUV_MASK      GENMASK(15, 0)
-#define STM32_MDMA_CBRUR_SUV(n)                FIELD_PREP(STM32_MDMA_CBRUR_SUV_MASK, (n))
-
-/* MDMA Channel x link address register */
-#define STM32_MDMA_CLAR(x)             (0x64 + 0x40 * (x))
-
-/* MDMA Channel x trigger and bus selection register */
-#define STM32_MDMA_CTBR(x)             (0x68 + 0x40 * (x))
-#define STM32_MDMA_CTBR_DBUS           BIT(17)
-#define STM32_MDMA_CTBR_SBUS           BIT(16)
-#define STM32_MDMA_CTBR_TSEL_MASK      GENMASK(5, 0)
-#define STM32_MDMA_CTBR_TSEL(n)                FIELD_PREP(STM32_MDMA_CTBR_TSEL_MASK, (n))
-
-/* MDMA Channel x mask address register */
-#define STM32_MDMA_CMAR(x)             (0x70 + 0x40 * (x))
-
-/* MDMA Channel x mask data register */
-#define STM32_MDMA_CMDR(x)             (0x74 + 0x40 * (x))
-
-#define STM32_MDMA_MAX_BUF_LEN         128
-#define STM32_MDMA_MAX_BLOCK_LEN       65536
-#define STM32_MDMA_MAX_CHANNELS                32
-#define STM32_MDMA_MAX_REQUESTS                256
-#define STM32_MDMA_MAX_BURST           128
-#define STM32_MDMA_VERY_HIGH_PRIORITY  0x3
-
-enum stm32_mdma_trigger_mode {
-       STM32_MDMA_BUFFER,
-       STM32_MDMA_BLOCK,
-       STM32_MDMA_BLOCK_REP,
-       STM32_MDMA_LINKED_LIST,
-};
-
-enum stm32_mdma_width {
-       STM32_MDMA_BYTE,
-       STM32_MDMA_HALF_WORD,
-       STM32_MDMA_WORD,
-       STM32_MDMA_DOUBLE_WORD,
-};
-
-enum stm32_mdma_inc_mode {
-       STM32_MDMA_FIXED = 0,
-       STM32_MDMA_INC = 2,
-       STM32_MDMA_DEC = 3,
-};
-
-struct stm32_mdma_chan_config {
-       u32 request;
-       u32 priority_level;
-       u32 transfer_config;
-       u32 mask_addr;
-       u32 mask_data;
-       bool m2m_hw; /* True when MDMA is triggered by STM32 DMA */
-};
-
-struct stm32_mdma_hwdesc {
-       u32 ctcr;
-       u32 cbndtr;
-       u32 csar;
-       u32 cdar;
-       u32 cbrur;
-       u32 clar;
-       u32 ctbr;
-       u32 dummy;
-       u32 cmar;
-       u32 cmdr;
-} __aligned(64);
-
-struct stm32_mdma_desc_node {
-       struct stm32_mdma_hwdesc *hwdesc;
-       dma_addr_t hwdesc_phys;
-};
-
-struct stm32_mdma_desc {
-       struct virt_dma_desc vdesc;
-       u32 ccr;
-       bool cyclic;
-       u32 count;
-       struct stm32_mdma_desc_node node[] __counted_by(count);
-};
-
-struct stm32_mdma_dma_config {
-       u32 request;    /* STM32 DMA channel stream id, triggering MDMA */
-       u32 cmar;       /* STM32 DMA interrupt flag clear register address */
-       u32 cmdr;       /* STM32 DMA Transfer Complete flag */
-};
-
-struct stm32_mdma_chan {
-       struct virt_dma_chan vchan;
-       struct dma_pool *desc_pool;
-       u32 id;
-       struct stm32_mdma_desc *desc;
-       u32 curr_hwdesc;
-       struct dma_slave_config dma_config;
-       struct stm32_mdma_chan_config chan_config;
-       bool busy;
-       u32 mem_burst;
-       u32 mem_width;
-};
-
-struct stm32_mdma_device {
-       struct dma_device ddev;
-       void __iomem *base;
-       struct clk *clk;
-       int irq;
-       u32 nr_channels;
-       u32 nr_requests;
-       u32 nr_ahb_addr_masks;
-       u32 chan_reserved;
-       struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
-       u32 ahb_addr_masks[] __counted_by(nr_ahb_addr_masks);
-};
-
-static struct stm32_mdma_device *stm32_mdma_get_dev(
-       struct stm32_mdma_chan *chan)
-{
-       return container_of(chan->vchan.chan.device, struct stm32_mdma_device,
-                           ddev);
-}
-
-static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c)
-{
-       return container_of(c, struct stm32_mdma_chan, vchan.chan);
-}
-
-static struct stm32_mdma_desc *to_stm32_mdma_desc(struct virt_dma_desc *vdesc)
-{
-       return container_of(vdesc, struct stm32_mdma_desc, vdesc);
-}
-
-static struct device *chan2dev(struct stm32_mdma_chan *chan)
-{
-       return &chan->vchan.chan.dev->device;
-}
-
-static struct device *mdma2dev(struct stm32_mdma_device *mdma_dev)
-{
-       return mdma_dev->ddev.dev;
-}
-
-static u32 stm32_mdma_read(struct stm32_mdma_device *dmadev, u32 reg)
-{
-       return readl_relaxed(dmadev->base + reg);
-}
-
-static void stm32_mdma_write(struct stm32_mdma_device *dmadev, u32 reg, u32 val)
-{
-       writel_relaxed(val, dmadev->base + reg);
-}
-
-static void stm32_mdma_set_bits(struct stm32_mdma_device *dmadev, u32 reg,
-                               u32 mask)
-{
-       void __iomem *addr = dmadev->base + reg;
-
-       writel_relaxed(readl_relaxed(addr) | mask, addr);
-}
-
-static void stm32_mdma_clr_bits(struct stm32_mdma_device *dmadev, u32 reg,
-                               u32 mask)
-{
-       void __iomem *addr = dmadev->base + reg;
-
-       writel_relaxed(readl_relaxed(addr) & ~mask, addr);
-}
-
-static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
-               struct stm32_mdma_chan *chan, u32 count)
-{
-       struct stm32_mdma_desc *desc;
-       int i;
-
-       desc = kzalloc(struct_size(desc, node, count), GFP_NOWAIT);
-       if (!desc)
-               return NULL;
-       desc->count = count;
-
-       for (i = 0; i < count; i++) {
-               desc->node[i].hwdesc =
-                       dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
-                                      &desc->node[i].hwdesc_phys);
-               if (!desc->node[i].hwdesc)
-                       goto err;
-       }
-
-       return desc;
-
-err:
-       dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
-       while (--i >= 0)
-               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
-                             desc->node[i].hwdesc_phys);
-       kfree(desc);
-       return NULL;
-}
-
-static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
-{
-       struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
-       int i;
-
-       for (i = 0; i < desc->count; i++)
-               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
-                             desc->node[i].hwdesc_phys);
-       kfree(desc);
-}
-
-static int stm32_mdma_get_width(struct stm32_mdma_chan *chan,
-                               enum dma_slave_buswidth width)
-{
-       switch (width) {
-       case DMA_SLAVE_BUSWIDTH_1_BYTE:
-       case DMA_SLAVE_BUSWIDTH_2_BYTES:
-       case DMA_SLAVE_BUSWIDTH_4_BYTES:
-       case DMA_SLAVE_BUSWIDTH_8_BYTES:
-               return ffs(width) - 1;
-       default:
-               dev_err(chan2dev(chan), "Dma bus width %i not supported\n",
-                       width);
-               return -EINVAL;
-       }
-}
-
-static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
-                                                       u32 buf_len, u32 tlen)
-{
-       enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
-
-       for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
-            max_width > DMA_SLAVE_BUSWIDTH_1_BYTE;
-            max_width >>= 1) {
-               /*
-                * Address and buffer length both have to be aligned on
-                * bus width
-                */
-               if ((((buf_len | addr) & (max_width - 1)) == 0) &&
-                   tlen >= max_width)
-                       break;
-       }
-
-       return max_width;
-}
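-
-/*
- * Worked example (hypothetical values): for addr = 0x20000004, buf_len = 24
- * and tlen = 128, the 8-byte width is rejected (the address is not 8-byte
- * aligned), but both address and length are 4-byte aligned, so
- * DMA_SLAVE_BUSWIDTH_4_BYTES is returned.
- */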
-
-static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
-                                    enum dma_slave_buswidth width)
-{
-       u32 best_burst;
-
-       best_burst = min((u32)1 << __ffs(tlen | buf_len),
-                        max_burst * width) / width;
-
-       return (best_burst > 0) ? best_burst : 1;
-}
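-
-/*
- * Worked example (hypothetical values): with buf_len = 96, tlen = 128,
- * max_burst = 32 beats and a 4-byte width, 1 << __ffs(128 | 96) selects 32
- * contiguous bytes; min(32, 32 * 4) = 32 bytes, i.e. a best burst of
- * 32 / 4 = 8 beats.
- */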
-
-static int stm32_mdma_disable_chan(struct stm32_mdma_chan *chan)
-{
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       u32 ccr, cisr, id, reg;
-       int ret;
-
-       id = chan->id;
-       reg = STM32_MDMA_CCR(id);
-
-       /* Disable interrupts */
-       stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_IRQ_MASK);
-
-       ccr = stm32_mdma_read(dmadev, reg);
-       if (ccr & STM32_MDMA_CCR_EN) {
-               stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_EN);
-
-               /* Ensure that any ongoing transfer has been completed */
-               ret = readl_relaxed_poll_timeout_atomic(
-                               dmadev->base + STM32_MDMA_CISR(id), cisr,
-                               (cisr & STM32_MDMA_CISR_CTCIF), 10, 1000);
-               if (ret) {
-                       dev_err(chan2dev(chan), "%s: timeout!\n", __func__);
-                       return -EBUSY;
-               }
-       }
-
-       return 0;
-}
-
-static void stm32_mdma_stop(struct stm32_mdma_chan *chan)
-{
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       u32 status;
-       int ret;
-
-       /* Disable DMA */
-       ret = stm32_mdma_disable_chan(chan);
-       if (ret < 0)
-               return;
-
-       /* Clear interrupt status if it is there */
-       status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
-       if (status) {
-               dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
-                       __func__, status);
-               stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
-       }
-
-       chan->busy = false;
-}
-
-static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr,
-                              u32 ctbr_mask, u32 src_addr)
-{
-       u32 mask;
-       int i;
-
-       /* Check if memory device is on AHB or AXI */
-       *ctbr &= ~ctbr_mask;
-       mask = src_addr & 0xF0000000;
-       for (i = 0; i < dmadev->nr_ahb_addr_masks; i++) {
-               if (mask == dmadev->ahb_addr_masks[i]) {
-                       *ctbr |= ctbr_mask;
-                       break;
-               }
-       }
-}
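-
-/*
- * Illustration (hypothetical DT value): with ahb_addr_masks = { 0x20000000 },
- * a buffer at 0x20004000 matches (0x20004000 & 0xF0000000 == 0x20000000),
- * so the given bus bit (SBUS or DBUS) is set to route the access through
- * the AHB side.
- */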
-
-static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
-                                    enum dma_transfer_direction direction,
-                                    u32 *mdma_ccr, u32 *mdma_ctcr,
-                                    u32 *mdma_ctbr, dma_addr_t addr,
-                                    u32 buf_len)
-{
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
-       enum dma_slave_buswidth src_addr_width, dst_addr_width;
-       phys_addr_t src_addr, dst_addr;
-       int src_bus_width, dst_bus_width;
-       u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
-       u32 ccr, ctcr, ctbr, tlen;
-
-       src_addr_width = chan->dma_config.src_addr_width;
-       dst_addr_width = chan->dma_config.dst_addr_width;
-       src_maxburst = chan->dma_config.src_maxburst;
-       dst_maxburst = chan->dma_config.dst_maxburst;
-
-       ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
-       ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
-       ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
-
-       /* Enable HW request mode */
-       ctcr &= ~STM32_MDMA_CTCR_SWRM;
-
-       /* Set DINC, SINC, DINCOS, SINCOS, TRGM and TLEN retrieved from DT */
-       ctcr &= ~STM32_MDMA_CTCR_CFG_MASK;
-       ctcr |= chan_config->transfer_config & STM32_MDMA_CTCR_CFG_MASK;
-
-       /*
-        * For the buffer transfer length (TLEN) we have to program
-        * the number of bytes minus 1 in the CTCR register
-        */
-       tlen = STM32_MDMA_CTCR_LEN2_GET(ctcr);
-       ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK;
-       ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
-
-       /* Disable Pack Enable */
-       ctcr &= ~STM32_MDMA_CTCR_PKE;
-
-       /* Check burst size constraints */
-       if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST ||
-           dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) {
-               dev_err(chan2dev(chan),
-                       "burst size * bus width higher than %d bytes\n",
-                       STM32_MDMA_MAX_BURST);
-               return -EINVAL;
-       }
-
-       if ((!is_power_of_2(src_maxburst) && src_maxburst > 0) ||
-           (!is_power_of_2(dst_maxburst) && dst_maxburst > 0)) {
-               dev_err(chan2dev(chan), "burst size must be a power of 2\n");
-               return -EINVAL;
-       }
-
-       /*
-        * Configure channel control:
-        * - Clear SW request, as this is a HW request in this case
-        * - Clear WEX, HEX and BEX bits
-        * - Set priority level
-        */
-       ccr &= ~(STM32_MDMA_CCR_SWRQ | STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
-                STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK);
-       ccr |= STM32_MDMA_CCR_PL(chan_config->priority_level);
-
-       /* Configure Trigger selection */
-       ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
-       ctbr |= STM32_MDMA_CTBR_TSEL(chan_config->request);
-
-       switch (direction) {
-       case DMA_MEM_TO_DEV:
-               dst_addr = chan->dma_config.dst_addr;
-
-               /* Set device data size */
-               if (chan_config->m2m_hw)
-                       dst_addr_width = stm32_mdma_get_max_width(dst_addr, buf_len,
-                                                                 STM32_MDMA_MAX_BUF_LEN);
-               dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
-               if (dst_bus_width < 0)
-                       return dst_bus_width;
-               ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
-               ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);
-               if (chan_config->m2m_hw) {
-                       ctcr &= ~STM32_MDMA_CTCR_DINCOS_MASK;
-                       ctcr |= STM32_MDMA_CTCR_DINCOS(dst_bus_width);
-               }
-
-               /* Set device burst value */
-               if (chan_config->m2m_hw)
-                       dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
-
-               dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
-                                                          dst_maxburst,
-                                                          dst_addr_width);
-               chan->mem_burst = dst_best_burst;
-               ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
-               ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));
-
-               /* Set memory data size */
-               src_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
-               chan->mem_width = src_addr_width;
-               src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
-               if (src_bus_width < 0)
-                       return src_bus_width;
-               ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK |
-                       STM32_MDMA_CTCR_SINCOS_MASK;
-               ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width) |
-                       STM32_MDMA_CTCR_SINCOS(src_bus_width);
-
-               /* Set memory burst value */
-               src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
-               src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
-                                                          src_maxburst,
-                                                          src_addr_width);
-               chan->mem_burst = src_best_burst;
-               ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
-               ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));
-
-               /* Select bus */
-               stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
-                                  dst_addr);
-
-               if (dst_bus_width != src_bus_width)
-                       ctcr |= STM32_MDMA_CTCR_PKE;
-
-               /* Set destination address */
-               stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr);
-               break;
-
-       case DMA_DEV_TO_MEM:
-               src_addr = chan->dma_config.src_addr;
-
-               /* Set device data size */
-               if (chan_config->m2m_hw)
-                       src_addr_width = stm32_mdma_get_max_width(src_addr, buf_len,
-                                                                 STM32_MDMA_MAX_BUF_LEN);
-
-               src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
-               if (src_bus_width < 0)
-                       return src_bus_width;
-               ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
-               ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);
-               if (chan_config->m2m_hw) {
-                       ctcr &= ~STM32_MDMA_CTCR_SINCOS_MASK;
-                       ctcr |= STM32_MDMA_CTCR_SINCOS(src_bus_width);
-               }
-
-               /* Set device burst value */
-               if (chan_config->m2m_hw)
-                       src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
-
-               src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
-                                                          src_maxburst,
-                                                          src_addr_width);
-               ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
-               ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));
-
-               /* Set memory data size */
-               dst_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
-               chan->mem_width = dst_addr_width;
-               dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
-               if (dst_bus_width < 0)
-                       return dst_bus_width;
-               ctcr &= ~(STM32_MDMA_CTCR_DSIZE_MASK |
-                       STM32_MDMA_CTCR_DINCOS_MASK);
-               ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
-                       STM32_MDMA_CTCR_DINCOS(dst_bus_width);
-
-               /* Set memory burst value */
-               dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
-               dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
-                                                          dst_maxburst,
-                                                          dst_addr_width);
-               ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
-               ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));
-
-               /* Select bus */
-               stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
-                                  src_addr);
-
-               if (dst_bus_width != src_bus_width)
-                       ctcr |= STM32_MDMA_CTCR_PKE;
-
-               /* Set source address */
-               stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr);
-               break;
-
-       default:
-               dev_err(chan2dev(chan), "Dma direction is not supported\n");
-               return -EINVAL;
-       }
-
-       *mdma_ccr = ccr;
-       *mdma_ctcr = ctcr;
-       *mdma_ctbr = ctbr;
-
-       return 0;
-}
-
-static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
-                                  struct stm32_mdma_desc_node *node)
-{
-       dev_dbg(chan2dev(chan), "hwdesc:  %pad\n", &node->hwdesc_phys);
-       dev_dbg(chan2dev(chan), "CTCR:    0x%08x\n", node->hwdesc->ctcr);
-       dev_dbg(chan2dev(chan), "CBNDTR:  0x%08x\n", node->hwdesc->cbndtr);
-       dev_dbg(chan2dev(chan), "CSAR:    0x%08x\n", node->hwdesc->csar);
-       dev_dbg(chan2dev(chan), "CDAR:    0x%08x\n", node->hwdesc->cdar);
-       dev_dbg(chan2dev(chan), "CBRUR:   0x%08x\n", node->hwdesc->cbrur);
-       dev_dbg(chan2dev(chan), "CLAR:    0x%08x\n", node->hwdesc->clar);
-       dev_dbg(chan2dev(chan), "CTBR:    0x%08x\n", node->hwdesc->ctbr);
-       dev_dbg(chan2dev(chan), "CMAR:    0x%08x\n", node->hwdesc->cmar);
-       dev_dbg(chan2dev(chan), "CMDR:    0x%08x\n\n", node->hwdesc->cmdr);
-}
-
-static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
-                                   struct stm32_mdma_desc *desc,
-                                   enum dma_transfer_direction dir, u32 count,
-                                   dma_addr_t src_addr, dma_addr_t dst_addr,
-                                   u32 len, u32 ctcr, u32 ctbr, bool is_last,
-                                   bool is_first, bool is_cyclic)
-{
-       struct stm32_mdma_chan_config *config = &chan->chan_config;
-       struct stm32_mdma_hwdesc *hwdesc;
-       u32 next = count + 1;
-
-       hwdesc = desc->node[count].hwdesc;
-       hwdesc->ctcr = ctcr;
-       hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
-                       STM32_MDMA_CBNDTR_BRDUM |
-                       STM32_MDMA_CBNDTR_BRSUM |
-                       STM32_MDMA_CBNDTR_BNDT_MASK);
-       hwdesc->cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
-       hwdesc->csar = src_addr;
-       hwdesc->cdar = dst_addr;
-       hwdesc->cbrur = 0;
-       hwdesc->ctbr = ctbr;
-       hwdesc->cmar = config->mask_addr;
-       hwdesc->cmdr = config->mask_data;
-
-       if (is_last) {
-               if (is_cyclic)
-                       hwdesc->clar = desc->node[0].hwdesc_phys;
-               else
-                       hwdesc->clar = 0;
-       } else {
-               hwdesc->clar = desc->node[next].hwdesc_phys;
-       }
-
-       stm32_mdma_dump_hwdesc(chan, &desc->node[count]);
-}
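-
-/*
- * Illustration: for a three-node list, node[0].clar and node[1].clar point
- * at the next node's hwdesc_phys, while node[2].clar is 0 to end the list,
- * or node[0].hwdesc_phys again when the descriptor is cyclic.
- */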
-
-static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
-                                struct stm32_mdma_desc *desc,
-                                struct scatterlist *sgl, u32 sg_len,
-                                enum dma_transfer_direction direction)
-{
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       struct dma_slave_config *dma_config = &chan->dma_config;
-       struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
-       struct scatterlist *sg;
-       dma_addr_t src_addr, dst_addr;
-       u32 m2m_hw_period, ccr, ctcr, ctbr;
-       int i, ret = 0;
-
-       if (chan_config->m2m_hw)
-               m2m_hw_period = sg_dma_len(sgl);
-
-       for_each_sg(sgl, sg, sg_len, i) {
-               if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
-                       dev_err(chan2dev(chan), "Invalid block len\n");
-                       return -EINVAL;
-               }
-
-               if (direction == DMA_MEM_TO_DEV) {
-                       src_addr = sg_dma_address(sg);
-                       dst_addr = dma_config->dst_addr;
-                       if (chan_config->m2m_hw && (i & 1))
-                               dst_addr += m2m_hw_period;
-                       ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
-                                                       &ctcr, &ctbr, src_addr,
-                                                       sg_dma_len(sg));
-                       stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
-                                          src_addr);
-               } else {
-                       src_addr = dma_config->src_addr;
-                       if (chan_config->m2m_hw && (i & 1))
-                               src_addr += m2m_hw_period;
-                       dst_addr = sg_dma_address(sg);
-                       ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
-                                                       &ctcr, &ctbr, dst_addr,
-                                                       sg_dma_len(sg));
-                       stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
-                                          dst_addr);
-               }
-
-               if (ret < 0)
-                       return ret;
-
-               stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
-                                       dst_addr, sg_dma_len(sg), ctcr, ctbr,
-                                       i == sg_len - 1, i == 0, false);
-       }
-
-       /* Enable interrupts */
-       ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
-       ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
-       desc->ccr = ccr;
-
-       return 0;
-}
-
-static struct dma_async_tx_descriptor *
-stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
-                        u32 sg_len, enum dma_transfer_direction direction,
-                        unsigned long flags, void *context)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
-       struct stm32_mdma_desc *desc;
-       int i, ret;
-
-       /*
-        * Once the DMA channel is set up in cyclic mode, it cannot be
-        * reassigned. The DMA channel needs to be aborted or terminated
-        * to allow another request.
-        */
-       if (chan->desc && chan->desc->cyclic) {
-               dev_err(chan2dev(chan),
-                       "Request not allowed when dma in cyclic mode\n");
-               return NULL;
-       }
-
-       desc = stm32_mdma_alloc_desc(chan, sg_len);
-       if (!desc)
-               return NULL;
-
-       ret = stm32_mdma_setup_xfer(chan, desc, sgl, sg_len, direction);
-       if (ret < 0)
-               goto xfer_setup_err;
-
-       /*
-        * In case of an M2M HW transfer triggered by STM32 DMA, the transfer
-        * complete flag must not be cleared by hardware, so that the CPU can
-        * rearm the STM32 DMA with the next sg element and update the
-        * dmaengine framework state.
-        */
-       if (chan_config->m2m_hw && direction == DMA_MEM_TO_DEV) {
-               struct stm32_mdma_hwdesc *hwdesc;
-
-               for (i = 0; i < sg_len; i++) {
-                       hwdesc = desc->node[i].hwdesc;
-                       hwdesc->cmar = 0;
-                       hwdesc->cmdr = 0;
-               }
-       }
-
-       desc->cyclic = false;
-
-       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-
-xfer_setup_err:
-       for (i = 0; i < desc->count; i++)
-               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
-                             desc->node[i].hwdesc_phys);
-       kfree(desc);
-       return NULL;
-}
-
-static struct dma_async_tx_descriptor *
-stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
-                          size_t buf_len, size_t period_len,
-                          enum dma_transfer_direction direction,
-                          unsigned long flags)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       struct dma_slave_config *dma_config = &chan->dma_config;
-       struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
-       struct stm32_mdma_desc *desc;
-       dma_addr_t src_addr, dst_addr;
-       u32 ccr, ctcr, ctbr, count;
-       int i, ret;
-
-       /*
-        * Once the DMA channel is set up in cyclic mode, it cannot be
-        * reassigned. The DMA channel needs to be aborted or terminated
-        * to allow another request.
-        */
-       if (chan->desc && chan->desc->cyclic) {
-               dev_err(chan2dev(chan),
-                       "Request not allowed when dma in cyclic mode\n");
-               return NULL;
-       }
-
-       if (!buf_len || !period_len || period_len > STM32_MDMA_MAX_BLOCK_LEN) {
-               dev_err(chan2dev(chan), "Invalid buffer/period len\n");
-               return NULL;
-       }
-
-       if (buf_len % period_len) {
-               dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
-               return NULL;
-       }
-
-       count = buf_len / period_len;
-
-       desc = stm32_mdma_alloc_desc(chan, count);
-       if (!desc)
-               return NULL;
-
-       /* Select bus */
-       if (direction == DMA_MEM_TO_DEV) {
-               src_addr = buf_addr;
-               ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
-                                               &ctbr, src_addr, period_len);
-               stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
-                                  src_addr);
-       } else {
-               dst_addr = buf_addr;
-               ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
-                                               &ctbr, dst_addr, period_len);
-               stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
-                                  dst_addr);
-       }
-
-       if (ret < 0)
-               goto xfer_setup_err;
-
-       /* Enable interrupts */
-       ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
-       ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE;
-       desc->ccr = ccr;
-
-       /* Configure hwdesc list */
-       for (i = 0; i < count; i++) {
-               if (direction == DMA_MEM_TO_DEV) {
-                       src_addr = buf_addr + i * period_len;
-                       dst_addr = dma_config->dst_addr;
-                       if (chan_config->m2m_hw && (i & 1))
-                               dst_addr += period_len;
-               } else {
-                       src_addr = dma_config->src_addr;
-                       if (chan_config->m2m_hw && (i & 1))
-                               src_addr += period_len;
-                       dst_addr = buf_addr + i * period_len;
-               }
-
-               stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
-                                       dst_addr, period_len, ctcr, ctbr,
-                                       i == count - 1, i == 0, true);
-       }
-
-       desc->cyclic = true;
-
-       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-
-xfer_setup_err:
-       for (i = 0; i < desc->count; i++)
-               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
-                             desc->node[i].hwdesc_phys);
-       kfree(desc);
-       return NULL;
-}
-
-static struct dma_async_tx_descriptor *
-stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
-                          size_t len, unsigned long flags)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       enum dma_slave_buswidth max_width;
-       struct stm32_mdma_desc *desc;
-       struct stm32_mdma_hwdesc *hwdesc;
-       u32 ccr, ctcr, ctbr, cbndtr, count, max_burst, mdma_burst;
-       u32 best_burst, tlen;
-       size_t xfer_count, offset;
-       int src_bus_width, dst_bus_width;
-       int i;
-
-       /*
-        * Once the DMA channel is set up in cyclic mode, it cannot be
-        * reassigned. The DMA channel needs to be aborted or terminated
-        * to allow another request.
-        */
-       if (chan->desc && chan->desc->cyclic) {
-               dev_err(chan2dev(chan),
-                       "Request not allowed when dma in cyclic mode\n");
-               return NULL;
-       }
-
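-       /*
-        * One hwdesc per block: e.g. a len of 2.5 * STM32_MDMA_MAX_BLOCK_LEN
-        * is split into count = 3 linked nodes.
-        */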
-       count = DIV_ROUND_UP(len, STM32_MDMA_MAX_BLOCK_LEN);
-       desc = stm32_mdma_alloc_desc(chan, count);
-       if (!desc)
-               return NULL;
-
-       ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
-       ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
-       ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
-       cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
-
-       /* Enable sw req, some interrupts and clear other bits */
-       ccr &= ~(STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
-                STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK |
-                STM32_MDMA_CCR_IRQ_MASK);
-       ccr |= STM32_MDMA_CCR_TEIE;
-
-       /* Enable SW request mode, dest/src inc and clear other bits */
-       ctcr &= ~(STM32_MDMA_CTCR_BWM | STM32_MDMA_CTCR_TRGM_MSK |
-                 STM32_MDMA_CTCR_PAM_MASK | STM32_MDMA_CTCR_PKE |
-                 STM32_MDMA_CTCR_TLEN_MSK | STM32_MDMA_CTCR_DBURST_MASK |
-                 STM32_MDMA_CTCR_SBURST_MASK | STM32_MDMA_CTCR_DINCOS_MASK |
-                 STM32_MDMA_CTCR_SINCOS_MASK | STM32_MDMA_CTCR_DSIZE_MASK |
-                 STM32_MDMA_CTCR_SSIZE_MASK | STM32_MDMA_CTCR_DINC_MASK |
-                 STM32_MDMA_CTCR_SINC_MASK);
-       ctcr |= STM32_MDMA_CTCR_SWRM | STM32_MDMA_CTCR_SINC(STM32_MDMA_INC) |
-               STM32_MDMA_CTCR_DINC(STM32_MDMA_INC);
-
-       /* Reset HW request */
-       ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
-
-       /* Select bus */
-       stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src);
-       stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dest);
-
-       /* Clear CBNDTR registers */
-       cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | STM32_MDMA_CBNDTR_BRDUM |
-                       STM32_MDMA_CBNDTR_BRSUM | STM32_MDMA_CBNDTR_BNDT_MASK);
-
-       if (len <= STM32_MDMA_MAX_BLOCK_LEN) {
-               cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
-               if (len <= STM32_MDMA_MAX_BUF_LEN) {
-                       /* Setup a buffer transfer */
-                       ccr |= STM32_MDMA_CCR_TCIE | STM32_MDMA_CCR_CTCIE;
-                       ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BUFFER);
-               } else {
-                       /* Setup a block transfer */
-                       ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
-                       ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BLOCK);
-               }
-
-               tlen = STM32_MDMA_MAX_BUF_LEN;
-               ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
-
-               /* Set source best burst size */
-               max_width = stm32_mdma_get_max_width(src, len, tlen);
-               src_bus_width = stm32_mdma_get_width(chan, max_width);
-
-               max_burst = tlen / max_width;
-               best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
-                                                      max_width);
-               mdma_burst = ilog2(best_burst);
-
-               ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
-                       STM32_MDMA_CTCR_SSIZE(src_bus_width) |
-                       STM32_MDMA_CTCR_SINCOS(src_bus_width);
-
-               /* Set destination best burst size */
-               max_width = stm32_mdma_get_max_width(dest, len, tlen);
-               dst_bus_width = stm32_mdma_get_width(chan, max_width);
-
-               max_burst = tlen / max_width;
-               best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
-                                                      max_width);
-               mdma_burst = ilog2(best_burst);
-
-               ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
-                       STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
-                       STM32_MDMA_CTCR_DINCOS(dst_bus_width);
-
-               if (dst_bus_width != src_bus_width)
-                       ctcr |= STM32_MDMA_CTCR_PKE;
-
-               /* Prepare hardware descriptor */
-               hwdesc = desc->node[0].hwdesc;
-               hwdesc->ctcr = ctcr;
-               hwdesc->cbndtr = cbndtr;
-               hwdesc->csar = src;
-               hwdesc->cdar = dest;
-               hwdesc->cbrur = 0;
-               hwdesc->clar = 0;
-               hwdesc->ctbr = ctbr;
-               hwdesc->cmar = 0;
-               hwdesc->cmdr = 0;
-
-               stm32_mdma_dump_hwdesc(chan, &desc->node[0]);
-       } else {
-               /* Setup a LLI transfer */
-               ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
-                       STM32_MDMA_CTCR_TLEN((STM32_MDMA_MAX_BUF_LEN - 1));
-               ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
-               tlen = STM32_MDMA_MAX_BUF_LEN;
-
-               for (i = 0, offset = 0; offset < len;
-                    i++, offset += xfer_count) {
-                       xfer_count = min_t(size_t, len - offset,
-                                          STM32_MDMA_MAX_BLOCK_LEN);
-
-                       /* Set source best burst size */
-                       max_width = stm32_mdma_get_max_width(src, len, tlen);
-                       src_bus_width = stm32_mdma_get_width(chan, max_width);
-
-                       max_burst = tlen / max_width;
-                       best_burst = stm32_mdma_get_best_burst(len, tlen,
-                                                              max_burst,
-                                                              max_width);
-                       mdma_burst = ilog2(best_burst);
-
-                       ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
-                               STM32_MDMA_CTCR_SSIZE(src_bus_width) |
-                               STM32_MDMA_CTCR_SINCOS(src_bus_width);
-
-                       /* Set destination best burst size */
-                       max_width = stm32_mdma_get_max_width(dest, len, tlen);
-                       dst_bus_width = stm32_mdma_get_width(chan, max_width);
-
-                       max_burst = tlen / max_width;
-                       best_burst = stm32_mdma_get_best_burst(len, tlen,
-                                                              max_burst,
-                                                              max_width);
-                       mdma_burst = ilog2(best_burst);
-
-                       ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
-                               STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
-                               STM32_MDMA_CTCR_DINCOS(dst_bus_width);
-
-                       if (dst_bus_width != src_bus_width)
-                               ctcr |= STM32_MDMA_CTCR_PKE;
-
-                       /* Prepare hardware descriptor */
-                       stm32_mdma_setup_hwdesc(chan, desc, DMA_MEM_TO_MEM, i,
-                                               src + offset, dest + offset,
-                                               xfer_count, ctcr, ctbr,
-                                               i == count - 1, i == 0, false);
-               }
-       }
-
-       desc->ccr = ccr;
-
-       desc->cyclic = false;
-
-       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-}
-
-static void stm32_mdma_dump_reg(struct stm32_mdma_chan *chan)
-{
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-
-       dev_dbg(chan2dev(chan), "CCR:     0x%08x\n",
-               stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)));
-       dev_dbg(chan2dev(chan), "CTCR:    0x%08x\n",
-               stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)));
-       dev_dbg(chan2dev(chan), "CBNDTR:  0x%08x\n",
-               stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)));
-       dev_dbg(chan2dev(chan), "CSAR:    0x%08x\n",
-               stm32_mdma_read(dmadev, STM32_MDMA_CSAR(chan->id)));
-       dev_dbg(chan2dev(chan), "CDAR:    0x%08x\n",
-               stm32_mdma_read(dmadev, STM32_MDMA_CDAR(chan->id)));
-       dev_dbg(chan2dev(chan), "CBRUR:   0x%08x\n",
-               stm32_mdma_read(dmadev, STM32_MDMA_CBRUR(chan->id)));
-       dev_dbg(chan2dev(chan), "CLAR:    0x%08x\n",
-               stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id)));
-       dev_dbg(chan2dev(chan), "CTBR:    0x%08x\n",
-               stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)));
-       dev_dbg(chan2dev(chan), "CMAR:    0x%08x\n",
-               stm32_mdma_read(dmadev, STM32_MDMA_CMAR(chan->id)));
-       dev_dbg(chan2dev(chan), "CMDR:    0x%08x\n",
-               stm32_mdma_read(dmadev, STM32_MDMA_CMDR(chan->id)));
-}
-
-static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
-{
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       struct virt_dma_desc *vdesc;
-       struct stm32_mdma_hwdesc *hwdesc;
-       u32 id = chan->id;
-       u32 status, reg;
-
-       vdesc = vchan_next_desc(&chan->vchan);
-       if (!vdesc) {
-               chan->desc = NULL;
-               return;
-       }
-
-       list_del(&vdesc->node);
-
-       chan->desc = to_stm32_mdma_desc(vdesc);
-       hwdesc = chan->desc->node[0].hwdesc;
-       chan->curr_hwdesc = 0;
-
-       stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
-       stm32_mdma_write(dmadev, STM32_MDMA_CTCR(id), hwdesc->ctcr);
-       stm32_mdma_write(dmadev, STM32_MDMA_CBNDTR(id), hwdesc->cbndtr);
-       stm32_mdma_write(dmadev, STM32_MDMA_CSAR(id), hwdesc->csar);
-       stm32_mdma_write(dmadev, STM32_MDMA_CDAR(id), hwdesc->cdar);
-       stm32_mdma_write(dmadev, STM32_MDMA_CBRUR(id), hwdesc->cbrur);
-       stm32_mdma_write(dmadev, STM32_MDMA_CLAR(id), hwdesc->clar);
-       stm32_mdma_write(dmadev, STM32_MDMA_CTBR(id), hwdesc->ctbr);
-       stm32_mdma_write(dmadev, STM32_MDMA_CMAR(id), hwdesc->cmar);
-       stm32_mdma_write(dmadev, STM32_MDMA_CMDR(id), hwdesc->cmdr);
-
-       /* Clear interrupt status if it is there */
-       status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
-       if (status)
-               stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(id), status);
-
-       stm32_mdma_dump_reg(chan);
-
-       /* Start DMA */
-       stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN);
-
-       /* Set SW request in case of MEM2MEM transfer */
-       if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) {
-               reg = STM32_MDMA_CCR(id);
-               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
-       }
-
-       chan->busy = true;
-
-       dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
-}
-
-static void stm32_mdma_issue_pending(struct dma_chan *c)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       unsigned long flags;
-
-       spin_lock_irqsave(&chan->vchan.lock, flags);
-
-       if (!vchan_issue_pending(&chan->vchan))
-               goto end;
-
-       dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
-
-       if (!chan->desc && !chan->busy)
-               stm32_mdma_start_transfer(chan);
-
-end:
-       spin_unlock_irqrestore(&chan->vchan.lock, flags);
-}
-
-static int stm32_mdma_pause(struct dma_chan *c)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&chan->vchan.lock, flags);
-       ret = stm32_mdma_disable_chan(chan);
-       spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
-       if (!ret)
-               dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
-
-       return ret;
-}
-
-static int stm32_mdma_resume(struct dma_chan *c)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       struct stm32_mdma_hwdesc *hwdesc;
-       unsigned long flags;
-       u32 status, reg;
-
-       /* Transfer may have been terminated, or the channel is still enabled */
-       if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
-               return -EPERM;
-
-       hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
-
-       spin_lock_irqsave(&chan->vchan.lock, flags);
-
-       /* Re-configure control register */
-       stm32_mdma_write(dmadev, STM32_MDMA_CCR(chan->id), chan->desc->ccr);
-
-       /* Clear interrupt status if it is there */
-       status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
-       if (status)
-               stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
-
-       stm32_mdma_dump_reg(chan);
-
-       /* Re-start DMA */
-       reg = STM32_MDMA_CCR(chan->id);
-       stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_EN);
-
-       /* Set SW request in case of MEM2MEM transfer */
-       if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM)
-               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
-
-       spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
-       dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
-
-       return 0;
-}
-
-static int stm32_mdma_terminate_all(struct dma_chan *c)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       unsigned long flags;
-       LIST_HEAD(head);
-
-       spin_lock_irqsave(&chan->vchan.lock, flags);
-       if (chan->desc) {
-               vchan_terminate_vdesc(&chan->desc->vdesc);
-               if (chan->busy)
-                       stm32_mdma_stop(chan);
-               chan->desc = NULL;
-       }
-       vchan_get_all_descriptors(&chan->vchan, &head);
-       spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
-       vchan_dma_desc_free_list(&chan->vchan, &head);
-
-       return 0;
-}
-
-static void stm32_mdma_synchronize(struct dma_chan *c)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-
-       vchan_synchronize(&chan->vchan);
-}
-
-static int stm32_mdma_slave_config(struct dma_chan *c,
-                                  struct dma_slave_config *config)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-
-       memcpy(&chan->dma_config, config, sizeof(*config));
-
-       /* Check if user is requesting STM32 DMA to trigger MDMA */
-       if (config->peripheral_size) {
-               struct stm32_mdma_dma_config *mdma_config;
-
-               mdma_config = (struct stm32_mdma_dma_config *)chan->dma_config.peripheral_config;
-               chan->chan_config.request = mdma_config->request;
-               chan->chan_config.mask_addr = mdma_config->cmar;
-               chan->chan_config.mask_data = mdma_config->cmdr;
-               chan->chan_config.m2m_hw = true;
-       }
-
-       return 0;
-}
-
-static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
-                                     struct stm32_mdma_desc *desc,
-                                     u32 curr_hwdesc,
-                                     struct dma_tx_state *state)
-{
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       struct stm32_mdma_hwdesc *hwdesc;
-       u32 cisr, clar, cbndtr, residue, modulo, burst_size;
-       int i;
-
-       cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
-
-       residue = 0;
-       /* Get the next hw descriptor to process from current transfer */
-       clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id));
-       for (i = desc->count - 1; i >= 0; i--) {
-               hwdesc = desc->node[i].hwdesc;
-
-               if (hwdesc->clar == clar)
-                       break; /* Current transfer found, stop accumulating */
-
-               /* Accumulate residue of unprocessed hw descriptors */
-               residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
-       }
-       cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
-       residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
-
-       state->in_flight_bytes = 0;
-       if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA))
-               state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
-
-       if (!chan->mem_burst)
-               return residue;
-
-       burst_size = chan->mem_burst * chan->mem_width;
-       modulo = residue % burst_size;
-       if (modulo)
-               residue = residue - modulo + burst_size;
-
-       return residue;
-}
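-
-/*
- * Worked example for the rounding above: with mem_burst = 8 beats of
- * mem_width = 4 bytes, burst_size = 32; a raw residue of 100 bytes is
- * reported as 100 - (100 % 32) + 32 = 128, i.e. the next burst boundary,
- * consistent with DMA_RESIDUE_GRANULARITY_BURST.
- */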
-
-static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
-                                           dma_cookie_t cookie,
-                                           struct dma_tx_state *state)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       struct virt_dma_desc *vdesc;
-       enum dma_status status;
-       unsigned long flags;
-       u32 residue = 0;
-
-       status = dma_cookie_status(c, cookie, state);
-       if ((status == DMA_COMPLETE) || (!state))
-               return status;
-
-       spin_lock_irqsave(&chan->vchan.lock, flags);
-
-       vdesc = vchan_find_desc(&chan->vchan, cookie);
-       if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
-               residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state);
-       else if (vdesc)
-               residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state);
-
-       dma_set_residue(state, residue);
-
-       spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
-       return status;
-}
-
-static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
-{
-       vchan_cookie_complete(&chan->desc->vdesc);
-       chan->desc = NULL;
-       chan->busy = false;
-
-       /* Start the next transfer if another descriptor is queued */
-       stm32_mdma_start_transfer(chan);
-}
-
-static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
-{
-       struct stm32_mdma_device *dmadev = devid;
-       struct stm32_mdma_chan *chan;
-       u32 reg, id, ccr, ien, status;
-
-       /* Find out which channel generates the interrupt */
-       status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
-       if (!status) {
-               dev_dbg(mdma2dev(dmadev), "spurious interrupt\n");
-               return IRQ_NONE;
-       }
-       id = __ffs(status);
-       chan = &dmadev->chan[id];
-
-       /* Handle interrupt for the channel */
-       spin_lock(&chan->vchan.lock);
-       status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
-       /* Mask Channel ReQuest Active bit which can be set in case of MEM2MEM */
-       status &= ~STM32_MDMA_CISR_CRQA;
-       ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
-       ien = (ccr & STM32_MDMA_CCR_IRQ_MASK) >> 1;
-
-       if (!(status & ien)) {
-               spin_unlock(&chan->vchan.lock);
-               if (chan->busy)
-                       dev_warn(chan2dev(chan),
-                                "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien);
-               else
-                       dev_dbg(chan2dev(chan),
-                               "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien);
-               return IRQ_NONE;
-       }
-
-       reg = STM32_MDMA_CIFCR(id);
-
-       if (status & STM32_MDMA_CISR_TEIF) {
-               dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n",
-                       readl_relaxed(dmadev->base + STM32_MDMA_CESR(id)));
-               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
-               status &= ~STM32_MDMA_CISR_TEIF;
-       }
-
-       if (status & STM32_MDMA_CISR_CTCIF) {
-               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
-               status &= ~STM32_MDMA_CISR_CTCIF;
-               stm32_mdma_xfer_end(chan);
-       }
-
-       if (status & STM32_MDMA_CISR_BRTIF) {
-               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
-               status &= ~STM32_MDMA_CISR_BRTIF;
-       }
-
-       if (status & STM32_MDMA_CISR_BTIF) {
-               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
-               status &= ~STM32_MDMA_CISR_BTIF;
-               chan->curr_hwdesc++;
-               if (chan->desc && chan->desc->cyclic) {
-                       if (chan->curr_hwdesc == chan->desc->count)
-                               chan->curr_hwdesc = 0;
-                       vchan_cyclic_callback(&chan->desc->vdesc);
-               }
-       }
-
-       if (status & STM32_MDMA_CISR_TCIF) {
-               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
-               status &= ~STM32_MDMA_CISR_TCIF;
-       }
-
-       if (status) {
-               stm32_mdma_set_bits(dmadev, reg, status);
-               dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
-               if (!(ccr & STM32_MDMA_CCR_EN))
-                       dev_err(chan2dev(chan), "chan disabled by HW\n");
-       }
-
-       spin_unlock(&chan->vchan.lock);
-
-       return IRQ_HANDLED;
-}
-
-static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       int ret;
-
-       chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device),
-                                          c->device->dev,
-                                          sizeof(struct stm32_mdma_hwdesc),
-                                         __alignof__(struct stm32_mdma_hwdesc),
-                                          0);
-       if (!chan->desc_pool) {
-               dev_err(chan2dev(chan), "failed to allocate descriptor pool\n");
-               return -ENOMEM;
-       }
-
-       ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
-       if (ret < 0)
-               return ret;
-
-       ret = stm32_mdma_disable_chan(chan);
-       if (ret < 0)
-               pm_runtime_put(dmadev->ddev.dev);
-
-       return ret;
-}
-
-static void stm32_mdma_free_chan_resources(struct dma_chan *c)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-       unsigned long flags;
-
-       dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);
-
-       if (chan->busy) {
-               spin_lock_irqsave(&chan->vchan.lock, flags);
-               stm32_mdma_stop(chan);
-               chan->desc = NULL;
-               spin_unlock_irqrestore(&chan->vchan.lock, flags);
-       }
-
-       pm_runtime_put(dmadev->ddev.dev);
-       vchan_free_chan_resources(to_virt_chan(c));
-       dmam_pool_destroy(chan->desc_pool);
-       chan->desc_pool = NULL;
-}
-
-static bool stm32_mdma_filter_fn(struct dma_chan *c, void *fn_param)
-{
-       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
-       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
-
-       /* Check if chan is marked Secure */
-       if (dmadev->chan_reserved & BIT(chan->id))
-               return false;
-
-       return true;
-}
-
-static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
-                                           struct of_dma *ofdma)
-{
-       struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
-       dma_cap_mask_t mask = dmadev->ddev.cap_mask;
-       struct stm32_mdma_chan *chan;
-       struct dma_chan *c;
-       struct stm32_mdma_chan_config config;
-
-       if (dma_spec->args_count < 5) {
-               dev_err(mdma2dev(dmadev), "Bad number of args\n");
-               return NULL;
-       }
-
-       memset(&config, 0, sizeof(config));
-       config.request = dma_spec->args[0];
-       config.priority_level = dma_spec->args[1];
-       config.transfer_config = dma_spec->args[2];
-       config.mask_addr = dma_spec->args[3];
-       config.mask_data = dma_spec->args[4];
-
-       if (config.request >= dmadev->nr_requests) {
-               dev_err(mdma2dev(dmadev), "Bad request line\n");
-               return NULL;
-       }
-
-       if (config.priority_level > STM32_MDMA_VERY_HIGH_PRIORITY) {
-               dev_err(mdma2dev(dmadev), "Priority level not supported\n");
-               return NULL;
-       }
-
-       c = __dma_request_channel(&mask, stm32_mdma_filter_fn, &config, ofdma->of_node);
-       if (!c) {
-               dev_err(mdma2dev(dmadev), "No more channels available\n");
-               return NULL;
-       }
-
-       chan = to_stm32_mdma_chan(c);
-       chan->chan_config = config;
-
-       return c;
-}
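-
-/*
- * A client node would then request a channel with five cells, e.g. with
- * hypothetical values:
- *
- *     dmas = <&mdma1 0 0x0 0x40000 0x0 0x0>;
- *
- * i.e. request line 0, lowest priority level, transfer configuration
- * 0x40000, and no mask address/data.
- */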
-
-static const struct of_device_id stm32_mdma_of_match[] = {
-       { .compatible = "st,stm32h7-mdma", },
-       { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, stm32_mdma_of_match);
-
-static int stm32_mdma_probe(struct platform_device *pdev)
-{
-       struct stm32_mdma_chan *chan;
-       struct stm32_mdma_device *dmadev;
-       struct dma_device *dd;
-       struct device_node *of_node;
-       struct reset_control *rst;
-       u32 nr_channels, nr_requests;
-       int i, count, ret;
-
-       of_node = pdev->dev.of_node;
-       if (!of_node)
-               return -ENODEV;
-
-       ret = device_property_read_u32(&pdev->dev, "dma-channels",
-                                      &nr_channels);
-       if (ret) {
-               nr_channels = STM32_MDMA_MAX_CHANNELS;
-               dev_warn(&pdev->dev, "MDMA defaulting on %i channels\n",
-                        nr_channels);
-       }
-
-       ret = device_property_read_u32(&pdev->dev, "dma-requests",
-                                      &nr_requests);
-       if (ret) {
-               nr_requests = STM32_MDMA_MAX_REQUESTS;
-               dev_warn(&pdev->dev, "MDMA defaulting on %i request lines\n",
-                        nr_requests);
-       }
-
-       count = device_property_count_u32(&pdev->dev, "st,ahb-addr-masks");
-       if (count < 0)
-               count = 0;
-
-       dmadev = devm_kzalloc(&pdev->dev,
-                             struct_size(dmadev, ahb_addr_masks, count),
-                             GFP_KERNEL);
-       if (!dmadev)
-               return -ENOMEM;
-       dmadev->nr_ahb_addr_masks = count;
-
-       dmadev->nr_channels = nr_channels;
-       dmadev->nr_requests = nr_requests;
-       device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
-                                      dmadev->ahb_addr_masks,
-                                      count);
-
-       dmadev->base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(dmadev->base))
-               return PTR_ERR(dmadev->base);
-
-       dmadev->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(dmadev->clk))
-               return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk),
-                                    "Missing clock controller\n");
-
-       ret = clk_prepare_enable(dmadev->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
-               return ret;
-       }
-
-       rst = devm_reset_control_get(&pdev->dev, NULL);
-       if (IS_ERR(rst)) {
-               ret = PTR_ERR(rst);
-               if (ret == -EPROBE_DEFER)
-                       goto err_clk;
-       } else {
-               reset_control_assert(rst);
-               udelay(2);
-               reset_control_deassert(rst);
-       }
-
-       dd = &dmadev->ddev;
-       dma_cap_set(DMA_SLAVE, dd->cap_mask);
-       dma_cap_set(DMA_PRIVATE, dd->cap_mask);
-       dma_cap_set(DMA_CYCLIC, dd->cap_mask);
-       dma_cap_set(DMA_MEMCPY, dd->cap_mask);
-       dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources;
-       dd->device_free_chan_resources = stm32_mdma_free_chan_resources;
-       dd->device_tx_status = stm32_mdma_tx_status;
-       dd->device_issue_pending = stm32_mdma_issue_pending;
-       dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg;
-       dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic;
-       dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy;
-       dd->device_config = stm32_mdma_slave_config;
-       dd->device_pause = stm32_mdma_pause;
-       dd->device_resume = stm32_mdma_resume;
-       dd->device_terminate_all = stm32_mdma_terminate_all;
-       dd->device_synchronize = stm32_mdma_synchronize;
-       dd->descriptor_reuse = true;
-
-       dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
-               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
-               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
-               BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
-       dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
-               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
-               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
-               BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
-       dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
-               BIT(DMA_MEM_TO_MEM);
-       dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-       dd->max_burst = STM32_MDMA_MAX_BURST;
-       dd->dev = &pdev->dev;
-       INIT_LIST_HEAD(&dd->channels);
-
-       for (i = 0; i < dmadev->nr_channels; i++) {
-               chan = &dmadev->chan[i];
-               chan->id = i;
-
-               if (stm32_mdma_read(dmadev, STM32_MDMA_CCR(i)) & STM32_MDMA_CCR_SM)
-                       dmadev->chan_reserved |= BIT(i);
-
-               chan->vchan.desc_free = stm32_mdma_desc_free;
-               vchan_init(&chan->vchan, dd);
-       }
-
-       dmadev->irq = platform_get_irq(pdev, 0);
-       if (dmadev->irq < 0) {
-               ret = dmadev->irq;
-               goto err_clk;
-       }
-
-       ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
-                              0, dev_name(&pdev->dev), dmadev);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to request IRQ\n");
-               goto err_clk;
-       }
-
-       ret = dmaenginem_async_device_register(dd);
-       if (ret)
-               goto err_clk;
-
-       ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
-       if (ret < 0) {
-               dev_err(&pdev->dev,
-                       "STM32 MDMA DMA OF registration failed %d\n", ret);
-               goto err_clk;
-       }
-
-       platform_set_drvdata(pdev, dmadev);
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_get_noresume(&pdev->dev);
-       pm_runtime_put(&pdev->dev);
-
-       dev_info(&pdev->dev, "STM32 MDMA driver registered\n");
-
-       return 0;
-
-err_clk:
-       clk_disable_unprepare(dmadev->clk);
-
-       return ret;
-}
-
-#ifdef CONFIG_PM
-static int stm32_mdma_runtime_suspend(struct device *dev)
-{
-       struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
-
-       clk_disable_unprepare(dmadev->clk);
-
-       return 0;
-}
-
-static int stm32_mdma_runtime_resume(struct device *dev)
-{
-       struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
-       int ret;
-
-       ret = clk_prepare_enable(dmadev->clk);
-       if (ret) {
-               dev_err(dev, "failed to prepare_enable clock\n");
-               return ret;
-       }
-
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int stm32_mdma_pm_suspend(struct device *dev)
-{
-       struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
-       u32 ccr, id;
-       int ret;
-
-       ret = pm_runtime_resume_and_get(dev);
-       if (ret < 0)
-               return ret;
-
-       for (id = 0; id < dmadev->nr_channels; id++) {
-               ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
-               if (ccr & STM32_MDMA_CCR_EN) {
-                       dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
-                       return -EBUSY;
-               }
-       }
-
-       pm_runtime_put_sync(dev);
-
-       pm_runtime_force_suspend(dev);
-
-       return 0;
-}
-
-static int stm32_mdma_pm_resume(struct device *dev)
-{
-       return pm_runtime_force_resume(dev);
-}
-#endif
-
-static const struct dev_pm_ops stm32_mdma_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(stm32_mdma_pm_suspend, stm32_mdma_pm_resume)
-       SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
-                          stm32_mdma_runtime_resume, NULL)
-};
-
-static struct platform_driver stm32_mdma_driver = {
-       .probe = stm32_mdma_probe,
-       .driver = {
-               .name = "stm32-mdma",
-               .of_match_table = stm32_mdma_of_match,
-               .pm = &stm32_mdma_pm_ops,
-       },
-};
-
-static int __init stm32_mdma_init(void)
-{
-       return platform_driver_register(&stm32_mdma_driver);
-}
-
-subsys_initcall(stm32_mdma_init);
-
-MODULE_DESCRIPTION("Driver for STM32 MDMA controller");
-MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
-MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
diff --git a/drivers/dma/stm32/Kconfig b/drivers/dma/stm32/Kconfig
new file mode 100644 (file)
index 0000000..b72ae1a
--- /dev/null
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# STM32 DMA controllers drivers
+#
+if ARCH_STM32 || COMPILE_TEST
+
+config STM32_DMA
+       bool "STMicroelectronics STM32 DMA support"
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Enable support for the on-chip DMA controller on STMicroelectronics
+         STM32 platforms.
+         If you have a board based on an STM32 SoC with such a DMA controller
+         and want to use DMA, say Y here.
+
+config STM32_DMAMUX
+       bool "STMicroelectronics STM32 DMA multiplexer support"
+       depends on STM32_DMA
+       help
+         Enable support for the on-chip DMA multiplexer on STMicroelectronics
+         STM32 platforms.
+         If you have a board based on an STM32 SoC with such a DMA multiplexer
+         and want to use DMAMUX, say Y here.
+
+config STM32_MDMA
+       bool "STMicroelectronics STM32 master DMA support"
+       depends on OF
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Enable support for the on-chip MDMA controller on STMicroelectronics
+         STM32 platforms.
+         If you have a board based on an STM32 SoC with such a DMA controller
+         and want to use MDMA, say Y here.
+
+endif
diff --git a/drivers/dma/stm32/Makefile b/drivers/dma/stm32/Makefile
new file mode 100644 (file)
index 0000000..663a389
--- /dev/null
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_STM32_DMA) += stm32-dma.o
+obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
+obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
diff --git a/drivers/dma/stm32/stm32-dma.c b/drivers/dma/stm32/stm32-dma.c
new file mode 100644 (file)
index 0000000..917f8e9
--- /dev/null
@@ -0,0 +1,1782 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for STM32 DMA controller
+ *
+ * Inspired by dma-jz4740.c and tegra20-apb-dma.c
+ *
+ * Copyright (C) M'boumba Cedric Madianga 2015
+ * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "../virt-dma.h"
+
+#define STM32_DMA_LISR                 0x0000 /* DMA Low Int Status Reg */
+#define STM32_DMA_HISR                 0x0004 /* DMA High Int Status Reg */
+#define STM32_DMA_ISR(n)               (((n) & 4) ? STM32_DMA_HISR : STM32_DMA_LISR)
+#define STM32_DMA_LIFCR                        0x0008 /* DMA Low Int Flag Clear Reg */
+#define STM32_DMA_HIFCR                        0x000c /* DMA High Int Flag Clear Reg */
+#define STM32_DMA_IFCR(n)              (((n) & 4) ? STM32_DMA_HIFCR : STM32_DMA_LIFCR)
+#define STM32_DMA_TCI                  BIT(5) /* Transfer Complete Interrupt */
+#define STM32_DMA_HTI                  BIT(4) /* Half Transfer Interrupt */
+#define STM32_DMA_TEI                  BIT(3) /* Transfer Error Interrupt */
+#define STM32_DMA_DMEI                 BIT(2) /* Direct Mode Error Interrupt */
+#define STM32_DMA_FEI                  BIT(0) /* FIFO Error Interrupt */
+#define STM32_DMA_MASKI                        (STM32_DMA_TCI \
+                                        | STM32_DMA_TEI \
+                                        | STM32_DMA_DMEI \
+                                        | STM32_DMA_FEI)
+/*
+ * If (chan->id % 4) is 2 or 3, left shift the mask by 16 bits;
+ * if (chan->id % 4) is 1 or 3, additionally left shift the mask by 6 bits.
+ */
+#define STM32_DMA_FLAGS_SHIFT(n)       ({ typeof(n) (_n) = (n); \
+                                          (((_n) & 2) << 3) | (((_n) & 1) * 6); })
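+/*
+ * For example, channel ids 0..7 give shifts of 0, 6, 16, 22, 0, 6, 16, 22
+ * bits respectively, matching the four per-channel flag fields packed into
+ * each of LISR/HISR (and LIFCR/HIFCR).
+ */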
+
+/* DMA Stream x Configuration Register */
+#define STM32_DMA_SCR(x)               (0x0010 + 0x18 * (x)) /* x = 0..7 */
+#define STM32_DMA_SCR_REQ_MASK         GENMASK(27, 25)
+#define STM32_DMA_SCR_MBURST_MASK      GENMASK(24, 23)
+#define STM32_DMA_SCR_PBURST_MASK      GENMASK(22, 21)
+#define STM32_DMA_SCR_PL_MASK          GENMASK(17, 16)
+#define STM32_DMA_SCR_MSIZE_MASK       GENMASK(14, 13)
+#define STM32_DMA_SCR_PSIZE_MASK       GENMASK(12, 11)
+#define STM32_DMA_SCR_DIR_MASK         GENMASK(7, 6)
+#define STM32_DMA_SCR_TRBUFF           BIT(20) /* Bufferable transfer for USART/UART */
+#define STM32_DMA_SCR_CT               BIT(19) /* Target in double buffer */
+#define STM32_DMA_SCR_DBM              BIT(18) /* Double Buffer Mode */
+#define STM32_DMA_SCR_PINCOS           BIT(15) /* Peripheral inc offset size */
+#define STM32_DMA_SCR_MINC             BIT(10) /* Memory increment mode */
+#define STM32_DMA_SCR_PINC             BIT(9) /* Peripheral increment mode */
+#define STM32_DMA_SCR_CIRC             BIT(8) /* Circular mode */
+#define STM32_DMA_SCR_PFCTRL           BIT(5) /* Peripheral Flow Controller */
+#define STM32_DMA_SCR_TCIE             BIT(4) /* Transfer Complete Int Enable */
+#define STM32_DMA_SCR_TEIE             BIT(2) /* Transfer Error Int Enable */
+#define STM32_DMA_SCR_DMEIE            BIT(1) /* Direct Mode Err Int Enable */
+#define STM32_DMA_SCR_EN               BIT(0) /* Stream Enable */
+#define STM32_DMA_SCR_CFG_MASK         (STM32_DMA_SCR_PINC \
+                                       | STM32_DMA_SCR_MINC \
+                                       | STM32_DMA_SCR_PINCOS \
+                                       | STM32_DMA_SCR_PL_MASK)
+#define STM32_DMA_SCR_IRQ_MASK         (STM32_DMA_SCR_TCIE \
+                                       | STM32_DMA_SCR_TEIE \
+                                       | STM32_DMA_SCR_DMEIE)
+
+/* DMA Stream x number of data register */
+#define STM32_DMA_SNDTR(x)             (0x0014 + 0x18 * (x))
+
+/* DMA stream peripheral address register */
+#define STM32_DMA_SPAR(x)              (0x0018 + 0x18 * (x))
+
+/* DMA stream x memory 0 address register */
+#define STM32_DMA_SM0AR(x)             (0x001c + 0x18 * (x))
+
+/* DMA stream x memory 1 address register */
+#define STM32_DMA_SM1AR(x)             (0x0020 + 0x18 * (x))
+
+/* DMA stream x FIFO control register */
+#define STM32_DMA_SFCR(x)              (0x0024 + 0x18 * (x))
+#define STM32_DMA_SFCR_FTH_MASK                GENMASK(1, 0)
+#define STM32_DMA_SFCR_FEIE            BIT(7) /* FIFO error interrupt enable */
+#define STM32_DMA_SFCR_DMDIS           BIT(2) /* Direct mode disable */
+#define STM32_DMA_SFCR_MASK            (STM32_DMA_SFCR_FEIE \
+                                       | STM32_DMA_SFCR_DMDIS)
+
+/* DMA direction */
+#define STM32_DMA_DEV_TO_MEM           0x00
+#define STM32_DMA_MEM_TO_DEV           0x01
+#define STM32_DMA_MEM_TO_MEM           0x02
+
+/* DMA priority level */
+#define STM32_DMA_PRIORITY_LOW         0x00
+#define STM32_DMA_PRIORITY_MEDIUM      0x01
+#define STM32_DMA_PRIORITY_HIGH                0x02
+#define STM32_DMA_PRIORITY_VERY_HIGH   0x03
+
+/* DMA FIFO threshold selection */
+#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL          0x00
+#define STM32_DMA_FIFO_THRESHOLD_HALFFULL              0x01
+#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL         0x02
+#define STM32_DMA_FIFO_THRESHOLD_FULL                  0x03
+#define STM32_DMA_FIFO_THRESHOLD_NONE                  0x04
+
+#define STM32_DMA_MAX_DATA_ITEMS       0xffff
+/*
+ * A valid transfer starts from @0 up to @0xFFFE, which can lead to unaligned
+ * scatter-gather at the boundary. Thus it's safer to round this value down
+ * to the FIFO size (16 bytes).
+ */
+#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS       \
+       ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
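+/* i.e. 0xffff rounded down to a multiple of 16: 0xfff0 (65520) data items */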
+#define STM32_DMA_MAX_CHANNELS         0x08
+#define STM32_DMA_MAX_REQUEST_ID       0x08
+#define STM32_DMA_MAX_DATA_PARAM       0x03
+#define STM32_DMA_FIFO_SIZE            16      /* FIFO is 16 bytes */
+#define STM32_DMA_MIN_BURST            4
+#define STM32_DMA_MAX_BURST            16
+
+/* DMA Features */
+#define STM32_DMA_THRESHOLD_FTR_MASK   GENMASK(1, 0)
+#define STM32_DMA_DIRECT_MODE_MASK     BIT(2)
+#define STM32_DMA_ALT_ACK_MODE_MASK    BIT(4)
+#define STM32_DMA_MDMA_STREAM_ID_MASK  GENMASK(19, 16)
+
+enum stm32_dma_width {
+       STM32_DMA_BYTE,
+       STM32_DMA_HALF_WORD,
+       STM32_DMA_WORD,
+};
+
+enum stm32_dma_burst_size {
+       STM32_DMA_BURST_SINGLE,
+       STM32_DMA_BURST_INCR4,
+       STM32_DMA_BURST_INCR8,
+       STM32_DMA_BURST_INCR16,
+};
+
+/**
+ * struct stm32_dma_cfg - STM32 DMA custom configuration
+ * @channel_id: channel ID
+ * @request_line: DMA request
+ * @stream_config: 32bit mask specifying the DMA channel configuration
+ * @features: 32bit mask specifying the DMA Feature list
+ */
+struct stm32_dma_cfg {
+       u32 channel_id;
+       u32 request_line;
+       u32 stream_config;
+       u32 features;
+};
+
+struct stm32_dma_chan_reg {
+       u32 dma_lisr;
+       u32 dma_hisr;
+       u32 dma_lifcr;
+       u32 dma_hifcr;
+       u32 dma_scr;
+       u32 dma_sndtr;
+       u32 dma_spar;
+       u32 dma_sm0ar;
+       u32 dma_sm1ar;
+       u32 dma_sfcr;
+};
+
+struct stm32_dma_sg_req {
+       u32 len;
+       struct stm32_dma_chan_reg chan_reg;
+};
+
+struct stm32_dma_desc {
+       struct virt_dma_desc vdesc;
+       bool cyclic;
+       u32 num_sgs;
+       struct stm32_dma_sg_req sg_req[] __counted_by(num_sgs);
+};
+
+/**
+ * struct stm32_dma_mdma_config - STM32 DMA MDMA configuration
+ * @stream_id: DMA request to trigger STM32 MDMA transfer
+ * @ifcr: DMA interrupt flag clear register address,
+ *        used by STM32 MDMA to clear DMA Transfer Complete flag
+ * @tcf: DMA Transfer Complete flag
+ */
+struct stm32_dma_mdma_config {
+       u32 stream_id;
+       u32 ifcr;
+       u32 tcf;
+};
+
+struct stm32_dma_chan {
+       struct virt_dma_chan vchan;
+       bool config_init;
+       bool busy;
+       u32 id;
+       u32 irq;
+       struct stm32_dma_desc *desc;
+       u32 next_sg;
+       struct dma_slave_config dma_sconfig;
+       struct stm32_dma_chan_reg chan_reg;
+       u32 threshold;
+       u32 mem_burst;
+       u32 mem_width;
+       enum dma_status status;
+       bool trig_mdma;
+       struct stm32_dma_mdma_config mdma_config;
+};
+
+struct stm32_dma_device {
+       struct dma_device ddev;
+       void __iomem *base;
+       struct clk *clk;
+       bool mem2mem;
+       struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
+};
+
+static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
+{
+       return container_of(chan->vchan.chan.device, struct stm32_dma_device,
+                           ddev);
+}
+
+static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct stm32_dma_chan, vchan.chan);
+}
+
+static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
+{
+       return container_of(vdesc, struct stm32_dma_desc, vdesc);
+}
+
+static struct device *chan2dev(struct stm32_dma_chan *chan)
+{
+       return &chan->vchan.chan.dev->device;
+}
+
+static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
+{
+       return readl_relaxed(dmadev->base + reg);
+}
+
+static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
+{
+       writel_relaxed(val, dmadev->base + reg);
+}
+
+static int stm32_dma_get_width(struct stm32_dma_chan *chan,
+                              enum dma_slave_buswidth width)
+{
+       switch (width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               return STM32_DMA_BYTE;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               return STM32_DMA_HALF_WORD;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               return STM32_DMA_WORD;
+       default:
+               dev_err(chan2dev(chan), "Dma bus width not supported\n");
+               return -EINVAL;
+       }
+}
+
+static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
+                                                      dma_addr_t buf_addr,
+                                                      u32 threshold)
+{
+       enum dma_slave_buswidth max_width;
+
+       if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
+               max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       else
+               max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+       while ((buf_len < max_width || buf_len % max_width) &&
+              max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
+               max_width = max_width >> 1;
+
+       if (buf_addr & (max_width - 1))
+               max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+       return max_width;
+}
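+
+/*
+ * For example, with threshold FULL, buf_len = 6 and buf_addr = 0x2002:
+ * start at 4 bytes; 6 % 4 != 0 so halve to 2 bytes; 6 % 2 == 0 and the
+ * address is 2-byte aligned, so a 2-byte width is selected.
+ */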
+
+static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
+                                               enum dma_slave_buswidth width)
+{
+       u32 remaining;
+
+       if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+               return false;
+
+       if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+               if (burst != 0) {
+                       /*
+                        * If the number of beats fits into a whole number
+                        * of bursts, this configuration is allowed.
+                        */
+                       remaining = ((STM32_DMA_FIFO_SIZE / width) *
+                                    (threshold + 1) / 4) % burst;
+
+                       if (remaining == 0)
+                               return true;
+               } else {
+                       return true;
+               }
+       }
+
+       return false;
+}
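+
+/*
+ * For example, with a 4-byte width and threshold FULL, the fill level is
+ * (16 / 4) * (3 + 1) / 4 = 4 beats: a burst of 4 divides it evenly and is
+ * allowed, whereas a burst of 8 is not.
+ */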
+
+static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
+{
+       /* If FIFO direct mode, burst is not possible */
+       if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+               return false;
+
+       /*
+        * Buffer or period length has to be aligned on FIFO depth.
+        * Otherwise bytes may be left stuck in the FIFO at the end of the
+        * buffer or period.
+        */
+       return ((buf_len % ((threshold + 1) * 4)) == 0);
+}
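+
+/*
+ * For example, threshold HALFFULL gives a fill level of (1 + 1) * 4 = 8
+ * bytes, so buf_len must be a multiple of 8 for bursts to be usable.
+ */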
+
+static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
+                                   enum dma_slave_buswidth width)
+{
+       u32 best_burst = max_burst;
+
+       if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
+               return 0;
+
+       while ((buf_len < best_burst * width && best_burst > 1) ||
+              !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
+                                                   width)) {
+               if (best_burst > STM32_DMA_MIN_BURST)
+                       best_burst = best_burst >> 1;
+               else
+                       best_burst = 0;
+       }
+
+       return best_burst;
+}
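+
+/*
+ * For example, buf_len = 32, max_burst = 16, threshold FULL, 1-byte width:
+ * 16 beats fit within the buffer and divide the 16-beat fill level evenly,
+ * so the full 16-beat burst is kept; when no burst fits, 0 (single) is
+ * returned.
+ */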
+
+static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
+{
+       switch (maxburst) {
+       case 0:
+       case 1:
+               return STM32_DMA_BURST_SINGLE;
+       case 4:
+               return STM32_DMA_BURST_INCR4;
+       case 8:
+               return STM32_DMA_BURST_INCR8;
+       case 16:
+               return STM32_DMA_BURST_INCR16;
+       default:
+               dev_err(chan2dev(chan), "Dma burst size not supported\n");
+               return -EINVAL;
+       }
+}
+
+static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
+                                     u32 src_burst, u32 dst_burst)
+{
+       chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
+       chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;
+
+       if (!src_burst && !dst_burst) {
+               /* Using direct mode */
+               chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
+       } else {
+               /* Using FIFO mode */
+               chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
+       }
+}
+
+static int stm32_dma_slave_config(struct dma_chan *c,
+                                 struct dma_slave_config *config)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+
+       memcpy(&chan->dma_sconfig, config, sizeof(*config));
+
+       /* Check if user is requesting DMA to trigger STM32 MDMA */
+       if (config->peripheral_size) {
+               config->peripheral_config = &chan->mdma_config;
+               config->peripheral_size = sizeof(chan->mdma_config);
+               chan->trig_mdma = true;
+       }
+
+       chan->config_init = true;
+
+       return 0;
+}
+
+static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
+{
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       u32 flags, dma_isr;
+
+       /*
+        * Read "flags" from DMA_xISR register corresponding to the selected
+        * DMA channel at the correct bit offset inside that register.
+        */
+
+       dma_isr = stm32_dma_read(dmadev, STM32_DMA_ISR(chan->id));
+       flags = dma_isr >> STM32_DMA_FLAGS_SHIFT(chan->id);
+
+       return flags & STM32_DMA_MASKI;
+}
+
+static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
+{
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       u32 dma_ifcr;
+
+       /*
+        * Write "flags" to the DMA_xIFCR register corresponding to the selected
+        * DMA channel at the correct bit offset inside that register.
+        */
+       flags &= STM32_DMA_MASKI;
+       dma_ifcr = flags << STM32_DMA_FLAGS_SHIFT(chan->id);
+
+       stm32_dma_write(dmadev, STM32_DMA_IFCR(chan->id), dma_ifcr);
+}
+
+static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
+{
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       u32 dma_scr, id, reg;
+
+       id = chan->id;
+       reg = STM32_DMA_SCR(id);
+       dma_scr = stm32_dma_read(dmadev, reg);
+
+       if (dma_scr & STM32_DMA_SCR_EN) {
+               dma_scr &= ~STM32_DMA_SCR_EN;
+               stm32_dma_write(dmadev, reg, dma_scr);
+
+               return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
+                                       dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
+                                       10, 1000000);
+       }
+
+       return 0;
+}
+
+static void stm32_dma_stop(struct stm32_dma_chan *chan)
+{
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       u32 dma_scr, dma_sfcr, status;
+       int ret;
+
+       /* Disable interrupts */
+       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+       dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
+       stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+       dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
+       dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
+       stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);
+
+       /* Disable DMA */
+       ret = stm32_dma_disable_chan(chan);
+       if (ret < 0)
+               return;
+
+       /* Clear interrupt status if it is there */
+       status = stm32_dma_irq_status(chan);
+       if (status) {
+               dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
+                       __func__, status);
+               stm32_dma_irq_clear(chan, status);
+       }
+
+       chan->busy = false;
+       chan->status = DMA_COMPLETE;
+}
+
+static int stm32_dma_terminate_all(struct dma_chan *c)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+
+       if (chan->desc) {
+               dma_cookie_complete(&chan->desc->vdesc.tx);
+               vchan_terminate_vdesc(&chan->desc->vdesc);
+               if (chan->busy)
+                       stm32_dma_stop(chan);
+               chan->desc = NULL;
+       }
+
+       vchan_get_all_descriptors(&chan->vchan, &head);
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+       vchan_dma_desc_free_list(&chan->vchan, &head);
+
+       return 0;
+}
+
+static void stm32_dma_synchronize(struct dma_chan *c)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+
+       vchan_synchronize(&chan->vchan);
+}
+
+static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
+{
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+       u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+       u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
+       u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
+       u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
+       u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
+
+       dev_dbg(chan2dev(chan), "SCR:   0x%08x\n", scr);
+       dev_dbg(chan2dev(chan), "NDTR:  0x%08x\n", ndtr);
+       dev_dbg(chan2dev(chan), "SPAR:  0x%08x\n", spar);
+       dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
+       dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
+       dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n", sfcr);
+}
+
+static void stm32_dma_sg_inc(struct stm32_dma_chan *chan)
+{
+       chan->next_sg++;
+       if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs))
+               chan->next_sg = 0;
+}
+
+static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
+
+static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
+{
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       struct virt_dma_desc *vdesc;
+       struct stm32_dma_sg_req *sg_req;
+       struct stm32_dma_chan_reg *reg;
+       u32 status;
+       int ret;
+
+       ret = stm32_dma_disable_chan(chan);
+       if (ret < 0)
+               return;
+
+       if (!chan->desc) {
+               vdesc = vchan_next_desc(&chan->vchan);
+               if (!vdesc)
+                       return;
+
+               list_del(&vdesc->node);
+
+               chan->desc = to_stm32_dma_desc(vdesc);
+               chan->next_sg = 0;
+       }
+
+       if (chan->next_sg == chan->desc->num_sgs)
+               chan->next_sg = 0;
+
+       sg_req = &chan->desc->sg_req[chan->next_sg];
+       reg = &sg_req->chan_reg;
+
+       /* When DMA triggers STM32 MDMA, DMA Transfer Complete is managed by STM32 MDMA */
+       if (chan->trig_mdma && chan->dma_sconfig.direction != DMA_MEM_TO_DEV)
+               reg->dma_scr &= ~STM32_DMA_SCR_TCIE;
+
+       reg->dma_scr &= ~STM32_DMA_SCR_EN;
+       stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
+       stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
+       stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
+       stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
+       stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
+       stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);
+
+       stm32_dma_sg_inc(chan);
+
+       /* Clear interrupt status if it is there */
+       status = stm32_dma_irq_status(chan);
+       if (status)
+               stm32_dma_irq_clear(chan, status);
+
+       if (chan->desc->cyclic)
+               stm32_dma_configure_next_sg(chan);
+
+       stm32_dma_dump_reg(chan);
+
+       /* Start DMA */
+       chan->busy = true;
+       chan->status = DMA_IN_PROGRESS;
+       reg->dma_scr |= STM32_DMA_SCR_EN;
+       stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
+
+       dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+}
+
+static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
+{
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       struct stm32_dma_sg_req *sg_req;
+       u32 dma_scr, dma_sm0ar, dma_sm1ar, id;
+
+       id = chan->id;
+       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+       sg_req = &chan->desc->sg_req[chan->next_sg];
+
+       if (dma_scr & STM32_DMA_SCR_CT) {
+               dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
+               stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
+               dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
+                       stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
+       } else {
+               dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
+               stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
+               dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
+                       stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
+       }
+}
+
+static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
+{
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       u32 dma_scr;
+
+       /*
+        * Read and store current remaining data items and peripheral/memory addresses to be
+        * updated on resume
+        */
+       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+       /*
+        * A transfer can be paused between a previous resume and the reconfiguration on transfer
+        * complete. If the transfer is cyclic and CIRC/DBM have been deactivated for the resume,
+        * they need to be set again here in the SCR backup to ensure a correct reconfiguration on
+        * transfer complete.
+        */
+       if (chan->desc && chan->desc->cyclic) {
+               if (chan->desc->num_sgs == 1)
+                       dma_scr |= STM32_DMA_SCR_CIRC;
+               else
+                       dma_scr |= STM32_DMA_SCR_DBM;
+       }
+       chan->chan_reg.dma_scr = dma_scr;
+
+       /*
+        * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, otherwise
+        * on resume NDTR autoreload value will be wrong (lower than the initial period length)
+        */
+       if (chan->desc && chan->desc->cyclic) {
+               dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC);
+               stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+       }
+
+       chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+
+       chan->status = DMA_PAUSED;
+
+       dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
+}
+
+static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
+{
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       struct stm32_dma_sg_req *sg_req;
+       u32 dma_scr, status, id;
+
+       id = chan->id;
+       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+       /* Clear interrupt status if it is there */
+       status = stm32_dma_irq_status(chan);
+       if (status)
+               stm32_dma_irq_clear(chan, status);
+
+       if (!chan->next_sg)
+               sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
+       else
+               sg_req = &chan->desc->sg_req[chan->next_sg - 1];
+
+       /* Reconfigure NDTR with the initial value */
+       stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr);
+
+       /* Restore SPAR */
+       stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar);
+
+       /* Restore SM0AR/SM1AR whatever DBM/CT as they may have been modified */
+       stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar);
+       stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar);
+
+       /* Reactivate CIRC/DBM if needed */
+       if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) {
+               dma_scr |= STM32_DMA_SCR_DBM;
+               /* Restore CT */
+               if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT)
+                       dma_scr &= ~STM32_DMA_SCR_CT;
+               else
+                       dma_scr |= STM32_DMA_SCR_CT;
+       } else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) {
+               dma_scr |= STM32_DMA_SCR_CIRC;
+       }
+       stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+
+       stm32_dma_configure_next_sg(chan);
+
+       stm32_dma_dump_reg(chan);
+
+       dma_scr |= STM32_DMA_SCR_EN;
+       stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
+
+       dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
+}
+
+static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
+{
+       if (!chan->desc)
+               return;
+
+       if (chan->desc->cyclic) {
+               vchan_cyclic_callback(&chan->desc->vdesc);
+               if (chan->trig_mdma)
+                       return;
+               stm32_dma_sg_inc(chan);
+               /* cyclic while CIRC/DBM disabled => post-resume reconfiguration needed */
+               if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
+                       stm32_dma_post_resume_reconfigure(chan);
+               else if (scr & STM32_DMA_SCR_DBM)
+                       stm32_dma_configure_next_sg(chan);
+       } else {
+               chan->busy = false;
+               chan->status = DMA_COMPLETE;
+               if (chan->next_sg == chan->desc->num_sgs) {
+                       vchan_cookie_complete(&chan->desc->vdesc);
+                       chan->desc = NULL;
+               }
+               stm32_dma_start_transfer(chan);
+       }
+}
+
+static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
+{
+       struct stm32_dma_chan *chan = devid;
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       u32 status, scr, sfcr;
+
+       spin_lock(&chan->vchan.lock);
+
+       status = stm32_dma_irq_status(chan);
+       scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+       sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
+
+       if (status & STM32_DMA_FEI) {
+               stm32_dma_irq_clear(chan, STM32_DMA_FEI);
+               status &= ~STM32_DMA_FEI;
+               if (sfcr & STM32_DMA_SFCR_FEIE) {
+                       if (!(scr & STM32_DMA_SCR_EN) &&
+                           !(status & STM32_DMA_TCI))
+                               dev_err(chan2dev(chan), "FIFO Error\n");
+                       else
+                               dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
+               }
+       }
+       if (status & STM32_DMA_DMEI) {
+               stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
+               status &= ~STM32_DMA_DMEI;
+               if (scr & STM32_DMA_SCR_DMEIE)
+                       dev_dbg(chan2dev(chan), "Direct mode overrun\n");
+       }
+
+       if (status & STM32_DMA_TCI) {
+               stm32_dma_irq_clear(chan, STM32_DMA_TCI);
+               if (scr & STM32_DMA_SCR_TCIE) {
+                       if (chan->status != DMA_PAUSED)
+                               stm32_dma_handle_chan_done(chan, scr);
+               }
+               status &= ~STM32_DMA_TCI;
+       }
+
+       if (status & STM32_DMA_HTI) {
+               stm32_dma_irq_clear(chan, STM32_DMA_HTI);
+               status &= ~STM32_DMA_HTI;
+       }
+
+       if (status) {
+               stm32_dma_irq_clear(chan, status);
+               dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
+               if (!(scr & STM32_DMA_SCR_EN))
+                       dev_err(chan2dev(chan), "chan disabled by HW\n");
+       }
+
+       spin_unlock(&chan->vchan.lock);
+
+       return IRQ_HANDLED;
+}
+
+static void stm32_dma_issue_pending(struct dma_chan *c)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+       if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
+               dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+               stm32_dma_start_transfer(chan);
+       }
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static int stm32_dma_pause(struct dma_chan *c)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       unsigned long flags;
+       int ret;
+
+       if (chan->status != DMA_IN_PROGRESS)
+               return -EPERM;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+
+       ret = stm32_dma_disable_chan(chan);
+       if (!ret)
+               stm32_dma_handle_chan_paused(chan);
+
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+       return ret;
+}
+
+static int stm32_dma_resume(struct dma_chan *c)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       struct stm32_dma_chan_reg chan_reg = chan->chan_reg;
+       u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar;
+       struct stm32_dma_sg_req *sg_req;
+       unsigned long flags;
+
+       if (chan->status != DMA_PAUSED)
+               return -EPERM;
+
+       scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+       if (WARN_ON(scr & STM32_DMA_SCR_EN))
+               return -EPERM;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+
+       /* sg_req[prev_sg] contains the original ndtr, sm0ar and sm1ar before pausing the transfer */
+       if (!chan->next_sg)
+               sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
+       else
+               sg_req = &chan->desc->sg_req[chan->next_sg - 1];
+
+       ndtr = sg_req->chan_reg.dma_sndtr;
+       offset = (ndtr - chan_reg.dma_sndtr);
+       offset <<= FIELD_GET(STM32_DMA_SCR_PSIZE_MASK, chan_reg.dma_scr);
+       spar = sg_req->chan_reg.dma_spar;
+       sm0ar = sg_req->chan_reg.dma_sm0ar;
+       sm1ar = sg_req->chan_reg.dma_sm1ar;
+
+       /*
+        * The peripheral and/or memory addresses have to be updated in order to adjust the
+        * address pointers. Need to check increment.
+        */
+       if (chan_reg.dma_scr & STM32_DMA_SCR_PINC)
+               stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset);
+       else
+               stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar);
+
+       if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC))
+               offset = 0;
+
+       /*
+        * In case of DBM, the current target could be SM1AR.
+        * Need to temporarily deactivate CIRC/DBM to finish the current transfer, so
+        * SM0AR becomes the current target and must be updated with SM1AR + offset if CT=1.
+        */
+       if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT))
+               stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset);
+       else
+               stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset);
+
+       /* NDTR must be restored otherwise internal HW counter won't be correctly reset */
+       stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr);
+
+       /*
+        * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt,
+        * otherwise NDTR autoreload value will be wrong (lower than the initial period length)
+        */
+       if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))
+               chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM);
+
+       if (chan_reg.dma_scr & STM32_DMA_SCR_DBM)
+               stm32_dma_configure_next_sg(chan);
+
+       stm32_dma_dump_reg(chan);
+
+       /* The stream may then be re-enabled to restart transfer from the point it was stopped */
+       chan->status = DMA_IN_PROGRESS;
+       chan_reg.dma_scr |= STM32_DMA_SCR_EN;
+       stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr);
+
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+       dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);
+
+       return 0;
+}
+
+static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
+                                   enum dma_transfer_direction direction,
+                                   enum dma_slave_buswidth *buswidth,
+                                   u32 buf_len, dma_addr_t buf_addr)
+{
+       enum dma_slave_buswidth src_addr_width, dst_addr_width;
+       int src_bus_width, dst_bus_width;
+       int src_burst_size, dst_burst_size;
+       u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
+       u32 dma_scr, fifoth;
+
+       src_addr_width = chan->dma_sconfig.src_addr_width;
+       dst_addr_width = chan->dma_sconfig.dst_addr_width;
+       src_maxburst = chan->dma_sconfig.src_maxburst;
+       dst_maxburst = chan->dma_sconfig.dst_maxburst;
+       fifoth = chan->threshold;
+
+       switch (direction) {
+       case DMA_MEM_TO_DEV:
+               /* Set device data size */
+               dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
+               if (dst_bus_width < 0)
+                       return dst_bus_width;
+
+               /* Set device burst size */
+               dst_best_burst = stm32_dma_get_best_burst(buf_len,
+                                                         dst_maxburst,
+                                                         fifoth,
+                                                         dst_addr_width);
+
+               dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
+               if (dst_burst_size < 0)
+                       return dst_burst_size;
+
+               /* Set memory data size */
+               src_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
+                                                        fifoth);
+               chan->mem_width = src_addr_width;
+               src_bus_width = stm32_dma_get_width(chan, src_addr_width);
+               if (src_bus_width < 0)
+                       return src_bus_width;
+
+               /*
+                * Set memory burst size - burst not possible if address is not aligned on
+                * the address boundary equal to the size of the transfer
+                */
+               if (buf_addr & (buf_len - 1))
+                       src_maxburst = 1;
+               else
+                       src_maxburst = STM32_DMA_MAX_BURST;
+               src_best_burst = stm32_dma_get_best_burst(buf_len,
+                                                         src_maxburst,
+                                                         fifoth,
+                                                         src_addr_width);
+               src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
+               if (src_burst_size < 0)
+                       return src_burst_size;
+
+               dma_scr = FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_MEM_TO_DEV) |
+                       FIELD_PREP(STM32_DMA_SCR_PSIZE_MASK, dst_bus_width) |
+                       FIELD_PREP(STM32_DMA_SCR_MSIZE_MASK, src_bus_width) |
+                       FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, dst_burst_size) |
+                       FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, src_burst_size);
+
+               /* Set FIFO threshold */
+               chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
+               if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+                       chan->chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, fifoth);
+
+               /* Set peripheral address */
+               chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
+               *buswidth = dst_addr_width;
+               break;
+
+       case DMA_DEV_TO_MEM:
+               /* Set device data size */
+               src_bus_width = stm32_dma_get_width(chan, src_addr_width);
+               if (src_bus_width < 0)
+                       return src_bus_width;
+
+               /* Set device burst size */
+               src_best_burst = stm32_dma_get_best_burst(buf_len,
+                                                         src_maxburst,
+                                                         fifoth,
+                                                         src_addr_width);
+               chan->mem_burst = src_best_burst;
+               src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
+               if (src_burst_size < 0)
+                       return src_burst_size;
+
+               /* Set memory data size */
+               dst_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
+                                                        fifoth);
+               chan->mem_width = dst_addr_width;
+               dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
+               if (dst_bus_width < 0)
+                       return dst_bus_width;
+
+               /*
+                * Set memory burst size - burst not possible if address is not aligned on
+                * the address boundary equal to the size of the transfer
+                */
+               if (buf_addr & (buf_len - 1))
+                       dst_maxburst = 1;
+               else
+                       dst_maxburst = STM32_DMA_MAX_BURST;
+               dst_best_burst = stm32_dma_get_best_burst(buf_len,
+                                                         dst_maxburst,
+                                                         fifoth,
+                                                         dst_addr_width);
+               chan->mem_burst = dst_best_burst;
+               dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
+               if (dst_burst_size < 0)
+                       return dst_burst_size;
+
+               dma_scr = FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_DEV_TO_MEM) |
+                       FIELD_PREP(STM32_DMA_SCR_PSIZE_MASK, src_bus_width) |
+                       FIELD_PREP(STM32_DMA_SCR_MSIZE_MASK, dst_bus_width) |
+                       FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, src_burst_size) |
+                       FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, dst_burst_size);
+
+               /* Set FIFO threshold */
+               chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
+               if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+                       chan->chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, fifoth);
+
+               /* Set peripheral address */
+               chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
+               *buswidth = chan->dma_sconfig.src_addr_width;
+               break;
+
+       default:
+               dev_err(chan2dev(chan), "DMA direction is not supported\n");
+               return -EINVAL;
+       }
+
+       stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);
+
+       /* Set DMA control register */
+       chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
+                       STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
+                       STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
+       chan->chan_reg.dma_scr |= dma_scr;
+
+       return 0;
+}
+
+static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
+{
+       memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
+}
+
+static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
+       struct dma_chan *c, struct scatterlist *sgl,
+       u32 sg_len, enum dma_transfer_direction direction,
+       unsigned long flags, void *context)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       struct stm32_dma_desc *desc;
+       struct scatterlist *sg;
+       enum dma_slave_buswidth buswidth;
+       u32 nb_data_items;
+       int i, ret;
+
+       if (!chan->config_init) {
+               dev_err(chan2dev(chan), "dma channel is not configured\n");
+               return NULL;
+       }
+
+       if (sg_len < 1) {
+               dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
+               return NULL;
+       }
+
+       desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
+       if (!desc)
+               return NULL;
+       desc->num_sgs = sg_len;
+
+       /* Set peripheral flow controller */
+       if (chan->dma_sconfig.device_fc)
+               chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
+       else
+               chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
+
+       /* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
+       if (chan->trig_mdma && sg_len > 1) {
+               chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+               chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
+       }
+
+       for_each_sg(sgl, sg, sg_len, i) {
+               ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
+                                              sg_dma_len(sg),
+                                              sg_dma_address(sg));
+               if (ret < 0)
+                       goto err;
+
+               desc->sg_req[i].len = sg_dma_len(sg);
+
+               nb_data_items = desc->sg_req[i].len / buswidth;
+               if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
+                       dev_err(chan2dev(chan), "nb items not supported\n");
+                       goto err;
+               }
+
+               stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+               desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
+               desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
+               desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
+               desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
+               desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
+               if (chan->trig_mdma)
+                       desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg);
+               desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
+       }
+       desc->cyclic = false;
+
+       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+err:
+       kfree(desc);
+       return NULL;
+}
+
+static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
+       struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+       size_t period_len, enum dma_transfer_direction direction,
+       unsigned long flags)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       struct stm32_dma_desc *desc;
+       enum dma_slave_buswidth buswidth;
+       u32 num_periods, nb_data_items;
+       int i, ret;
+
+       if (!buf_len || !period_len) {
+               dev_err(chan2dev(chan), "Invalid buffer/period len\n");
+               return NULL;
+       }
+
+       if (!chan->config_init) {
+               dev_err(chan2dev(chan), "dma channel is not configured\n");
+               return NULL;
+       }
+
+       if (buf_len % period_len) {
+               dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
+               return NULL;
+       }
+
+       /*
+        * Requests may be queued as long as the DMA has not started; the
+        * driver will loop over all of them. Once the DMA has started, new
+        * requests can only be queued after terminating the DMA.
+        */
+       if (chan->busy) {
+               dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
+               return NULL;
+       }
+
+       ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len,
+                                      buf_addr);
+       if (ret < 0)
+               return NULL;
+
+       nb_data_items = period_len / buswidth;
+       if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
+               dev_err(chan2dev(chan), "number of items not supported\n");
+               return NULL;
+       }
+
+       /*  Enable Circular mode or double buffer mode */
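+       /*
+        * A single period fits in the CIRC autoreload; several periods need
+        * double buffer mode, ping-ponging between SM0AR and SM1AR starting
+        * from SM0AR (CT cleared).
+        */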
+       if (buf_len == period_len) {
+               chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
+       } else {
+               chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+               chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
+       }
+
+       /* Clear periph ctrl if client set it */
+       chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
+
+       num_periods = buf_len / period_len;
+
+       desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
+       if (!desc)
+               return NULL;
+       desc->num_sgs = num_periods;
+
+       for (i = 0; i < num_periods; i++) {
+               desc->sg_req[i].len = period_len;
+
+               stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+               desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
+               desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
+               desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
+               desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
+               desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
+               if (chan->trig_mdma)
+                       desc->sg_req[i].chan_reg.dma_sm1ar += period_len;
+               desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
+               if (!chan->trig_mdma)
+                       buf_addr += period_len;
+       }
+       desc->cyclic = true;
+
+       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
+       struct dma_chan *c, dma_addr_t dest,
+       dma_addr_t src, size_t len, unsigned long flags)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       enum dma_slave_buswidth max_width;
+       struct stm32_dma_desc *desc;
+       size_t xfer_count, offset;
+       u32 num_sgs, best_burst, threshold;
+       int dma_burst, i;
+
+       num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
+       desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
+       if (!desc)
+               return NULL;
+       desc->num_sgs = num_sgs;
+
+       threshold = chan->threshold;
+
+       for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
+               xfer_count = min_t(size_t, len - offset,
+                                  STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
+
+               /* Compute best burst size */
+               max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+               best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
+                                                     threshold, max_width);
+               dma_burst = stm32_dma_get_burst(chan, best_burst);
+               if (dma_burst < 0) {
+                       kfree(desc);
+                       return NULL;
+               }
+
+               stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
+               desc->sg_req[i].chan_reg.dma_scr =
+                       FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_MEM_TO_MEM) |
+                       FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, dma_burst) |
+                       FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, dma_burst) |
+                       STM32_DMA_SCR_MINC |
+                       STM32_DMA_SCR_PINC |
+                       STM32_DMA_SCR_TCIE |
+                       STM32_DMA_SCR_TEIE;
+               desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
+               desc->sg_req[i].chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, threshold);
+               desc->sg_req[i].chan_reg.dma_spar = src + offset;
+               desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
+               desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
+               desc->sg_req[i].len = xfer_count;
+       }
+       desc->cyclic = false;
+
+       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
+{
+       u32 dma_scr, width, ndtr;
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+
+       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+       width = FIELD_GET(STM32_DMA_SCR_PSIZE_MASK, dma_scr);
+       ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+
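+       /* NDTR counts data items of PSIZE width; the shift converts items to bytes */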
+       return ndtr << width;
+}
+
+/**
+ * stm32_dma_is_current_sg - check that the expected sg_req is the one currently transferred
+ * @chan: dma channel
+ *
+ * This function, called while IRQs are disabled, checks that the hardware has
+ * not switched to the next transfer in double buffer mode. The test is done by
+ * comparing the next_sg memory address with the hardware-related register
+ * (based on the CT bit value).
+ *
+ * Returns true if the expected current transfer is still running or if double
+ * buffer mode is not activated.
+ */
+static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
+{
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       struct stm32_dma_sg_req *sg_req;
+       u32 dma_scr, dma_smar, id, period_len;
+
+       id = chan->id;
+       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+       /* In cyclic CIRC but not DBM, CT is not used */
+       if (!(dma_scr & STM32_DMA_SCR_DBM))
+               return true;
+
+       sg_req = &chan->desc->sg_req[chan->next_sg];
+       period_len = sg_req->len;
+
+       /* DBM - take care of a previous pause/resume not yet post reconfigured */
+       if (dma_scr & STM32_DMA_SCR_CT) {
+               dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
+               /*
+                * If the transfer has been paused/resumed,
+                * SM0AR is in the range [SM0AR:SM0AR + period_len]
+                */
+               return (dma_smar >= sg_req->chan_reg.dma_sm0ar &&
+                       dma_smar < sg_req->chan_reg.dma_sm0ar + period_len);
+       }
+
+       dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
+       /*
+        * If the transfer has been paused/resumed,
+        * SM1AR is in the range [SM1AR:SM1AR + period_len]
+        */
+       return (dma_smar >= sg_req->chan_reg.dma_sm1ar &&
+               dma_smar < sg_req->chan_reg.dma_sm1ar + period_len);
+}
+
+static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
+                                    struct stm32_dma_desc *desc,
+                                    u32 next_sg)
+{
+       u32 modulo, burst_size;
+       u32 residue;
+       u32 n_sg = next_sg;
+       struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
+       int i;
+
+       /*
+        * Calculating the residue means computing the descriptor
+        * information:
+        * - the sg_req currently being transferred
+        * - the hardware's remaining position in this sg (NDTR bit field).
+        *
+        * A race condition may occur if the DMA is running in cyclic or
+        * double buffer mode, since the DMA registers are automatically
+        * reloaded at the end of a period transfer. The hardware may have
+        * switched to the next transfer (CT bit updated) just before the
+        * position (SxNDTR reg) is read.
+        * In this case the SxNDTR reg may or may not correspond to the new
+        * transfer position rather than the expected one.
+        * The strategy implemented in this driver is to:
+        *  - read the SxNDTR register
+        *  - crosscheck that the hardware is still in the current transfer.
+        * In case of a switch, we can assume that the DMA is at the beginning
+        * of the next transfer, so we approximate the residue accordingly by
+        * pointing at the beginning of the next transfer.
+        *
+        * This race condition doesn't apply to non-cyclic mode, as double
+        * buffer mode is not used there and the registers are updated by
+        * software.
+        */
+
+       residue = stm32_dma_get_remaining_bytes(chan);
+
+       if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) {
+               n_sg++;
+               if (n_sg == chan->desc->num_sgs)
+                       n_sg = 0;
+               if (!chan->trig_mdma)
+                       residue = sg_req->len;
+       }
+
+       /*
+        * In cyclic mode, for the last period, residue = remaining bytes
+        * from NDTR;
+        * for all other periods in cyclic mode, and in sg mode,
+        * residue = remaining bytes from NDTR + remaining
+        * periods/sg to be transferred.
+        */
+       if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0)
+               for (i = n_sg; i < desc->num_sgs; i++)
+                       residue += desc->sg_req[i].len;
+
+       if (!chan->mem_burst)
+               return residue;
+
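+       /*
+        * Round the residue up to the next memory burst boundary, e.g. with
+        * mem_burst = 4 beats of mem_width = 4 bytes (burst_size = 16), a
+        * residue of 20 bytes is reported as 32.
+        */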
+       burst_size = chan->mem_burst * chan->mem_width;
+       modulo = residue % burst_size;
+       if (modulo)
+               residue = residue - modulo + burst_size;
+
+       return residue;
+}
+
+static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
+                                          dma_cookie_t cookie,
+                                          struct dma_tx_state *state)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       struct virt_dma_desc *vdesc;
+       enum dma_status status;
+       unsigned long flags;
+       u32 residue = 0;
+
+       status = dma_cookie_status(c, cookie, state);
+       if (status == DMA_COMPLETE)
+               return status;
+
+       status = chan->status;
+
+       if (!state)
+               return status;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+       vdesc = vchan_find_desc(&chan->vchan, cookie);
+       if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
+               residue = stm32_dma_desc_residue(chan, chan->desc,
+                                                chan->next_sg);
+       else if (vdesc)
+               residue = stm32_dma_desc_residue(chan,
+                                                to_stm32_dma_desc(vdesc), 0);
+       dma_set_residue(state, residue);
+
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+       return status;
+}
+
+static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       int ret;
+
+       chan->config_init = false;
+
+       ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
+       if (ret < 0)
+               return ret;
+
+       ret = stm32_dma_disable_chan(chan);
+       if (ret < 0)
+               pm_runtime_put(dmadev->ddev.dev);
+
+       return ret;
+}
+
+static void stm32_dma_free_chan_resources(struct dma_chan *c)
+{
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       unsigned long flags;
+
+       dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);
+
+       if (chan->busy) {
+               spin_lock_irqsave(&chan->vchan.lock, flags);
+               stm32_dma_stop(chan);
+               chan->desc = NULL;
+               spin_unlock_irqrestore(&chan->vchan.lock, flags);
+       }
+
+       pm_runtime_put(dmadev->ddev.dev);
+
+       vchan_free_chan_resources(to_virt_chan(c));
+       stm32_dma_clear_reg(&chan->chan_reg);
+       chan->threshold = 0;
+}
+
+static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+       kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
+}
+
+static void stm32_dma_set_config(struct stm32_dma_chan *chan,
+                                struct stm32_dma_cfg *cfg)
+{
+       stm32_dma_clear_reg(&chan->chan_reg);
+
+       chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
+       chan->chan_reg.dma_scr |= FIELD_PREP(STM32_DMA_SCR_REQ_MASK, cfg->request_line);
+
+       /* Enable Interrupts  */
+       chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;
+
+       chan->threshold = FIELD_GET(STM32_DMA_THRESHOLD_FTR_MASK, cfg->features);
+       if (FIELD_GET(STM32_DMA_DIRECT_MODE_MASK, cfg->features))
+               chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
+       if (FIELD_GET(STM32_DMA_ALT_ACK_MODE_MASK, cfg->features))
+               chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;
+       chan->mdma_config.stream_id = FIELD_GET(STM32_DMA_MDMA_STREAM_ID_MASK, cfg->features);
+}
+
+static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
+                                          struct of_dma *ofdma)
+{
+       struct stm32_dma_device *dmadev = ofdma->of_dma_data;
+       struct device *dev = dmadev->ddev.dev;
+       struct stm32_dma_cfg cfg;
+       struct stm32_dma_chan *chan;
+       struct dma_chan *c;
+
+       if (dma_spec->args_count < 4) {
+               dev_err(dev, "Bad number of cells\n");
+               return NULL;
+       }
+
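+       /*
+        * The four cells of the DMA specifier are the channel id, the
+        * request line, the stream configuration and the features, e.g. a
+        * hypothetical client binding: dmas = <&dma2 4 2 0x400 0x0>;
+        */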
+       cfg.channel_id = dma_spec->args[0];
+       cfg.request_line = dma_spec->args[1];
+       cfg.stream_config = dma_spec->args[2];
+       cfg.features = dma_spec->args[3];
+
+       if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
+           cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
+               dev_err(dev, "Bad channel and/or request id\n");
+               return NULL;
+       }
+
+       chan = &dmadev->chan[cfg.channel_id];
+
+       c = dma_get_slave_channel(&chan->vchan.chan);
+       if (!c) {
+               dev_err(dev, "No more channels available\n");
+               return NULL;
+       }
+
+       stm32_dma_set_config(chan, &cfg);
+
+       return c;
+}
+
+static const struct of_device_id stm32_dma_of_match[] = {
+       { .compatible = "st,stm32-dma", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_dma_of_match);
+
+static int stm32_dma_probe(struct platform_device *pdev)
+{
+       struct stm32_dma_chan *chan;
+       struct stm32_dma_device *dmadev;
+       struct dma_device *dd;
+       struct resource *res;
+       struct reset_control *rst;
+       int i, ret;
+
+       dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+       if (!dmadev)
+               return -ENOMEM;
+
+       dd = &dmadev->ddev;
+
+       dmadev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+       if (IS_ERR(dmadev->base))
+               return PTR_ERR(dmadev->base);
+
+       dmadev->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(dmadev->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");
+
+       ret = clk_prepare_enable(dmadev->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+               return ret;
+       }
+
+       dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
+                                               "st,mem2mem");
+
+       rst = devm_reset_control_get(&pdev->dev, NULL);
+       if (IS_ERR(rst)) {
+               ret = PTR_ERR(rst);
+               if (ret == -EPROBE_DEFER)
+                       goto clk_free;
+       } else {
+               reset_control_assert(rst);
+               udelay(2);
+               reset_control_deassert(rst);
+       }
+
+       dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
+
+       dma_cap_set(DMA_SLAVE, dd->cap_mask);
+       dma_cap_set(DMA_PRIVATE, dd->cap_mask);
+       dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+       dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
+       dd->device_free_chan_resources = stm32_dma_free_chan_resources;
+       dd->device_tx_status = stm32_dma_tx_status;
+       dd->device_issue_pending = stm32_dma_issue_pending;
+       dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
+       dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
+       dd->device_config = stm32_dma_slave_config;
+       dd->device_pause = stm32_dma_pause;
+       dd->device_resume = stm32_dma_resume;
+       dd->device_terminate_all = stm32_dma_terminate_all;
+       dd->device_synchronize = stm32_dma_synchronize;
+       dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
+       dd->max_burst = STM32_DMA_MAX_BURST;
+       dd->max_sg_burst = STM32_DMA_ALIGNED_MAX_DATA_ITEMS;
+       dd->descriptor_reuse = true;
+       dd->dev = &pdev->dev;
+       INIT_LIST_HEAD(&dd->channels);
+
+       if (dmadev->mem2mem) {
+               dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+               dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
+               dd->directions |= BIT(DMA_MEM_TO_MEM);
+       }
+
+       for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
+               chan = &dmadev->chan[i];
+               chan->id = i;
+               chan->vchan.desc_free = stm32_dma_desc_free;
+               vchan_init(&chan->vchan, dd);
+
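+               /*
+                * Expose this channel's interrupt flag clear register address
+                * and Transfer Complete flag mask, so that the STM32 MDMA
+                * driver can acknowledge the DMA Transfer Complete when the
+                * two controllers are chained (see stm32_dma_slave_config()).
+                */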
+               chan->mdma_config.ifcr = res->start;
+               chan->mdma_config.ifcr += STM32_DMA_IFCR(chan->id);
+
+               chan->mdma_config.tcf = STM32_DMA_TCI;
+               chan->mdma_config.tcf <<= STM32_DMA_FLAGS_SHIFT(chan->id);
+       }
+
+       ret = dma_async_device_register(dd);
+       if (ret)
+               goto clk_free;
+
+       for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
+               chan = &dmadev->chan[i];
+               ret = platform_get_irq(pdev, i);
+               if (ret < 0)
+                       goto err_unregister;
+               chan->irq = ret;
+
+               ret = devm_request_irq(&pdev->dev, chan->irq,
+                                      stm32_dma_chan_irq, 0,
+                                      dev_name(chan2dev(chan)), chan);
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "request_irq failed with err %d channel %d\n",
+                               ret, i);
+                       goto err_unregister;
+               }
+       }
+
+       ret = of_dma_controller_register(pdev->dev.of_node,
+                                        stm32_dma_of_xlate, dmadev);
+       if (ret < 0) {
+               dev_err(&pdev->dev,
+                       "STM32 DMA DMA OF registration failed %d\n", ret);
+               goto err_unregister;
+       }
+
+       platform_set_drvdata(pdev, dmadev);
+
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_get_noresume(&pdev->dev);
+       pm_runtime_put(&pdev->dev);
+
+       dev_info(&pdev->dev, "STM32 DMA driver registered\n");
+
+       return 0;
+
+err_unregister:
+       dma_async_device_unregister(dd);
+clk_free:
+       clk_disable_unprepare(dmadev->clk);
+
+       return ret;
+}
+
+#ifdef CONFIG_PM
+static int stm32_dma_runtime_suspend(struct device *dev)
+{
+       struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(dmadev->clk);
+
+       return 0;
+}
+
+static int stm32_dma_runtime_resume(struct device *dev)
+{
+       struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_prepare_enable(dmadev->clk);
+       if (ret) {
+               dev_err(dev, "failed to prepare_enable clock\n");
+               return ret;
+       }
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dma_pm_suspend(struct device *dev)
+{
+       struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+       int id, ret, scr;
+
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret < 0)
+               return ret;
+
+       for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
+               scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+               if (scr & STM32_DMA_SCR_EN) {
+                       dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+                       return -EBUSY;
+               }
+       }
+
+       pm_runtime_put_sync(dev);
+
+       pm_runtime_force_suspend(dev);
+
+       return 0;
+}
+
+static int stm32_dma_pm_resume(struct device *dev)
+{
+       return pm_runtime_force_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops stm32_dma_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume)
+       SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
+                          stm32_dma_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_dma_driver = {
+       .driver = {
+               .name = "stm32-dma",
+               .of_match_table = stm32_dma_of_match,
+               .pm = &stm32_dma_pm_ops,
+       },
+       .probe = stm32_dma_probe,
+};
+
+static int __init stm32_dma_init(void)
+{
+       return platform_driver_register(&stm32_dma_driver);
+}
+subsys_initcall(stm32_dma_init);
diff --git a/drivers/dma/stm32/stm32-dmamux.c b/drivers/dma/stm32/stm32-dmamux.c
new file mode 100644 (file)
index 0000000..8d77e2a
--- /dev/null
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
+ *
+ * DMA Router driver for STM32 DMA MUX
+ *
+ * Based on TI DMA Crossbar driver
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define STM32_DMAMUX_CCR(x)            (0x4 * (x))
+#define STM32_DMAMUX_MAX_DMA_REQUESTS  32
+#define STM32_DMAMUX_MAX_REQUESTS      255
+
+struct stm32_dmamux {
+       u32 master;
+       u32 request;
+       u32 chan_id;
+};
+
+struct stm32_dmamux_data {
+       struct dma_router dmarouter;
+       struct clk *clk;
+       void __iomem *iomem;
+       u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
+       u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
+       spinlock_t lock; /* Protects register access */
+       DECLARE_BITMAP(dma_inuse, STM32_DMAMUX_MAX_DMA_REQUESTS); /* Used DMA channel */
+       u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to back up the CCR
+                                                * registers during suspend
+                                                */
+       u32 dma_reqs[]; /* Number of DMA requests per DMA master.
+                        * [0] holds the number of DMA masters.
+                        * To be kept at the very end of this structure.
+                        */
+};
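+/*
+ * Example dma_reqs[] layout (hypothetical, two DMA masters with 8 requests
+ * each): { 2, 8, 8 }, i.e. dma_requests = 16, with DMAMUX channels 0..7
+ * routed to master 0 and channels 8..15 to master 1.
+ */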
+
+static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
+{
+       return readl_relaxed(iomem + reg);
+}
+
+static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
+{
+       writel_relaxed(val, iomem + reg);
+}
+
+static void stm32_dmamux_free(struct device *dev, void *route_data)
+{
+       struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
+       struct stm32_dmamux *mux = route_data;
+       unsigned long flags;
+
+       /* Clear dma request */
+       spin_lock_irqsave(&dmamux->lock, flags);
+
+       stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
+       clear_bit(mux->chan_id, dmamux->dma_inuse);
+
+       pm_runtime_put_sync(dev);
+
+       spin_unlock_irqrestore(&dmamux->lock, flags);
+
+       dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
+               mux->request, mux->master, mux->chan_id);
+
+       kfree(mux);
+}
+
+static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+                                        struct of_dma *ofdma)
+{
+       struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+       struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
+       struct stm32_dmamux *mux;
+       u32 i, min, max;
+       int ret;
+       unsigned long flags;
+
+       if (dma_spec->args_count != 3) {
+               dev_err(&pdev->dev, "invalid number of dma mux args\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (dma_spec->args[0] > dmamux->dmamux_requests) {
+               dev_err(&pdev->dev, "invalid mux request number: %d\n",
+                       dma_spec->args[0]);
+               return ERR_PTR(-EINVAL);
+       }
+
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock_irqsave(&dmamux->lock, flags);
+       mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
+                                          dmamux->dma_requests);
+
+       if (mux->chan_id == dmamux->dma_requests) {
+               spin_unlock_irqrestore(&dmamux->lock, flags);
+               dev_err(&pdev->dev, "Run out of free DMA requests\n");
+               ret = -ENOMEM;
+               goto error_chan_id;
+       }
+       set_bit(mux->chan_id, dmamux->dma_inuse);
+       spin_unlock_irqrestore(&dmamux->lock, flags);
+
+       /* Look for DMA Master */
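+       /*
+        * dma_reqs[1..n] hold the per-master request counts: [min, max) is
+        * the window of DMAMUX channels owned by master i - 1, and the first
+        * window containing chan_id wins.
+        */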
+       for (i = 1, min = 0, max = dmamux->dma_reqs[i];
+            i <= dmamux->dma_reqs[0];
+            min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
+               if (mux->chan_id < max)
+                       break;
+       mux->master = i - 1;
+
+       /* The of_node_put() will be done in of_dma_router_xlate function */
+       dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
+       if (!dma_spec->np) {
+               dev_err(&pdev->dev, "can't get dma master\n");
+               ret = -EINVAL;
+               goto error;
+       }
+
+       /* Set dma request */
+       spin_lock_irqsave(&dmamux->lock, flags);
+       ret = pm_runtime_resume_and_get(&pdev->dev);
+       if (ret < 0) {
+               spin_unlock_irqrestore(&dmamux->lock, flags);
+               goto error;
+       }
+       spin_unlock_irqrestore(&dmamux->lock, flags);
+
+       mux->request = dma_spec->args[0];
+
+       /*  craft DMA spec */
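+       /*
+        * Translate the 3-cell client spec (request line, stream config,
+        * features) into the 4-cell spec expected by the STM32 DMA driver:
+        * channel id local to the master, request line 0 (the DMAMUX drives
+        * the request), the stream config, and the features with the DMAMUX
+        * channel id folded into the upper bits.
+        */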
+       dma_spec->args[3] = dma_spec->args[2] | mux->chan_id << 16;
+       dma_spec->args[2] = dma_spec->args[1];
+       dma_spec->args[1] = 0;
+       dma_spec->args[0] = mux->chan_id - min;
+       dma_spec->args_count = 4;
+
+       stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
+                          mux->request);
+       dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
+               mux->request, mux->master, mux->chan_id);
+
+       return mux;
+
+error:
+       clear_bit(mux->chan_id, dmamux->dma_inuse);
+
+error_chan_id:
+       kfree(mux);
+       return ERR_PTR(ret);
+}
+
+static const struct of_device_id stm32_stm32dma_master_match[] __maybe_unused = {
+       { .compatible = "st,stm32-dma", },
+       {},
+};
+
+static int stm32_dmamux_probe(struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       const struct of_device_id *match;
+       struct device_node *dma_node;
+       struct stm32_dmamux_data *stm32_dmamux;
+       void __iomem *iomem;
+       struct reset_control *rst;
+       int i, count, ret;
+       u32 dma_req;
+
+       if (!node)
+               return -ENODEV;
+
+       count = device_property_count_u32(&pdev->dev, "dma-masters");
+       if (count < 0) {
+               dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
+               return -ENODEV;
+       }
+
+       stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
+                                   sizeof(u32) * (count + 1), GFP_KERNEL);
+       if (!stm32_dmamux)
+               return -ENOMEM;
+
+       dma_req = 0;
+       for (i = 1; i <= count; i++) {
+               dma_node = of_parse_phandle(node, "dma-masters", i - 1);
+
+               match = of_match_node(stm32_stm32dma_master_match, dma_node);
+               if (!match) {
+                       dev_err(&pdev->dev, "DMA master is not supported\n");
+                       of_node_put(dma_node);
+                       return -EINVAL;
+               }
+
+               if (of_property_read_u32(dma_node, "dma-requests",
+                                        &stm32_dmamux->dma_reqs[i])) {
+                       dev_info(&pdev->dev,
+                                "Missing MUX output information, using %u.\n",
+                                STM32_DMAMUX_MAX_DMA_REQUESTS);
+                       stm32_dmamux->dma_reqs[i] =
+                               STM32_DMAMUX_MAX_DMA_REQUESTS;
+               }
+               dma_req += stm32_dmamux->dma_reqs[i];
+               of_node_put(dma_node);
+       }
+
+       if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
+               dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
+               return -ENODEV;
+       }
+
+       stm32_dmamux->dma_requests = dma_req;
+       stm32_dmamux->dma_reqs[0] = count;
+
+       if (device_property_read_u32(&pdev->dev, "dma-requests",
+                                    &stm32_dmamux->dmamux_requests)) {
+               stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
+               dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
+                        stm32_dmamux->dmamux_requests);
+       }
+       pm_runtime_get_noresume(&pdev->dev);
+
+       iomem = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(iomem))
+               return PTR_ERR(iomem);
+
+       spin_lock_init(&stm32_dmamux->lock);
+
+       stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(stm32_dmamux->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(stm32_dmamux->clk),
+                                    "Missing clock controller\n");
+
+       ret = clk_prepare_enable(stm32_dmamux->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+               return ret;
+       }
+
+       rst = devm_reset_control_get(&pdev->dev, NULL);
+       if (IS_ERR(rst)) {
+               ret = PTR_ERR(rst);
+               if (ret == -EPROBE_DEFER)
+                       goto err_clk;
+       } else if (count > 1) { /* Don't reset if there is only one dma-master */
+               reset_control_assert(rst);
+               udelay(2);
+               reset_control_deassert(rst);
+       }
+
+       stm32_dmamux->iomem = iomem;
+       stm32_dmamux->dmarouter.dev = &pdev->dev;
+       stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;
+
+       platform_set_drvdata(pdev, stm32_dmamux);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
+       pm_runtime_get_noresume(&pdev->dev);
+
+       /* Reset the dmamux */
+       for (i = 0; i < stm32_dmamux->dma_requests; i++)
+               stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);
+
+       pm_runtime_put(&pdev->dev);
+
+       ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
+                                    &stm32_dmamux->dmarouter);
+       if (ret)
+               goto pm_disable;
+
+       return 0;
+
+pm_disable:
+       pm_runtime_disable(&pdev->dev);
+err_clk:
+       clk_disable_unprepare(stm32_dmamux->clk);
+
+       return ret;
+}
+
+#ifdef CONFIG_PM
+static int stm32_dmamux_runtime_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+
+       clk_disable_unprepare(stm32_dmamux->clk);
+
+       return 0;
+}
+
+static int stm32_dmamux_runtime_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = clk_prepare_enable(stm32_dmamux->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to prepare_enable clock\n");
+               return ret;
+       }
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dmamux_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+       int i, ret;
+
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < stm32_dmamux->dma_requests; i++)
+               stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
+                                                        STM32_DMAMUX_CCR(i));
+
+       pm_runtime_put_sync(dev);
+
+       pm_runtime_force_suspend(dev);
+
+       return 0;
+}
+
+static int stm32_dmamux_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+       int i, ret;
+
+       ret = pm_runtime_force_resume(dev);
+       if (ret < 0)
+               return ret;
+
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < stm32_dmamux->dma_requests; i++)
+               stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
+                                  stm32_dmamux->ccr[i]);
+
+       pm_runtime_put_sync(dev);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm32_dmamux_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
+       SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
+                          stm32_dmamux_runtime_resume, NULL)
+};
+
+static const struct of_device_id stm32_dmamux_match[] = {
+       { .compatible = "st,stm32h7-dmamux" },
+       {},
+};
+
+static struct platform_driver stm32_dmamux_driver = {
+       .probe  = stm32_dmamux_probe,
+       .driver = {
+               .name = "stm32-dmamux",
+               .of_match_table = stm32_dmamux_match,
+               .pm = &stm32_dmamux_pm_ops,
+       },
+};
+
+static int __init stm32_dmamux_init(void)
+{
+       return platform_driver_register(&stm32_dmamux_driver);
+}
+arch_initcall(stm32_dmamux_init);
+
+MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
+MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
+MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
diff --git a/drivers/dma/stm32/stm32-mdma.c b/drivers/dma/stm32/stm32-mdma.c
new file mode 100644 (file)
index 0000000..e6d5259
--- /dev/null
@@ -0,0 +1,1829 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
+ *
+ * Driver for STM32 MDMA controller
+ *
+ * Inspired by stm32-dma.c and dma-jz4780.c
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "../virt-dma.h"
+
+#define STM32_MDMA_GISR0               0x0000 /* MDMA Int Status Reg 0 */
+
+/* MDMA Channel x interrupt/status register */
+#define STM32_MDMA_CISR(x)             (0x40 + 0x40 * (x)) /* x = 0..62 */
+#define STM32_MDMA_CISR_CRQA           BIT(16)
+#define STM32_MDMA_CISR_TCIF           BIT(4)
+#define STM32_MDMA_CISR_BTIF           BIT(3)
+#define STM32_MDMA_CISR_BRTIF          BIT(2)
+#define STM32_MDMA_CISR_CTCIF          BIT(1)
+#define STM32_MDMA_CISR_TEIF           BIT(0)
+
+/* MDMA Channel x interrupt flag clear register */
+#define STM32_MDMA_CIFCR(x)            (0x44 + 0x40 * (x))
+#define STM32_MDMA_CIFCR_CLTCIF                BIT(4)
+#define STM32_MDMA_CIFCR_CBTIF         BIT(3)
+#define STM32_MDMA_CIFCR_CBRTIF                BIT(2)
+#define STM32_MDMA_CIFCR_CCTCIF                BIT(1)
+#define STM32_MDMA_CIFCR_CTEIF         BIT(0)
+#define STM32_MDMA_CIFCR_CLEAR_ALL     (STM32_MDMA_CIFCR_CLTCIF \
+                                       | STM32_MDMA_CIFCR_CBTIF \
+                                       | STM32_MDMA_CIFCR_CBRTIF \
+                                       | STM32_MDMA_CIFCR_CCTCIF \
+                                       | STM32_MDMA_CIFCR_CTEIF)
+
+/* MDMA Channel x error status register */
+#define STM32_MDMA_CESR(x)             (0x48 + 0x40 * (x))
+#define STM32_MDMA_CESR_BSE            BIT(11)
+#define STM32_MDMA_CESR_ASR            BIT(10)
+#define STM32_MDMA_CESR_TEMD           BIT(9)
+#define STM32_MDMA_CESR_TELD           BIT(8)
+#define STM32_MDMA_CESR_TED            BIT(7)
+#define STM32_MDMA_CESR_TEA_MASK       GENMASK(6, 0)
+
+/* MDMA Channel x control register */
+#define STM32_MDMA_CCR(x)              (0x4C + 0x40 * (x))
+#define STM32_MDMA_CCR_SWRQ            BIT(16)
+#define STM32_MDMA_CCR_WEX             BIT(14)
+#define STM32_MDMA_CCR_HEX             BIT(13)
+#define STM32_MDMA_CCR_BEX             BIT(12)
+#define STM32_MDMA_CCR_SM              BIT(8)
+#define STM32_MDMA_CCR_PL_MASK         GENMASK(7, 6)
+#define STM32_MDMA_CCR_PL(n)           FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n))
+#define STM32_MDMA_CCR_TCIE            BIT(5)
+#define STM32_MDMA_CCR_BTIE            BIT(4)
+#define STM32_MDMA_CCR_BRTIE           BIT(3)
+#define STM32_MDMA_CCR_CTCIE           BIT(2)
+#define STM32_MDMA_CCR_TEIE            BIT(1)
+#define STM32_MDMA_CCR_EN              BIT(0)
+#define STM32_MDMA_CCR_IRQ_MASK                (STM32_MDMA_CCR_TCIE \
+                                       | STM32_MDMA_CCR_BTIE \
+                                       | STM32_MDMA_CCR_BRTIE \
+                                       | STM32_MDMA_CCR_CTCIE \
+                                       | STM32_MDMA_CCR_TEIE)
+
+/* MDMA Channel x transfer configuration register */
+#define STM32_MDMA_CTCR(x)             (0x50 + 0x40 * (x))
+#define STM32_MDMA_CTCR_BWM            BIT(31)
+#define STM32_MDMA_CTCR_SWRM           BIT(30)
+#define STM32_MDMA_CTCR_TRGM_MSK       GENMASK(29, 28)
+#define STM32_MDMA_CTCR_TRGM(n)                FIELD_PREP(STM32_MDMA_CTCR_TRGM_MSK, (n))
+#define STM32_MDMA_CTCR_TRGM_GET(n)    FIELD_GET(STM32_MDMA_CTCR_TRGM_MSK, (n))
+#define STM32_MDMA_CTCR_PAM_MASK       GENMASK(27, 26)
+#define STM32_MDMA_CTCR_PAM(n)         FIELD_PREP(STM32_MDMA_CTCR_PAM_MASK, (n))
+#define STM32_MDMA_CTCR_PKE            BIT(25)
+#define STM32_MDMA_CTCR_TLEN_MSK       GENMASK(24, 18)
+#define STM32_MDMA_CTCR_TLEN(n)                FIELD_PREP(STM32_MDMA_CTCR_TLEN_MSK, (n))
+#define STM32_MDMA_CTCR_TLEN_GET(n)    FIELD_GET(STM32_MDMA_CTCR_TLEN_MSK, (n))
+#define STM32_MDMA_CTCR_LEN2_MSK       GENMASK(25, 18)
+#define STM32_MDMA_CTCR_LEN2(n)                FIELD_PREP(STM32_MDMA_CTCR_LEN2_MSK, (n))
+#define STM32_MDMA_CTCR_LEN2_GET(n)    FIELD_GET(STM32_MDMA_CTCR_LEN2_MSK, (n))
+#define STM32_MDMA_CTCR_DBURST_MASK    GENMASK(17, 15)
+#define STM32_MDMA_CTCR_DBURST(n)      FIELD_PREP(STM32_MDMA_CTCR_DBURST_MASK, (n))
+#define STM32_MDMA_CTCR_SBURST_MASK    GENMASK(14, 12)
+#define STM32_MDMA_CTCR_SBURST(n)      FIELD_PREP(STM32_MDMA_CTCR_SBURST_MASK, (n))
+#define STM32_MDMA_CTCR_DINCOS_MASK    GENMASK(11, 10)
+#define STM32_MDMA_CTCR_DINCOS(n)      FIELD_PREP(STM32_MDMA_CTCR_DINCOS_MASK, (n))
+#define STM32_MDMA_CTCR_SINCOS_MASK    GENMASK(9, 8)
+#define STM32_MDMA_CTCR_SINCOS(n)      FIELD_PREP(STM32_MDMA_CTCR_SINCOS_MASK, (n))
+#define STM32_MDMA_CTCR_DSIZE_MASK     GENMASK(7, 6)
+#define STM32_MDMA_CTCR_DSIZE(n)       FIELD_PREP(STM32_MDMA_CTCR_DSIZE_MASK, (n))
+#define STM32_MDMA_CTCR_SSIZE_MASK     GENMASK(5, 4)
+#define STM32_MDMA_CTCR_SSIZE(n)       FIELD_PREP(STM32_MDMA_CTCR_SSIZE_MASK, (n))
+#define STM32_MDMA_CTCR_DINC_MASK      GENMASK(3, 2)
+#define STM32_MDMA_CTCR_DINC(n)                FIELD_PREP(STM32_MDMA_CTCR_DINC_MASK, (n))
+#define STM32_MDMA_CTCR_SINC_MASK      GENMASK(1, 0)
+#define STM32_MDMA_CTCR_SINC(n)                FIELD_PREP(STM32_MDMA_CTCR_SINC_MASK, (n))
+#define STM32_MDMA_CTCR_CFG_MASK       (STM32_MDMA_CTCR_SINC_MASK \
+                                       | STM32_MDMA_CTCR_DINC_MASK \
+                                       | STM32_MDMA_CTCR_SINCOS_MASK \
+                                       | STM32_MDMA_CTCR_DINCOS_MASK \
+                                       | STM32_MDMA_CTCR_LEN2_MSK \
+                                       | STM32_MDMA_CTCR_TRGM_MSK)
+
+/* MDMA Channel x block number of data register */
+#define STM32_MDMA_CBNDTR(x)           (0x54 + 0x40 * (x))
+#define STM32_MDMA_CBNDTR_BRC_MK       GENMASK(31, 20)
+#define STM32_MDMA_CBNDTR_BRC(n)       FIELD_PREP(STM32_MDMA_CBNDTR_BRC_MK, (n))
+#define STM32_MDMA_CBNDTR_BRC_GET(n)   FIELD_GET(STM32_MDMA_CBNDTR_BRC_MK, (n))
+
+#define STM32_MDMA_CBNDTR_BRDUM                BIT(19)
+#define STM32_MDMA_CBNDTR_BRSUM                BIT(18)
+#define STM32_MDMA_CBNDTR_BNDT_MASK    GENMASK(16, 0)
+#define STM32_MDMA_CBNDTR_BNDT(n)      FIELD_PREP(STM32_MDMA_CBNDTR_BNDT_MASK, (n))
+
+/* MDMA Channel x source address register */
+#define STM32_MDMA_CSAR(x)             (0x58 + 0x40 * (x))
+
+/* MDMA Channel x destination address register */
+#define STM32_MDMA_CDAR(x)             (0x5C + 0x40 * (x))
+
+/* MDMA Channel x block repeat address update register */
+#define STM32_MDMA_CBRUR(x)            (0x60 + 0x40 * (x))
+#define STM32_MDMA_CBRUR_DUV_MASK      GENMASK(31, 16)
+#define STM32_MDMA_CBRUR_DUV(n)                FIELD_PREP(STM32_MDMA_CBRUR_DUV_MASK, (n))
+#define STM32_MDMA_CBRUR_SUV_MASK      GENMASK(15, 0)
+#define STM32_MDMA_CBRUR_SUV(n)                FIELD_PREP(STM32_MDMA_CBRUR_SUV_MASK, (n))
+
+/* MDMA Channel x link address register */
+#define STM32_MDMA_CLAR(x)             (0x64 + 0x40 * (x))
+
+/* MDMA Channel x trigger and bus selection register */
+#define STM32_MDMA_CTBR(x)             (0x68 + 0x40 * (x))
+#define STM32_MDMA_CTBR_DBUS           BIT(17)
+#define STM32_MDMA_CTBR_SBUS           BIT(16)
+#define STM32_MDMA_CTBR_TSEL_MASK      GENMASK(5, 0)
+#define STM32_MDMA_CTBR_TSEL(n)                FIELD_PREP(STM32_MDMA_CTBR_TSEL_MASK, (n))
+
+/* MDMA Channel x mask address register */
+#define STM32_MDMA_CMAR(x)             (0x70 + 0x40 * (x))
+
+/* MDMA Channel x mask data register */
+#define STM32_MDMA_CMDR(x)             (0x74 + 0x40 * (x))
+
+#define STM32_MDMA_MAX_BUF_LEN         128
+#define STM32_MDMA_MAX_BLOCK_LEN       65536
+#define STM32_MDMA_MAX_CHANNELS                32
+#define STM32_MDMA_MAX_REQUESTS                256
+#define STM32_MDMA_MAX_BURST           128
+#define STM32_MDMA_VERY_HIGH_PRIORITY  0x3
+
+enum stm32_mdma_trigger_mode {
+       STM32_MDMA_BUFFER,
+       STM32_MDMA_BLOCK,
+       STM32_MDMA_BLOCK_REP,
+       STM32_MDMA_LINKED_LIST,
+};
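+
+/*
+ * These values follow the CTCR TRGM field encoding: each hardware trigger
+ * moves one buffer, one block, one repeated block, or starts the whole
+ * linked list (see the STM32_MDMA_CTCR_TRGM() users below).
+ */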
+
+enum stm32_mdma_width {
+       STM32_MDMA_BYTE,
+       STM32_MDMA_HALF_WORD,
+       STM32_MDMA_WORD,
+       STM32_MDMA_DOUBLE_WORD,
+};
+
+enum stm32_mdma_inc_mode {
+       STM32_MDMA_FIXED = 0,
+       STM32_MDMA_INC = 2,
+       STM32_MDMA_DEC = 3,
+};
+
+struct stm32_mdma_chan_config {
+       u32 request;
+       u32 priority_level;
+       u32 transfer_config;
+       u32 mask_addr;
+       u32 mask_data;
+       bool m2m_hw; /* True when MDMA is triggered by STM32 DMA */
+};
+
+struct stm32_mdma_hwdesc {
+       u32 ctcr;
+       u32 cbndtr;
+       u32 csar;
+       u32 cdar;
+       u32 cbrur;
+       u32 clar;
+       u32 ctbr;
+       u32 dummy;
+       u32 cmar;
+       u32 cmdr;
+} __aligned(64);
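+
+/*
+ * This layout mirrors the channel's CTCR..CMDR register block (0x50-0x74,
+ * with `dummy` padding the reserved word at 0x6C), so the controller can
+ * reload a full channel context from memory when following CLAR in
+ * linked-list mode; presumably why the node is 64-byte aligned.
+ */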
+
+struct stm32_mdma_desc_node {
+       struct stm32_mdma_hwdesc *hwdesc;
+       dma_addr_t hwdesc_phys;
+};
+
+struct stm32_mdma_desc {
+       struct virt_dma_desc vdesc;
+       u32 ccr;
+       bool cyclic;
+       u32 count;
+       struct stm32_mdma_desc_node node[] __counted_by(count);
+};
+
+struct stm32_mdma_dma_config {
+       u32 request;    /* STM32 DMA channel stream id, triggering MDMA */
+       u32 cmar;       /* STM32 DMA interrupt flag clear register address */
+       u32 cmdr;       /* STM32 DMA Transfer Complete flag */
+};
+
+struct stm32_mdma_chan {
+       struct virt_dma_chan vchan;
+       struct dma_pool *desc_pool;
+       u32 id;
+       struct stm32_mdma_desc *desc;
+       u32 curr_hwdesc;
+       struct dma_slave_config dma_config;
+       struct stm32_mdma_chan_config chan_config;
+       bool busy;
+       u32 mem_burst;
+       u32 mem_width;
+};
+
+struct stm32_mdma_device {
+       struct dma_device ddev;
+       void __iomem *base;
+       struct clk *clk;
+       int irq;
+       u32 nr_channels;
+       u32 nr_requests;
+       u32 nr_ahb_addr_masks;
+       u32 chan_reserved;
+       struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
+       u32 ahb_addr_masks[] __counted_by(nr_ahb_addr_masks);
+};
+
+static struct stm32_mdma_device *stm32_mdma_get_dev(
+       struct stm32_mdma_chan *chan)
+{
+       return container_of(chan->vchan.chan.device, struct stm32_mdma_device,
+                           ddev);
+}
+
+static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct stm32_mdma_chan, vchan.chan);
+}
+
+static struct stm32_mdma_desc *to_stm32_mdma_desc(struct virt_dma_desc *vdesc)
+{
+       return container_of(vdesc, struct stm32_mdma_desc, vdesc);
+}
+
+static struct device *chan2dev(struct stm32_mdma_chan *chan)
+{
+       return &chan->vchan.chan.dev->device;
+}
+
+static struct device *mdma2dev(struct stm32_mdma_device *mdma_dev)
+{
+       return mdma_dev->ddev.dev;
+}
+
+static u32 stm32_mdma_read(struct stm32_mdma_device *dmadev, u32 reg)
+{
+       return readl_relaxed(dmadev->base + reg);
+}
+
+static void stm32_mdma_write(struct stm32_mdma_device *dmadev, u32 reg, u32 val)
+{
+       writel_relaxed(val, dmadev->base + reg);
+}
+
+static void stm32_mdma_set_bits(struct stm32_mdma_device *dmadev, u32 reg,
+                               u32 mask)
+{
+       void __iomem *addr = dmadev->base + reg;
+
+       writel_relaxed(readl_relaxed(addr) | mask, addr);
+}
+
+static void stm32_mdma_clr_bits(struct stm32_mdma_device *dmadev, u32 reg,
+                               u32 mask)
+{
+       void __iomem *addr = dmadev->base + reg;
+
+       writel_relaxed(readl_relaxed(addr) & ~mask, addr);
+}
+
+static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
+               struct stm32_mdma_chan *chan, u32 count)
+{
+       struct stm32_mdma_desc *desc;
+       int i;
+
+       desc = kzalloc(struct_size(desc, node, count), GFP_NOWAIT);
+       if (!desc)
+               return NULL;
+       desc->count = count;
+
+       for (i = 0; i < count; i++) {
+               desc->node[i].hwdesc =
+                       dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
+                                      &desc->node[i].hwdesc_phys);
+               if (!desc->node[i].hwdesc)
+                       goto err;
+       }
+
+       return desc;
+
+err:
+       dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
+       while (--i >= 0)
+               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+                             desc->node[i].hwdesc_phys);
+       kfree(desc);
+       return NULL;
+}
+
+static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
+{
+       struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
+       int i;
+
+       for (i = 0; i < desc->count; i++)
+               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+                             desc->node[i].hwdesc_phys);
+       kfree(desc);
+}
+
+static int stm32_mdma_get_width(struct stm32_mdma_chan *chan,
+                               enum dma_slave_buswidth width)
+{
+       switch (width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+       case DMA_SLAVE_BUSWIDTH_8_BYTES:
+               return ffs(width) - 1;
+       default:
+               dev_err(chan2dev(chan), "Dma bus width %i not supported\n",
+                       width);
+               return -EINVAL;
+       }
+}
+
+static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
+                                                       u32 buf_len, u32 tlen)
+{
+       enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+       for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
+            max_width > DMA_SLAVE_BUSWIDTH_1_BYTE;
+            max_width >>= 1) {
+               /*
+                * Address and buffer length both have to be aligned on
+                * bus width
+                */
+               if ((((buf_len | addr) & (max_width - 1)) == 0) &&
+                   tlen >= max_width)
+                       break;
+       }
+
+       return max_width;
+}
+
+static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
+                                    enum dma_slave_buswidth width)
+{
+       u32 best_burst;
+
+       best_burst = min((u32)1 << __ffs(tlen | buf_len),
+                        max_burst * width) / width;
+
+       return (best_burst > 0) ? best_burst : 1;
+}
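+
+/*
+ * Worked example (illustrative values): for buf_len = 96, tlen = 128 and a
+ * 4-byte bus width with max_burst = 4 beats, 1 << __ffs(128 | 96) = 32
+ * bytes, capped to min(32, 4 * 4) = 16 bytes, i.e. 16 / 4 = 4 beats.
+ */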
+
+static int stm32_mdma_disable_chan(struct stm32_mdma_chan *chan)
+{
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       u32 ccr, cisr, id, reg;
+       int ret;
+
+       id = chan->id;
+       reg = STM32_MDMA_CCR(id);
+
+       /* Disable interrupts */
+       stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_IRQ_MASK);
+
+       ccr = stm32_mdma_read(dmadev, reg);
+       if (ccr & STM32_MDMA_CCR_EN) {
+               stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_EN);
+
+               /* Ensure that any ongoing transfer has been completed */
+               ret = readl_relaxed_poll_timeout_atomic(
+                               dmadev->base + STM32_MDMA_CISR(id), cisr,
+                               (cisr & STM32_MDMA_CISR_CTCIF), 10, 1000);
+               if (ret) {
+                       dev_err(chan2dev(chan), "%s: timeout!\n", __func__);
+                       return -EBUSY;
+               }
+       }
+
+       return 0;
+}
+
+static void stm32_mdma_stop(struct stm32_mdma_chan *chan)
+{
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       u32 status;
+       int ret;
+
+       /* Disable DMA */
+       ret = stm32_mdma_disable_chan(chan);
+       if (ret < 0)
+               return;
+
+       /* Clear interrupt status if it is there */
+       status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+       if (status) {
+               dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
+                       __func__, status);
+               stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
+       }
+
+       chan->busy = false;
+}
+
+static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr,
+                              u32 ctbr_mask, u32 src_addr)
+{
+       u32 mask;
+       int i;
+
+       /* Check if memory device is on AHB or AXI */
+       *ctbr &= ~ctbr_mask;
+       mask = src_addr & 0xF0000000;
+       for (i = 0; i < dmadev->nr_ahb_addr_masks; i++) {
+               if (mask == dmadev->ahb_addr_masks[i]) {
+                       *ctbr |= ctbr_mask;
+                       break;
+               }
+       }
+}
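+
+/*
+ * The masks are initialized at probe from the "st,ahb-addr-masks" DT
+ * property: an address whose top nibble matches one of them is reached
+ * through the AHB bus, anything else stays on the default (AXI) port.
+ */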
+
+static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
+                                    enum dma_transfer_direction direction,
+                                    u32 *mdma_ccr, u32 *mdma_ctcr,
+                                    u32 *mdma_ctbr, dma_addr_t addr,
+                                    u32 buf_len)
+{
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
+       enum dma_slave_buswidth src_addr_width, dst_addr_width;
+       phys_addr_t src_addr, dst_addr;
+       int src_bus_width, dst_bus_width;
+       u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
+       u32 ccr, ctcr, ctbr, tlen;
+
+       src_addr_width = chan->dma_config.src_addr_width;
+       dst_addr_width = chan->dma_config.dst_addr_width;
+       src_maxburst = chan->dma_config.src_maxburst;
+       dst_maxburst = chan->dma_config.dst_maxburst;
+
+       ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+       ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+       ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+
+       /* Enable HW request mode */
+       ctcr &= ~STM32_MDMA_CTCR_SWRM;
+
+       /* Set DINC, SINC, DINCOS, SINCOS, TRGM and TLEN as retrieved from DT */
+       ctcr &= ~STM32_MDMA_CTCR_CFG_MASK;
+       ctcr |= chan_config->transfer_config & STM32_MDMA_CTCR_CFG_MASK;
+
+       /*
+        * For the buffer transfer length (TLEN), the number of bytes minus 1
+        * has to be written in the CTCR register
+        */
+       tlen = STM32_MDMA_CTCR_LEN2_GET(ctcr);
+       ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK;
+       ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
+
+       /* Disable Pack Enable */
+       ctcr &= ~STM32_MDMA_CTCR_PKE;
+
+       /* Check burst size constraints */
+       if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST ||
+           dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) {
+               dev_err(chan2dev(chan),
+                       "burst size * bus width higher than %d bytes\n",
+                       STM32_MDMA_MAX_BURST);
+               return -EINVAL;
+       }
+
+       if ((!is_power_of_2(src_maxburst) && src_maxburst > 0) ||
+           (!is_power_of_2(dst_maxburst) && dst_maxburst > 0)) {
+               dev_err(chan2dev(chan), "burst size must be a power of 2\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Configure channel control:
+        * - Clear SW request as in this case this is a HW one
+        * - Clear WEX, HEX and BEX bits
+        * - Set priority level
+        */
+       ccr &= ~(STM32_MDMA_CCR_SWRQ | STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
+                STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK);
+       ccr |= STM32_MDMA_CCR_PL(chan_config->priority_level);
+
+       /* Configure Trigger selection */
+       ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
+       ctbr |= STM32_MDMA_CTBR_TSEL(chan_config->request);
+
+       switch (direction) {
+       case DMA_MEM_TO_DEV:
+               dst_addr = chan->dma_config.dst_addr;
+
+               /* Set device data size */
+               if (chan_config->m2m_hw)
+                       dst_addr_width = stm32_mdma_get_max_width(dst_addr, buf_len,
+                                                                 STM32_MDMA_MAX_BUF_LEN);
+               dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
+               if (dst_bus_width < 0)
+                       return dst_bus_width;
+               ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
+               ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);
+               if (chan_config->m2m_hw) {
+                       ctcr &= ~STM32_MDMA_CTCR_DINCOS_MASK;
+                       ctcr |= STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+               }
+
+               /* Set device burst value */
+               if (chan_config->m2m_hw)
+                       dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
+
+               dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+                                                          dst_maxburst,
+                                                          dst_addr_width);
+               chan->mem_burst = dst_best_burst;
+               ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
+               ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));
+
+               /* Set memory data size */
+               src_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
+               chan->mem_width = src_addr_width;
+               src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
+               if (src_bus_width < 0)
+                       return src_bus_width;
+               ctcr &= ~(STM32_MDMA_CTCR_SSIZE_MASK |
+                         STM32_MDMA_CTCR_SINCOS_MASK);
+               ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width) |
+                       STM32_MDMA_CTCR_SINCOS(src_bus_width);
+
+               /* Set memory burst value */
+               src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
+               src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+                                                          src_maxburst,
+                                                          src_addr_width);
+               chan->mem_burst = src_best_burst;
+               ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
+               ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));
+
+               /* Select bus */
+               stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
+                                  dst_addr);
+
+               if (dst_bus_width != src_bus_width)
+                       ctcr |= STM32_MDMA_CTCR_PKE;
+
+               /* Set destination address */
+               stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr);
+               break;
+
+       case DMA_DEV_TO_MEM:
+               src_addr = chan->dma_config.src_addr;
+
+               /* Set device data size */
+               if (chan_config->m2m_hw)
+                       src_addr_width = stm32_mdma_get_max_width(src_addr, buf_len,
+                                                                 STM32_MDMA_MAX_BUF_LEN);
+
+               src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
+               if (src_bus_width < 0)
+                       return src_bus_width;
+               ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
+               ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);
+               if (chan_config->m2m_hw) {
+                       ctcr &= ~STM32_MDMA_CTCR_SINCOS_MASK;
+                       ctcr |= STM32_MDMA_CTCR_SINCOS(src_bus_width);
+               }
+
+               /* Set device burst value */
+               if (chan_config->m2m_hw)
+                       src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
+
+               src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+                                                          src_maxburst,
+                                                          src_addr_width);
+               ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
+               ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));
+
+               /* Set memory data size */
+               dst_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
+               chan->mem_width = dst_addr_width;
+               dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
+               if (dst_bus_width < 0)
+                       return dst_bus_width;
+               ctcr &= ~(STM32_MDMA_CTCR_DSIZE_MASK |
+                       STM32_MDMA_CTCR_DINCOS_MASK);
+               ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
+                       STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+
+               /* Set memory burst value */
+               dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
+               dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+                                                          dst_maxburst,
+                                                          dst_addr_width);
+               ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
+               ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));
+
+               /* Select bus */
+               stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
+                                  src_addr);
+
+               if (dst_bus_width != src_bus_width)
+                       ctcr |= STM32_MDMA_CTCR_PKE;
+
+               /* Set source address */
+               stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr);
+               break;
+
+       default:
+               dev_err(chan2dev(chan), "Dma direction is not supported\n");
+               return -EINVAL;
+       }
+
+       *mdma_ccr = ccr;
+       *mdma_ctcr = ctcr;
+       *mdma_ctbr = ctbr;
+
+       return 0;
+}
+
+static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
+                                  struct stm32_mdma_desc_node *node)
+{
+       dev_dbg(chan2dev(chan), "hwdesc:  %pad\n", &node->hwdesc_phys);
+       dev_dbg(chan2dev(chan), "CTCR:    0x%08x\n", node->hwdesc->ctcr);
+       dev_dbg(chan2dev(chan), "CBNDTR:  0x%08x\n", node->hwdesc->cbndtr);
+       dev_dbg(chan2dev(chan), "CSAR:    0x%08x\n", node->hwdesc->csar);
+       dev_dbg(chan2dev(chan), "CDAR:    0x%08x\n", node->hwdesc->cdar);
+       dev_dbg(chan2dev(chan), "CBRUR:   0x%08x\n", node->hwdesc->cbrur);
+       dev_dbg(chan2dev(chan), "CLAR:    0x%08x\n", node->hwdesc->clar);
+       dev_dbg(chan2dev(chan), "CTBR:    0x%08x\n", node->hwdesc->ctbr);
+       dev_dbg(chan2dev(chan), "CMAR:    0x%08x\n", node->hwdesc->cmar);
+       dev_dbg(chan2dev(chan), "CMDR:    0x%08x\n\n", node->hwdesc->cmdr);
+}
+
+static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
+                                   struct stm32_mdma_desc *desc,
+                                   enum dma_transfer_direction dir, u32 count,
+                                   dma_addr_t src_addr, dma_addr_t dst_addr,
+                                   u32 len, u32 ctcr, u32 ctbr, bool is_last,
+                                   bool is_first, bool is_cyclic)
+{
+       struct stm32_mdma_chan_config *config = &chan->chan_config;
+       struct stm32_mdma_hwdesc *hwdesc;
+       u32 next = count + 1;
+
+       hwdesc = desc->node[count].hwdesc;
+       hwdesc->ctcr = ctcr;
+       hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
+                       STM32_MDMA_CBNDTR_BRDUM |
+                       STM32_MDMA_CBNDTR_BRSUM |
+                       STM32_MDMA_CBNDTR_BNDT_MASK);
+       hwdesc->cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
+       hwdesc->csar = src_addr;
+       hwdesc->cdar = dst_addr;
+       hwdesc->cbrur = 0;
+       hwdesc->ctbr = ctbr;
+       hwdesc->cmar = config->mask_addr;
+       hwdesc->cmdr = config->mask_data;
+
+       if (is_last) {
+               if (is_cyclic)
+                       hwdesc->clar = desc->node[0].hwdesc_phys;
+               else
+                       hwdesc->clar = 0;
+       } else {
+               hwdesc->clar = desc->node[next].hwdesc_phys;
+       }
+
+       stm32_mdma_dump_hwdesc(chan, &desc->node[count]);
+}
+
+static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
+                                struct stm32_mdma_desc *desc,
+                                struct scatterlist *sgl, u32 sg_len,
+                                enum dma_transfer_direction direction)
+{
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       struct dma_slave_config *dma_config = &chan->dma_config;
+       struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
+       struct scatterlist *sg;
+       dma_addr_t src_addr, dst_addr;
+       u32 m2m_hw_period, ccr, ctcr, ctbr;
+       int i, ret = 0;
+
+       if (chan_config->m2m_hw)
+               m2m_hw_period = sg_dma_len(sgl);
+
+       for_each_sg(sgl, sg, sg_len, i) {
+               if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
+                       dev_err(chan2dev(chan), "Invalid block len\n");
+                       return -EINVAL;
+               }
+
+               if (direction == DMA_MEM_TO_DEV) {
+                       src_addr = sg_dma_address(sg);
+                       dst_addr = dma_config->dst_addr;
+                       if (chan_config->m2m_hw && (i & 1))
+                               dst_addr += m2m_hw_period;
+                       ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
+                                                       &ctcr, &ctbr, src_addr,
+                                                       sg_dma_len(sg));
+                       stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
+                                          src_addr);
+               } else {
+                       src_addr = dma_config->src_addr;
+                       if (chan_config->m2m_hw && (i & 1))
+                               src_addr += m2m_hw_period;
+                       dst_addr = sg_dma_address(sg);
+                       ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
+                                                       &ctcr, &ctbr, dst_addr,
+                                                       sg_dma_len(sg));
+                       stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
+                                          dst_addr);
+               }
+
+               if (ret < 0)
+                       return ret;
+
+               stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
+                                       dst_addr, sg_dma_len(sg), ctcr, ctbr,
+                                       i == sg_len - 1, i == 0, false);
+       }
+
+       /* Enable interrupts */
+       ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
+       ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
+       desc->ccr = ccr;
+
+       return 0;
+}
+
+static struct dma_async_tx_descriptor *
+stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
+                        u32 sg_len, enum dma_transfer_direction direction,
+                        unsigned long flags, void *context)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
+       struct stm32_mdma_desc *desc;
+       int i, ret;
+
+       /*
+        * Once the channel has been set up in cyclic mode, it cannot be
+        * reassigned. The DMA channel needs to be aborted or terminated to
+        * allow another request.
+        */
+       if (chan->desc && chan->desc->cyclic) {
+               dev_err(chan2dev(chan),
+                       "Request not allowed when dma in cyclic mode\n");
+               return NULL;
+       }
+
+       desc = stm32_mdma_alloc_desc(chan, sg_len);
+       if (!desc)
+               return NULL;
+
+       ret = stm32_mdma_setup_xfer(chan, desc, sgl, sg_len, direction);
+       if (ret < 0)
+               goto xfer_setup_err;
+
+       /*
+        * In case of an M2M HW transfer triggered by STM32 DMA, the transfer
+        * complete flag must not be cleared by hardware (CMAR/CMDR zeroed),
+        * so that the CPU can rearm the STM32 DMA with the next sg element
+        * and update some data in the dmaengine framework.
+        */
+       if (chan_config->m2m_hw && direction == DMA_MEM_TO_DEV) {
+               struct stm32_mdma_hwdesc *hwdesc;
+
+               for (i = 0; i < sg_len; i++) {
+                       hwdesc = desc->node[i].hwdesc;
+                       hwdesc->cmar = 0;
+                       hwdesc->cmdr = 0;
+               }
+       }
+
+       desc->cyclic = false;
+
+       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+xfer_setup_err:
+       for (i = 0; i < desc->count; i++)
+               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+                             desc->node[i].hwdesc_phys);
+       kfree(desc);
+       return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
+                          size_t buf_len, size_t period_len,
+                          enum dma_transfer_direction direction,
+                          unsigned long flags)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       struct dma_slave_config *dma_config = &chan->dma_config;
+       struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
+       struct stm32_mdma_desc *desc;
+       dma_addr_t src_addr, dst_addr;
+       u32 ccr, ctcr, ctbr, count;
+       int i, ret;
+
+       /*
+        * Once the channel has been set up in cyclic mode, it cannot be
+        * reassigned. The DMA channel needs to be aborted or terminated to
+        * allow another request.
+        */
+       if (chan->desc && chan->desc->cyclic) {
+               dev_err(chan2dev(chan),
+                       "Request not allowed when dma in cyclic mode\n");
+               return NULL;
+       }
+
+       if (!buf_len || !period_len || period_len > STM32_MDMA_MAX_BLOCK_LEN) {
+               dev_err(chan2dev(chan), "Invalid buffer/period len\n");
+               return NULL;
+       }
+
+       if (buf_len % period_len) {
+               dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
+               return NULL;
+       }
+
+       count = buf_len / period_len;
+
+       desc = stm32_mdma_alloc_desc(chan, count);
+       if (!desc)
+               return NULL;
+
+       /* Select bus */
+       if (direction == DMA_MEM_TO_DEV) {
+               src_addr = buf_addr;
+               ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
+                                               &ctbr, src_addr, period_len);
+               stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
+                                  src_addr);
+       } else {
+               dst_addr = buf_addr;
+               ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
+                                               &ctbr, dst_addr, period_len);
+               stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
+                                  dst_addr);
+       }
+
+       if (ret < 0)
+               goto xfer_setup_err;
+
+       /* Enable interrupts */
+       ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
+       ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE;
+       desc->ccr = ccr;
+
+       /* Configure hwdesc list */
+       for (i = 0; i < count; i++) {
+               if (direction == DMA_MEM_TO_DEV) {
+                       src_addr = buf_addr + i * period_len;
+                       dst_addr = dma_config->dst_addr;
+                       if (chan_config->m2m_hw && (i & 1))
+                               dst_addr += period_len;
+               } else {
+                       src_addr = dma_config->src_addr;
+                       if (chan_config->m2m_hw && (i & 1))
+                               src_addr += period_len;
+                       dst_addr = buf_addr + i * period_len;
+               }
+
+               stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
+                                       dst_addr, period_len, ctcr, ctbr,
+                                       i == count - 1, i == 0, true);
+       }
+
+       desc->cyclic = true;
+
+       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+xfer_setup_err:
+       for (i = 0; i < desc->count; i++)
+               dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
+                             desc->node[i].hwdesc_phys);
+       kfree(desc);
+       return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+                          size_t len, unsigned long flags)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       enum dma_slave_buswidth max_width;
+       struct stm32_mdma_desc *desc;
+       struct stm32_mdma_hwdesc *hwdesc;
+       u32 ccr, ctcr, ctbr, cbndtr, count, max_burst, mdma_burst;
+       u32 best_burst, tlen;
+       size_t xfer_count, offset;
+       int src_bus_width, dst_bus_width;
+       int i;
+
+       /*
+        * Once the channel has been set up in cyclic mode, it cannot be
+        * reassigned. The DMA channel needs to be aborted or terminated to
+        * allow another request.
+        */
+       if (chan->desc && chan->desc->cyclic) {
+               dev_err(chan2dev(chan),
+                       "Request not allowed when dma in cyclic mode\n");
+               return NULL;
+       }
+
+       count = DIV_ROUND_UP(len, STM32_MDMA_MAX_BLOCK_LEN);
+       desc = stm32_mdma_alloc_desc(chan, count);
+       if (!desc)
+               return NULL;
+
+       ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
+       ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+       ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+       cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+
+       /* Enable sw req, some interrupts and clear other bits */
+       ccr &= ~(STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
+                STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK |
+                STM32_MDMA_CCR_IRQ_MASK);
+       ccr |= STM32_MDMA_CCR_TEIE;
+
+       /* Enable SW request mode, dest/src inc and clear other bits */
+       ctcr &= ~(STM32_MDMA_CTCR_BWM | STM32_MDMA_CTCR_TRGM_MSK |
+                 STM32_MDMA_CTCR_PAM_MASK | STM32_MDMA_CTCR_PKE |
+                 STM32_MDMA_CTCR_TLEN_MSK | STM32_MDMA_CTCR_DBURST_MASK |
+                 STM32_MDMA_CTCR_SBURST_MASK | STM32_MDMA_CTCR_DINCOS_MASK |
+                 STM32_MDMA_CTCR_SINCOS_MASK | STM32_MDMA_CTCR_DSIZE_MASK |
+                 STM32_MDMA_CTCR_SSIZE_MASK | STM32_MDMA_CTCR_DINC_MASK |
+                 STM32_MDMA_CTCR_SINC_MASK);
+       ctcr |= STM32_MDMA_CTCR_SWRM | STM32_MDMA_CTCR_SINC(STM32_MDMA_INC) |
+               STM32_MDMA_CTCR_DINC(STM32_MDMA_INC);
+
+       /* Reset HW request */
+       ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
+
+       /* Select bus */
+       stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src);
+       stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dest);
+
+       /* Clear CBNDTR registers */
+       cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | STM32_MDMA_CBNDTR_BRDUM |
+                       STM32_MDMA_CBNDTR_BRSUM | STM32_MDMA_CBNDTR_BNDT_MASK);
+
+       if (len <= STM32_MDMA_MAX_BLOCK_LEN) {
+               cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
+               if (len <= STM32_MDMA_MAX_BUF_LEN) {
+                       /* Setup a buffer transfer */
+                       ccr |= STM32_MDMA_CCR_TCIE | STM32_MDMA_CCR_CTCIE;
+                       ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BUFFER);
+               } else {
+                       /* Setup a block transfer */
+                       ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
+                       ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BLOCK);
+               }
+
+               tlen = STM32_MDMA_MAX_BUF_LEN;
+               ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
+
+               /* Set source best burst size */
+               max_width = stm32_mdma_get_max_width(src, len, tlen);
+               src_bus_width = stm32_mdma_get_width(chan, max_width);
+
+               max_burst = tlen / max_width;
+               best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
+                                                      max_width);
+               mdma_burst = ilog2(best_burst);
+
+               ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
+                       STM32_MDMA_CTCR_SSIZE(src_bus_width) |
+                       STM32_MDMA_CTCR_SINCOS(src_bus_width);
+
+               /* Set destination best burst size */
+               max_width = stm32_mdma_get_max_width(dest, len, tlen);
+               dst_bus_width = stm32_mdma_get_width(chan, max_width);
+
+               max_burst = tlen / max_width;
+               best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
+                                                      max_width);
+               mdma_burst = ilog2(best_burst);
+
+               ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
+                       STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
+                       STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+
+               if (dst_bus_width != src_bus_width)
+                       ctcr |= STM32_MDMA_CTCR_PKE;
+
+               /* Prepare hardware descriptor */
+               hwdesc = desc->node[0].hwdesc;
+               hwdesc->ctcr = ctcr;
+               hwdesc->cbndtr = cbndtr;
+               hwdesc->csar = src;
+               hwdesc->cdar = dest;
+               hwdesc->cbrur = 0;
+               hwdesc->clar = 0;
+               hwdesc->ctbr = ctbr;
+               hwdesc->cmar = 0;
+               hwdesc->cmdr = 0;
+
+               stm32_mdma_dump_hwdesc(chan, &desc->node[0]);
+       } else {
+               /* Setup a LLI transfer */
+               ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
+                       STM32_MDMA_CTCR_TLEN((STM32_MDMA_MAX_BUF_LEN - 1));
+               ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
+               tlen = STM32_MDMA_MAX_BUF_LEN;
+
+               for (i = 0, offset = 0; offset < len;
+                    i++, offset += xfer_count) {
+                       xfer_count = min_t(size_t, len - offset,
+                                          STM32_MDMA_MAX_BLOCK_LEN);
+
+                       /* Set source best burst size */
+                       max_width = stm32_mdma_get_max_width(src, len, tlen);
+                       src_bus_width = stm32_mdma_get_width(chan, max_width);
+
+                       max_burst = tlen / max_width;
+                       best_burst = stm32_mdma_get_best_burst(len, tlen,
+                                                              max_burst,
+                                                              max_width);
+                       mdma_burst = ilog2(best_burst);
+
+                       ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
+                               STM32_MDMA_CTCR_SSIZE(src_bus_width) |
+                               STM32_MDMA_CTCR_SINCOS(src_bus_width);
+
+                       /* Set destination best burst size */
+                       max_width = stm32_mdma_get_max_width(dest, len, tlen);
+                       dst_bus_width = stm32_mdma_get_width(chan, max_width);
+
+                       max_burst = tlen / max_width;
+                       best_burst = stm32_mdma_get_best_burst(len, tlen,
+                                                              max_burst,
+                                                              max_width);
+                       mdma_burst = ilog2(best_burst);
+
+                       ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
+                               STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
+                               STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+
+                       if (dst_bus_width != src_bus_width)
+                               ctcr |= STM32_MDMA_CTCR_PKE;
+
+                       /* Prepare hardware descriptor */
+                       stm32_mdma_setup_hwdesc(chan, desc, DMA_MEM_TO_MEM, i,
+                                               src + offset, dest + offset,
+                                               xfer_count, ctcr, ctbr,
+                                               i == count - 1, i == 0, false);
+               }
+       }
+
+       desc->ccr = ccr;
+
+       desc->cyclic = false;
+
+       return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static void stm32_mdma_dump_reg(struct stm32_mdma_chan *chan)
+{
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+
+       dev_dbg(chan2dev(chan), "CCR:     0x%08x\n",
+               stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)));
+       dev_dbg(chan2dev(chan), "CTCR:    0x%08x\n",
+               stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)));
+       dev_dbg(chan2dev(chan), "CBNDTR:  0x%08x\n",
+               stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)));
+       dev_dbg(chan2dev(chan), "CSAR:    0x%08x\n",
+               stm32_mdma_read(dmadev, STM32_MDMA_CSAR(chan->id)));
+       dev_dbg(chan2dev(chan), "CDAR:    0x%08x\n",
+               stm32_mdma_read(dmadev, STM32_MDMA_CDAR(chan->id)));
+       dev_dbg(chan2dev(chan), "CBRUR:   0x%08x\n",
+               stm32_mdma_read(dmadev, STM32_MDMA_CBRUR(chan->id)));
+       dev_dbg(chan2dev(chan), "CLAR:    0x%08x\n",
+               stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id)));
+       dev_dbg(chan2dev(chan), "CTBR:    0x%08x\n",
+               stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)));
+       dev_dbg(chan2dev(chan), "CMAR:    0x%08x\n",
+               stm32_mdma_read(dmadev, STM32_MDMA_CMAR(chan->id)));
+       dev_dbg(chan2dev(chan), "CMDR:    0x%08x\n",
+               stm32_mdma_read(dmadev, STM32_MDMA_CMDR(chan->id)));
+}
+
+static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
+{
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       struct virt_dma_desc *vdesc;
+       struct stm32_mdma_hwdesc *hwdesc;
+       u32 id = chan->id;
+       u32 status, reg;
+
+       vdesc = vchan_next_desc(&chan->vchan);
+       if (!vdesc) {
+               chan->desc = NULL;
+               return;
+       }
+
+       list_del(&vdesc->node);
+
+       chan->desc = to_stm32_mdma_desc(vdesc);
+       hwdesc = chan->desc->node[0].hwdesc;
+       chan->curr_hwdesc = 0;
+
+       stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
+       stm32_mdma_write(dmadev, STM32_MDMA_CTCR(id), hwdesc->ctcr);
+       stm32_mdma_write(dmadev, STM32_MDMA_CBNDTR(id), hwdesc->cbndtr);
+       stm32_mdma_write(dmadev, STM32_MDMA_CSAR(id), hwdesc->csar);
+       stm32_mdma_write(dmadev, STM32_MDMA_CDAR(id), hwdesc->cdar);
+       stm32_mdma_write(dmadev, STM32_MDMA_CBRUR(id), hwdesc->cbrur);
+       stm32_mdma_write(dmadev, STM32_MDMA_CLAR(id), hwdesc->clar);
+       stm32_mdma_write(dmadev, STM32_MDMA_CTBR(id), hwdesc->ctbr);
+       stm32_mdma_write(dmadev, STM32_MDMA_CMAR(id), hwdesc->cmar);
+       stm32_mdma_write(dmadev, STM32_MDMA_CMDR(id), hwdesc->cmdr);
+
+       /* Clear interrupt status if it is there */
+       status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
+       if (status)
+               stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(id), status);
+
+       stm32_mdma_dump_reg(chan);
+
+       /* Start DMA */
+       stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN);
+
+       /* Set SW request in case of MEM2MEM transfer */
+       if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) {
+               reg = STM32_MDMA_CCR(id);
+               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
+       }
+
+       chan->busy = true;
+
+       dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
+}
+
+static void stm32_mdma_issue_pending(struct dma_chan *c)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+
+       if (!vchan_issue_pending(&chan->vchan))
+               goto end;
+
+       dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
+
+       if (!chan->desc && !chan->busy)
+               stm32_mdma_start_transfer(chan);
+
+end:
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
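+/*
+ * Pausing only clears the EN bit via stm32_mdma_disable_chan(); the channel
+ * registers keep the state of the in-progress transfer so that
+ * stm32_mdma_resume() can restart it where it stopped.
+ */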
+static int stm32_mdma_pause(struct dma_chan *c)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+       ret = stm32_mdma_disable_chan(chan);
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+       if (!ret)
+               dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
+
+       return ret;
+}
+
+static int stm32_mdma_resume(struct dma_chan *c)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       struct stm32_mdma_hwdesc *hwdesc;
+       unsigned long flags;
+       u32 status, reg;
+
+       /* Resume requires a pending descriptor and a disabled (paused) channel */
+       if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
+               return -EPERM;
+
+       hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+
+       /* Re-configure control register */
+       stm32_mdma_write(dmadev, STM32_MDMA_CCR(chan->id), chan->desc->ccr);
+
+       /* Clear any pending interrupt status before re-enabling the channel */
+       status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+       if (status)
+               stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
+
+       stm32_mdma_dump_reg(chan);
+
+       /* Re-start DMA */
+       reg = STM32_MDMA_CCR(chan->id);
+       stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_EN);
+
+       /* Set SW request in case of MEM2MEM transfer */
+       if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM)
+               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
+
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+       dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
+
+       return 0;
+}
+
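+/*
+ * Detach the in-flight descriptor so the core can reclaim it, stop the
+ * hardware if it is running, then free every descriptor still queued on
+ * the virtual channel.
+ */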
+static int stm32_mdma_terminate_all(struct dma_chan *c)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+       if (chan->desc) {
+               vchan_terminate_vdesc(&chan->desc->vdesc);
+               if (chan->busy)
+                       stm32_mdma_stop(chan);
+               chan->desc = NULL;
+       }
+       vchan_get_all_descriptors(&chan->vchan, &head);
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+       vchan_dma_desc_free_list(&chan->vchan, &head);
+
+       return 0;
+}
+
+static void stm32_mdma_synchronize(struct dma_chan *c)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+
+       vchan_synchronize(&chan->vchan);
+}
+
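+/*
+ * Beside caching the generic slave configuration, detect STM32 DMA-MDMA
+ * chaining: when peripheral_size is set, peripheral_config carries a
+ * struct stm32_mdma_dma_config giving the request line and the mask
+ * address/data (CMAR/CMDR) used by the STM32 DMA to trigger this channel.
+ */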
+static int stm32_mdma_slave_config(struct dma_chan *c,
+                                  struct dma_slave_config *config)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+
+       memcpy(&chan->dma_config, config, sizeof(*config));
+
+       /* Check if user is requesting STM32 DMA to trigger MDMA */
+       if (config->peripheral_size) {
+               struct stm32_mdma_dma_config *mdma_config;
+
+               mdma_config = (struct stm32_mdma_dma_config *)chan->dma_config.peripheral_config;
+               chan->chan_config.request = mdma_config->request;
+               chan->chan_config.mask_addr = mdma_config->cmar;
+               chan->chan_config.mask_data = mdma_config->cmdr;
+               chan->chan_config.m2m_hw = true;
+       }
+
+       return 0;
+}
+
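+/*
+ * Compute the residue by walking the hardware descriptor list backwards,
+ * summing the block length (BNDT) of every node not yet processed, i.e.
+ * until the node whose link address matches the channel's CLAR register.
+ * The remaining count of the in-progress block (CBNDTR) is then added and
+ * the total is rounded up to the memory burst size.
+ */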
+static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
+                                     struct stm32_mdma_desc *desc,
+                                     u32 curr_hwdesc,
+                                     struct dma_tx_state *state)
+{
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       struct stm32_mdma_hwdesc *hwdesc;
+       u32 cisr, clar, cbndtr, residue, modulo, burst_size;
+       int i;
+
+       cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+
+       residue = 0;
+       /* Get the next hw descriptor to process from current transfer */
+       clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id));
+       for (i = desc->count - 1; i >= 0; i--) {
+               hwdesc = desc->node[i].hwdesc;
+
+               if (hwdesc->clar == clar)
+                       break; /* Current transfer found, stop accumulating */
+
+               /* Accumulate residue of unprocessed hw descriptors */
+               residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
+       }
+       cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+       residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+
+       state->in_flight_bytes = 0;
+       if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA))
+               state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+
+       if (!chan->mem_burst)
+               return residue;
+
+       burst_size = chan->mem_burst * chan->mem_width;
+       modulo = residue % burst_size;
+       if (modulo)
+               residue = residue - modulo + burst_size;
+
+       return residue;
+}
+
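+/*
+ * Residue is reported both for the descriptor currently owned by the
+ * hardware and for descriptors still queued on the virtual channel,
+ * based on the hardware descriptor list and the live channel registers.
+ */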
+static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
+                                           dma_cookie_t cookie,
+                                           struct dma_tx_state *state)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       struct virt_dma_desc *vdesc;
+       enum dma_status status;
+       unsigned long flags;
+       u32 residue = 0;
+
+       status = dma_cookie_status(c, cookie, state);
+       if (status == DMA_COMPLETE || !state)
+               return status;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+
+       vdesc = vchan_find_desc(&chan->vchan, cookie);
+       if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
+               residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state);
+       else if (vdesc)
+               residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state);
+
+       dma_set_residue(state, residue);
+
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+       return status;
+}
+
+static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
+{
+       vchan_cookie_complete(&chan->desc->vdesc);
+       chan->desc = NULL;
+       chan->busy = false;
+
+       /* Start the next transfer if another descriptor is queued */
+       stm32_mdma_start_transfer(chan);
+}
+
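+/*
+ * GISR0 gives the lowest pending channel; its CISR status is matched
+ * against the interrupts enabled in CCR, then each cause is acked in
+ * CIFCR: transfer errors are logged (CESR), channel transfer complete
+ * ends the current descriptor, and block transfer complete advances
+ * curr_hwdesc and fires the cyclic callback for cyclic transfers.
+ */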
+static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
+{
+       struct stm32_mdma_device *dmadev = devid;
+       struct stm32_mdma_chan *chan;
+       u32 reg, id, ccr, ien, status;
+
+       /* Find out which channel generates the interrupt */
+       status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
+       if (!status) {
+               dev_dbg(mdma2dev(dmadev), "spurious interrupt\n");
+               return IRQ_NONE;
+       }
+       id = __ffs(status);
+       chan = &dmadev->chan[id];
+
+       /* Handle interrupt for the channel */
+       spin_lock(&chan->vchan.lock);
+       status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
+       /* Mask Channel ReQuest Active bit which can be set in case of MEM2MEM */
+       status &= ~STM32_MDMA_CISR_CRQA;
+       ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
+       ien = (ccr & STM32_MDMA_CCR_IRQ_MASK) >> 1;
+
+       if (!(status & ien)) {
+               spin_unlock(&chan->vchan.lock);
+               if (chan->busy)
+                       dev_warn(chan2dev(chan),
+                                "spurious interrupt (status=0x%04x, ien=0x%04x)\n", status, ien);
+               else
+                       dev_dbg(chan2dev(chan),
+                               "spurious interrupt (status=0x%04x, ien=0x%04x)\n", status, ien);
+               return IRQ_NONE;
+       }
+
+       reg = STM32_MDMA_CIFCR(id);
+
+       if (status & STM32_MDMA_CISR_TEIF) {
+               dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n",
+                       readl_relaxed(dmadev->base + STM32_MDMA_CESR(id)));
+               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
+               status &= ~STM32_MDMA_CISR_TEIF;
+       }
+
+       if (status & STM32_MDMA_CISR_CTCIF) {
+               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
+               status &= ~STM32_MDMA_CISR_CTCIF;
+               stm32_mdma_xfer_end(chan);
+       }
+
+       if (status & STM32_MDMA_CISR_BRTIF) {
+               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
+               status &= ~STM32_MDMA_CISR_BRTIF;
+       }
+
+       if (status & STM32_MDMA_CISR_BTIF) {
+               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
+               status &= ~STM32_MDMA_CISR_BTIF;
+               chan->curr_hwdesc++;
+               if (chan->desc && chan->desc->cyclic) {
+                       if (chan->curr_hwdesc == chan->desc->count)
+                               chan->curr_hwdesc = 0;
+                       vchan_cyclic_callback(&chan->desc->vdesc);
+               }
+       }
+
+       if (status & STM32_MDMA_CISR_TCIF) {
+               stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
+               status &= ~STM32_MDMA_CISR_TCIF;
+       }
+
+       if (status) {
+               stm32_mdma_set_bits(dmadev, reg, status);
+               dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
+               if (!(ccr & STM32_MDMA_CCR_EN))
+                       dev_err(chan2dev(chan), "chan disabled by HW\n");
+       }
+
+       spin_unlock(&chan->vchan.lock);
+
+       return IRQ_HANDLED;
+}
+
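+/*
+ * Hardware descriptors are allocated from a per-channel managed DMA pool.
+ * The device is powered up through runtime PM and the channel forced off
+ * before first use.
+ */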
+static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       int ret;
+
+       chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device),
+                                          c->device->dev,
+                                          sizeof(struct stm32_mdma_hwdesc),
+                                          __alignof__(struct stm32_mdma_hwdesc),
+                                          0);
+       if (!chan->desc_pool) {
+               dev_err(chan2dev(chan), "failed to allocate descriptor pool\n");
+               return -ENOMEM;
+       }
+
+       ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
+       if (ret < 0)
+               return ret;
+
+       ret = stm32_mdma_disable_chan(chan);
+       if (ret < 0)
+               pm_runtime_put(dmadev->ddev.dev);
+
+       return ret;
+}
+
+static void stm32_mdma_free_chan_resources(struct dma_chan *c)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+       unsigned long flags;
+
+       dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);
+
+       if (chan->busy) {
+               spin_lock_irqsave(&chan->vchan.lock, flags);
+               stm32_mdma_stop(chan);
+               chan->desc = NULL;
+               spin_unlock_irqrestore(&chan->vchan.lock, flags);
+       }
+
+       pm_runtime_put(dmadev->ddev.dev);
+       vchan_free_chan_resources(to_virt_chan(c));
+       dmam_pool_destroy(chan->desc_pool);
+       chan->desc_pool = NULL;
+}
+
+static bool stm32_mdma_filter_fn(struct dma_chan *c, void *fn_param)
+{
+       struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+       struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+
+       /* Check if chan is marked Secure */
+       if (dmadev->chan_reserved & BIT(chan->id))
+               return false;
+
+       return true;
+}
+
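+/*
+ * Channels are requested through a five-cell specifier, matching the
+ * st,stm32h7-mdma binding: request line, priority level, transfer
+ * configuration (CTCR value), mask address (CMAR) and mask data (CMDR).
+ */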
+static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
+                                           struct of_dma *ofdma)
+{
+       struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
+       dma_cap_mask_t mask = dmadev->ddev.cap_mask;
+       struct stm32_mdma_chan *chan;
+       struct dma_chan *c;
+       struct stm32_mdma_chan_config config;
+
+       if (dma_spec->args_count < 5) {
+               dev_err(mdma2dev(dmadev), "Bad number of args\n");
+               return NULL;
+       }
+
+       memset(&config, 0, sizeof(config));
+       config.request = dma_spec->args[0];
+       config.priority_level = dma_spec->args[1];
+       config.transfer_config = dma_spec->args[2];
+       config.mask_addr = dma_spec->args[3];
+       config.mask_data = dma_spec->args[4];
+
+       if (config.request >= dmadev->nr_requests) {
+               dev_err(mdma2dev(dmadev), "Bad request line\n");
+               return NULL;
+       }
+
+       if (config.priority_level > STM32_MDMA_VERY_HIGH_PRIORITY) {
+               dev_err(mdma2dev(dmadev), "Priority level not supported\n");
+               return NULL;
+       }
+
+       c = __dma_request_channel(&mask, stm32_mdma_filter_fn, &config, ofdma->of_node);
+       if (!c) {
+               dev_err(mdma2dev(dmadev), "No more channels available\n");
+               return NULL;
+       }
+
+       chan = to_stm32_mdma_chan(c);
+       chan->chan_config = config;
+
+       return c;
+}
+
+static const struct of_device_id stm32_mdma_of_match[] = {
+       { .compatible = "st,stm32h7-mdma", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_mdma_of_match);
+
+static int stm32_mdma_probe(struct platform_device *pdev)
+{
+       struct stm32_mdma_chan *chan;
+       struct stm32_mdma_device *dmadev;
+       struct dma_device *dd;
+       struct device_node *of_node;
+       struct reset_control *rst;
+       u32 nr_channels, nr_requests;
+       int i, count, ret;
+
+       of_node = pdev->dev.of_node;
+       if (!of_node)
+               return -ENODEV;
+
+       ret = device_property_read_u32(&pdev->dev, "dma-channels",
+                                      &nr_channels);
+       if (ret) {
+               nr_channels = STM32_MDMA_MAX_CHANNELS;
+               dev_warn(&pdev->dev, "MDMA defaulting to %i channels\n",
+                        nr_channels);
+       }
+
+       ret = device_property_read_u32(&pdev->dev, "dma-requests",
+                                      &nr_requests);
+       if (ret) {
+               nr_requests = STM32_MDMA_MAX_REQUESTS;
+               dev_warn(&pdev->dev, "MDMA defaulting to %i request lines\n",
+                        nr_requests);
+       }
+
+       count = device_property_count_u32(&pdev->dev, "st,ahb-addr-masks");
+       if (count < 0)
+               count = 0;
+
+       dmadev = devm_kzalloc(&pdev->dev,
+                             struct_size(dmadev, ahb_addr_masks, count),
+                             GFP_KERNEL);
+       if (!dmadev)
+               return -ENOMEM;
+       dmadev->nr_ahb_addr_masks = count;
+
+       dmadev->nr_channels = nr_channels;
+       dmadev->nr_requests = nr_requests;
+       device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+                                      dmadev->ahb_addr_masks,
+                                      count);
+
+       dmadev->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(dmadev->base))
+               return PTR_ERR(dmadev->base);
+
+       dmadev->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(dmadev->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk),
+                                    "Missing clock controller\n");
+
+       ret = clk_prepare_enable(dmadev->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "clk_prepare_enable error: %d\n", ret);
+               return ret;
+       }
+
+       rst = devm_reset_control_get(&pdev->dev, NULL);
+       if (IS_ERR(rst)) {
+               ret = PTR_ERR(rst);
+               if (ret == -EPROBE_DEFER)
+                       goto err_clk;
+       } else {
+               reset_control_assert(rst);
+               udelay(2);
+               reset_control_deassert(rst);
+       }
+
+       dd = &dmadev->ddev;
+       dma_cap_set(DMA_SLAVE, dd->cap_mask);
+       dma_cap_set(DMA_PRIVATE, dd->cap_mask);
+       dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+       dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+       dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources;
+       dd->device_free_chan_resources = stm32_mdma_free_chan_resources;
+       dd->device_tx_status = stm32_mdma_tx_status;
+       dd->device_issue_pending = stm32_mdma_issue_pending;
+       dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg;
+       dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic;
+       dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy;
+       dd->device_config = stm32_mdma_slave_config;
+       dd->device_pause = stm32_mdma_pause;
+       dd->device_resume = stm32_mdma_resume;
+       dd->device_terminate_all = stm32_mdma_terminate_all;
+       dd->device_synchronize = stm32_mdma_synchronize;
+       dd->descriptor_reuse = true;
+
+       dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+               BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+       dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+               BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+       dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
+               BIT(DMA_MEM_TO_MEM);
+       dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       dd->max_burst = STM32_MDMA_MAX_BURST;
+       dd->dev = &pdev->dev;
+       INIT_LIST_HEAD(&dd->channels);
+
+       for (i = 0; i < dmadev->nr_channels; i++) {
+               chan = &dmadev->chan[i];
+               chan->id = i;
+
+               if (stm32_mdma_read(dmadev, STM32_MDMA_CCR(i)) & STM32_MDMA_CCR_SM)
+                       dmadev->chan_reserved |= BIT(i);
+
+               chan->vchan.desc_free = stm32_mdma_desc_free;
+               vchan_init(&chan->vchan, dd);
+       }
+
+       dmadev->irq = platform_get_irq(pdev, 0);
+       if (dmadev->irq < 0) {
+               ret = dmadev->irq;
+               goto err_clk;
+       }
+
+       ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
+                              0, dev_name(&pdev->dev), dmadev);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to request IRQ\n");
+               goto err_clk;
+       }
+
+       ret = dmaenginem_async_device_register(dd);
+       if (ret)
+               goto err_clk;
+
+       ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
+       if (ret < 0) {
+               dev_err(&pdev->dev,
+                       "STM32 MDMA OF registration failed %d\n", ret);
+               goto err_clk;
+       }
+
+       platform_set_drvdata(pdev, dmadev);
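+       /*
+        * The clock was left enabled by probe: mark the device active, then
+        * balance the usage count with a noresume get/put pair so runtime PM
+        * can gate the clock once the device goes idle.
+        */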
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_get_noresume(&pdev->dev);
+       pm_runtime_put(&pdev->dev);
+
+       dev_info(&pdev->dev, "STM32 MDMA driver registered\n");
+
+       return 0;
+
+err_clk:
+       clk_disable_unprepare(dmadev->clk);
+
+       return ret;
+}
+
+#ifdef CONFIG_PM
+static int stm32_mdma_runtime_suspend(struct device *dev)
+{
+       struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+
+       clk_disable_unprepare(dmadev->clk);
+
+       return 0;
+}
+
+static int stm32_mdma_runtime_resume(struct device *dev)
+{
+       struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_prepare_enable(dmadev->clk);
+       if (ret) {
+               dev_err(dev, "failed to prepare_enable clock\n");
+               return ret;
+       }
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_mdma_pm_suspend(struct device *dev)
+{
+       struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+       u32 ccr, id;
+       int ret;
+
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret < 0)
+               return ret;
+
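+       /* Refuse system suspend while any channel is still enabled */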
+       for (id = 0; id < dmadev->nr_channels; id++) {
+               ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
+               if (ccr & STM32_MDMA_CCR_EN) {
+                       dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+                       /* Drop the reference taken above before bailing out */
+                       pm_runtime_put_sync(dev);
+                       return -EBUSY;
+               }
+       }
+
+       pm_runtime_put_sync(dev);
+
+       pm_runtime_force_suspend(dev);
+
+       return 0;
+}
+
+static int stm32_mdma_pm_resume(struct device *dev)
+{
+       return pm_runtime_force_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops stm32_mdma_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(stm32_mdma_pm_suspend, stm32_mdma_pm_resume)
+       SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
+                          stm32_mdma_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_mdma_driver = {
+       .probe = stm32_mdma_probe,
+       .driver = {
+               .name = "stm32-mdma",
+               .of_match_table = stm32_mdma_of_match,
+               .pm = &stm32_mdma_pm_ops,
+       },
+};
+
+static int __init stm32_mdma_init(void)
+{
+       return platform_driver_register(&stm32_mdma_driver);
+}
+
+subsys_initcall(stm32_mdma_init);
+
+MODULE_DESCRIPTION("Driver for STM32 MDMA controller");
+MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
+MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");