Merge tag 'dmaengine-5.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma
[linux-2.6-block.git] drivers/dma/mxs-dma.c
index 22cc7f68ef6e380580e233150baf18daaa0695ef..3039bba0e4d559836179fe0f71941da428bd563c 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/list.h>
+#include <linux/dma/mxs-dma.h>
 
 #include <asm/irq.h>
 
@@ -77,6 +78,7 @@
 #define BM_CCW_COMMAND         (3 << 0)
 #define CCW_CHAIN              (1 << 2)
 #define CCW_IRQ                        (1 << 3)
+#define CCW_WAIT4RDY           (1 << 5)
 #define CCW_DEC_SEM            (1 << 6)
 #define CCW_WAIT4END           (1 << 7)
 #define CCW_HALT_ON_TERM       (1 << 8)
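
The two client-visible flags used later in this patch come from the new <linux/dma/mxs-dma.h> header rather than from the generic dmaengine flag set. The header itself is not part of this file, so the sketch below is an assumption: only the flag names are taken from the diff, and the bit positions are presumed to sit in otherwise unused high bits of the prep flags argument.

/* Sketch of the client-facing flags expected in <linux/dma/mxs-dma.h>. */
#include <linux/bits.h>

#define MXS_DMA_CTRL_WAIT4END   BIT(31) /* request CCW_WAIT4END on the last CCW */
#define MXS_DMA_CTRL_WAIT4RDY   BIT(30) /* request CCW_WAIT4RDY on a PIO CCW */
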
@@ -477,16 +479,16 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
  *            ......
  *            ->device_prep_slave_sg(0);
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ *            ->device_prep_slave_sg(MXS_DMA_CTRL_WAIT4END);
  *            ......
  *    [3] If there are more than two DMA commands in the DMA chain, the code
  *        should be:
  *            ......
  *            ->device_prep_slave_sg(0);                                // First
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
+ *            ->device_prep_slave_sg([MXS_DMA_CTRL_WAIT4END]);
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
+ *            ->device_prep_slave_sg(MXS_DMA_CTRL_WAIT4END); // Last
  *            ......
  */
 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
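
For reference, a client following pattern [3] above would now look roughly like the sketch below. mxs_chain_example() and its scatterlist arguments are hypothetical placeholders; dmaengine_prep_slave_sg(), dmaengine_submit() and dma_async_issue_pending() are the standard dmaengine entry points. The point of the sketch is that middle commands no longer need DMA_PREP_INTERRUPT to be appended (the driver appends whenever the channel is already DMA_IN_PROGRESS) and that CCW_WAIT4END is requested through the driver-specific flag instead of DMA_CTRL_ACK.

#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical caller: chain three commands on one MXS DMA channel. */
static int mxs_chain_example(struct dma_chan *chan,
                             struct scatterlist *sg_first,
                             struct scatterlist *sg_mid,
                             struct scatterlist *sg_last)
{
        struct dma_async_tx_descriptor *desc;

        /* First command: no flags. */
        desc = dmaengine_prep_slave_sg(chan, sg_first, 1, DMA_MEM_TO_DEV, 0);
        if (!desc)
                return -EINVAL;

        /* Middle command: appended, the channel is already DMA_IN_PROGRESS. */
        desc = dmaengine_prep_slave_sg(chan, sg_mid, 1, DMA_MEM_TO_DEV, 0);
        if (!desc)
                return -EINVAL;

        /* Last command: have the driver set CCW_WAIT4END on its final CCW. */
        desc = dmaengine_prep_slave_sg(chan, sg_last, 1, DMA_MEM_TO_DEV,
                                       MXS_DMA_CTRL_WAIT4END);
        if (!desc)
                return -EINVAL;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}
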
@@ -500,13 +502,12 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
        struct scatterlist *sg;
        u32 i, j;
        u32 *pio;
-       bool append = flags & DMA_PREP_INTERRUPT;
-       int idx = append ? mxs_chan->desc_count : 0;
+       int idx = 0;
 
-       if (mxs_chan->status == DMA_IN_PROGRESS && !append)
-               return NULL;
+       if (mxs_chan->status == DMA_IN_PROGRESS)
+               idx = mxs_chan->desc_count;
 
-       if (sg_len + (append ? idx : 0) > NUM_CCW) {
+       if (sg_len + idx > NUM_CCW) {
                dev_err(mxs_dma->dma_device.dev,
                                "maximum number of sg exceeded: %d > %d\n",
                                sg_len, NUM_CCW);
@@ -520,7 +521,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
         * If the sg is prepared with append flag set, the sg
         * will be appended to the last prepared sg.
         */
-       if (append) {
+       if (idx) {
                BUG_ON(idx < 1);
                ccw = &mxs_chan->ccw[idx - 1];
                ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
@@ -541,12 +542,14 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                ccw->bits = 0;
                ccw->bits |= CCW_IRQ;
                ccw->bits |= CCW_DEC_SEM;
-               if (flags & DMA_CTRL_ACK)
+               if (flags & MXS_DMA_CTRL_WAIT4END)
                        ccw->bits |= CCW_WAIT4END;
                ccw->bits |= CCW_HALT_ON_TERM;
                ccw->bits |= CCW_TERM_FLUSH;
                ccw->bits |= BF_CCW(sg_len, PIO_NUM);
                ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
+               if (flags & MXS_DMA_CTRL_WAIT4RDY)
+                       ccw->bits |= CCW_WAIT4RDY;
        } else {
                for_each_sg(sgl, sg, sg_len, i) {
                        if (sg_dma_len(sg) > MAX_XFER_BYTES) {
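
The first branch of the hunk above handles the PIO case (direction DMA_TRANS_NONE), where the driver has always reinterpreted the scatterlist pointer as an array of APBH/APBX register words. A command descriptor that must stall until the peripheral signals ready would be prepared roughly as follows; mxs_dmaengine_prep_pio() is assumed to be the convenience wrapper added by the new header for exactly this case, and queue_pio_example() with its arguments is a placeholder.

#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>

/* Hypothetical helper: queue PIO register words that must wait for ready. */
static struct dma_async_tx_descriptor *
queue_pio_example(struct dma_chan *chan, u32 *pio_words, unsigned int npio)
{
        /*
         * WAIT4RDY asks the driver to set CCW_WAIT4RDY on this CCW;
         * WAIT4END additionally holds the channel until the command
         * has fully completed on the peripheral side.
         */
        return mxs_dmaengine_prep_pio(chan, pio_words, npio, DMA_TRANS_NONE,
                                      MXS_DMA_CTRL_WAIT4RDY |
                                      MXS_DMA_CTRL_WAIT4END);
}
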
@@ -573,7 +576,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                                ccw->bits &= ~CCW_CHAIN;
                                ccw->bits |= CCW_IRQ;
                                ccw->bits |= CCW_DEC_SEM;
-                               if (flags & DMA_CTRL_ACK)
+                               if (flags & MXS_DMA_CTRL_WAIT4END)
                                        ccw->bits |= CCW_WAIT4END;
                        }
                }
@@ -716,7 +719,6 @@ err_out:
 }
 
 struct mxs_dma_filter_param {
-       struct device_node *of_node;
        unsigned int chan_id;
 };
 
@@ -727,9 +729,6 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
        int chan_irq;
 
-       if (mxs_dma->dma_device.dev->of_node != param->of_node)
-               return false;
-
        if (chan->chan_id != param->chan_id)
                return false;
 
@@ -752,13 +751,13 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
        if (dma_spec->args_count != 1)
                return NULL;
 
-       param.of_node = ofdma->of_node;
        param.chan_id = dma_spec->args[0];
 
        if (param.chan_id >= mxs_dma->nr_channels)
                return NULL;
 
-       return dma_request_channel(mask, mxs_dma_filter_fn, &param);
+       return __dma_request_channel(&mask, mxs_dma_filter_fn, &param,
+                                    ofdma->of_node);
 }
 
 static int __init mxs_dma_probe(struct platform_device *pdev)
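
Dropping the of_node field from the filter parameters works because __dma_request_channel(), as of the 5.3 dmaengine core, takes the device_node itself and skips channels belonging to other controllers before the driver filter is invoked, so mxs_dma_filter_fn() only needs to match the channel index. The relevant declarations, paraphrased from <linux/dmaengine.h> of that timeframe, are:

struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
                                       dma_filter_fn fn, void *fn_param,
                                       struct device_node *np);

/* The classic helper keeps its old behaviour by passing a NULL node. */
#define dma_request_channel(mask, x, y) \
        __dma_request_channel(&(mask), x, y, NULL)
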