dmaengine: qcom-bam-dma: Add pm_runtime support
author: Pramod Gurav <pramod.gurav@linaro.org>
Fri, 17 Jun 2016 10:26:03 +0000 (15:56 +0530)
committer: Vinod Koul <vinod.koul@intel.com>
Thu, 30 Jun 2016 04:35:56 +0000 (10:05 +0530)
Adds pm_runtime support for BAM DMA so that the clock is enabled only
when a transaction is in progress, to help save power.

Signed-off-by: Pramod Gurav <pramod.gurav@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
drivers/dma/qcom/bam_dma.c

index 969b48176745031f4b2d9bf952561b4101af00e7..4754891742ad3218c1941586796cc996193684c8 100644 (file)
@@ -48,6 +48,7 @@
 #include <linux/of_dma.h>
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
 
 #include "../dmaengine.h"
 #include "../virt-dma.h"
@@ -58,6 +59,8 @@ struct bam_desc_hw {
        __le16 flags;
 };
 
+#define BAM_DMA_AUTOSUSPEND_DELAY 100
+
 #define DESC_FLAG_INT BIT(15)
 #define DESC_FLAG_EOT BIT(14)
 #define DESC_FLAG_EOB BIT(13)
@@ -527,12 +530,17 @@ static void bam_free_chan(struct dma_chan *chan)
        struct bam_device *bdev = bchan->bdev;
        u32 val;
        unsigned long flags;
+       int ret;
+
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return;
 
        vchan_free_chan_resources(to_virt_chan(chan));
 
        if (bchan->curr_txd) {
                dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
-               return;
+               goto err;
        }
 
        spin_lock_irqsave(&bchan->vc.lock, flags);
@@ -550,6 +558,10 @@ static void bam_free_chan(struct dma_chan *chan)
 
        /* disable irq */
        writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
+
+err:
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
 }
 
 /**
@@ -696,11 +708,18 @@ static int bam_pause(struct dma_chan *chan)
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;
        unsigned long flag;
+       int ret;
+
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&bchan->vc.lock, flag);
        writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
        bchan->paused = 1;
        spin_unlock_irqrestore(&bchan->vc.lock, flag);
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
 
        return 0;
 }
@@ -715,11 +734,18 @@ static int bam_resume(struct dma_chan *chan)
        struct bam_chan *bchan = to_bam_chan(chan);
        struct bam_device *bdev = bchan->bdev;
        unsigned long flag;
+       int ret;
+
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&bchan->vc.lock, flag);
        writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
        bchan->paused = 0;
        spin_unlock_irqrestore(&bchan->vc.lock, flag);
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
 
        return 0;
 }
@@ -795,6 +821,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 {
        struct bam_device *bdev = data;
        u32 clr_mask = 0, srcs = 0;
+       int ret;
 
        srcs |= process_channel_irqs(bdev);
 
@@ -802,6 +829,10 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
        if (srcs & P_IRQ)
                tasklet_schedule(&bdev->task);
 
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return ret;
+
        if (srcs & BAM_IRQ) {
                clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
 
@@ -814,6 +845,9 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
                writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
        }
 
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
+
        return IRQ_HANDLED;
 }
 
@@ -893,6 +927,7 @@ static void bam_start_dma(struct bam_chan *bchan)
        struct bam_desc_hw *desc;
        struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
                                        sizeof(struct bam_desc_hw));
+       int ret;
 
        lockdep_assert_held(&bchan->vc.lock);
 
@@ -904,6 +939,10 @@ static void bam_start_dma(struct bam_chan *bchan)
        async_desc = container_of(vd, struct bam_async_desc, vd);
        bchan->curr_txd = async_desc;
 
+       ret = pm_runtime_get_sync(bdev->dev);
+       if (ret < 0)
+               return;
+
        /* on first use, initialize the channel hardware */
        if (!bchan->initialized)
                bam_chan_init_hw(bchan, async_desc->dir);
@@ -946,6 +985,9 @@ static void bam_start_dma(struct bam_chan *bchan)
        wmb();
        writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
                        bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
+
+       pm_runtime_mark_last_busy(bdev->dev);
+       pm_runtime_put_autosuspend(bdev->dev);
 }
 
 /**
@@ -970,6 +1012,7 @@ static void dma_tasklet(unsigned long data)
                        bam_start_dma(bchan);
                spin_unlock_irqrestore(&bchan->vc.lock, flags);
        }
+
 }
 
 /**
@@ -1213,6 +1256,13 @@ static int bam_dma_probe(struct platform_device *pdev)
        if (ret)
                goto err_unregister_dma;
 
+       pm_runtime_irq_safe(&pdev->dev);
+       pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_mark_last_busy(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
        return 0;
 
 err_unregister_dma:
@@ -1233,6 +1283,8 @@ static int bam_dma_remove(struct platform_device *pdev)
        struct bam_device *bdev = platform_get_drvdata(pdev);
        u32 i;
 
+       pm_runtime_force_suspend(&pdev->dev);
+
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&bdev->common);
 
@@ -1260,11 +1312,67 @@ static int bam_dma_remove(struct platform_device *pdev)
        return 0;
 }
 
+/*
+ * Runtime-PM suspend: gate the BAM core clock while the controller is idle.
+ * Only clk_disable() is used (not clk_disable_unprepare()) because
+ * pm_runtime_irq_safe() is set in probe, so this callback may run in
+ * atomic context; the unprepare half is done in the system-sleep path
+ * (bam_dma_suspend()).
+ */
+static int bam_dma_runtime_suspend(struct device *dev)
+{
+       struct bam_device *bdev = dev_get_drvdata(dev);
+
+       clk_disable(bdev->bamclk);
+
+       return 0;
+}
+
+/*
+ * Runtime-PM resume: ungate the BAM core clock before any register access.
+ * Uses the atomic-safe clk_enable() only, as required once
+ * pm_runtime_irq_safe() is in effect; assumes the clock is already
+ * prepared (at probe / system resume) -- NOTE(review): confirm the clock
+ * is prepared on every path that can reach runtime resume.
+ */
+static int bam_dma_runtime_resume(struct device *dev)
+{
+       struct bam_device *bdev = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_enable(bdev->bamclk);
+       if (ret < 0) {
+               dev_err(dev, "clk_enable failed: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+#ifdef CONFIG_PM_SLEEP
+/*
+ * System-sleep suspend: force the device into runtime suspend (which
+ * gates the clock via bam_dma_runtime_suspend()), then drop the prepare
+ * count so the clock can be fully released across system sleep.
+ */
+static int bam_dma_suspend(struct device *dev)
+{
+       struct bam_device *bdev = dev_get_drvdata(dev);
+
+       pm_runtime_force_suspend(dev);
+
+       clk_unprepare(bdev->bamclk);
+
+       return 0;
+}
+
+/*
+ * System-sleep resume: re-prepare the clock first (sleepable context),
+ * then restore the pre-suspend runtime-PM state, which re-enables the
+ * clock via bam_dma_runtime_resume() if the device was active.
+ */
+static int bam_dma_resume(struct device *dev)
+{
+       struct bam_device *bdev = dev_get_drvdata(dev);
+       int ret;
+
+       ret = clk_prepare(bdev->bamclk);
+       if (ret)
+               return ret;
+
+       pm_runtime_force_resume(dev);
+
+       return 0;
+}
+#endif
+
+/*
+ * PM callbacks: runtime ops gate/ungate the BAM clock; late system-sleep
+ * ops are used, presumably so DMA client drivers suspend before the BAM
+ * controller -- NOTE(review): confirm the ordering requirement.
+ */
+static const struct dev_pm_ops bam_dma_pm_ops = {
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
+       SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
+                               NULL)
+};
+
 static struct platform_driver bam_dma_driver = {
         .probe = bam_dma_probe,
         .remove = bam_dma_remove,
         .driver = {
                 .name = "bam-dma-engine",
+               /* hook up the new runtime/system-sleep PM callbacks */
+               .pm = &bam_dma_pm_ops,
                 .of_match_table = bam_of_match,
         },
 };