Revert "dmaengine: virt-dma: don't always free descriptor upon completion"
author Jun Nie <jun.nie@linaro.org>
Fri, 10 Jul 2015 12:02:49 +0000 (20:02 +0800)
committer Vinod Koul <vinod.koul@intel.com>
Fri, 31 Jul 2015 15:03:43 +0000 (20:33 +0530)
This reverts commit b9855f03d560d351e95301b9de0bc3cad3b31fe9.
That patch breaks existing DMA use cases. For example, an audio SoC
dmaengine client never releases its channel, so virt-dma keeps caching
completed descriptors and eventually exhausts system memory.
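
To make the failure mode concrete, below is a minimal userspace sketch
(not kernel code; the structure and helper names are invented for
illustration) of the difference between parking an acked descriptor on
a desc_allocated-style list after completion, as the reverted patch
did, and freeing it immediately, as this revert restores:

/*
 * Standalone model only: complete_cached() mimics the reverted
 * behaviour, complete_freed() mimics the behaviour restored here.
 */
#include <stdio.h>
#include <stdlib.h>

struct desc {
	struct desc *next;	/* stands in for vd->node on a vchan list */
	char payload[256];	/* stands in for the descriptor body */
};

static struct desc *cache_list;		/* stands in for vc->desc_allocated */
static unsigned long cached_bytes;

static void complete_cached(struct desc *d)
{
	d->next = cache_list;
	cache_list = d;
	cached_bytes += sizeof(*d);
}

static void complete_freed(struct desc *d)
{
	free(d);
}

int main(void)
{
	int cache_policy = 1;	/* 1: pre-revert behaviour, 0: post-revert */
	unsigned long i;

	/*
	 * An audio client typically holds its channel for the lifetime
	 * of the card, so vchan_free_chan_resources() - the only path
	 * that drained desc_allocated - is never reached.
	 */
	for (i = 0; i < 1000000; i++) {
		struct desc *d = calloc(1, sizeof(*d));

		if (!d)
			break;
		if (cache_policy)
			complete_cached(d);
		else
			complete_freed(d);
	}
	printf("bytes pinned on the cache list: %lu\n", cached_bytes);
	return 0;
}

Because nothing drains the cache list while the channel stays open, the
pinned memory grows linearly with the number of completed transfers,
which is exactly the exhaustion described above.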

Signed-off-by: Vinod Koul <vinod.koul@intel.com>
drivers/dma/virt-dma.c
drivers/dma/virt-dma.h

diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 7d2c17d8d30fc1a4f1efd9c7471bc1e5cbdc794c..6f80432a3f0a3d74bf9a4612712ad6edf4875edc 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
        spin_lock_irqsave(&vc->lock, flags);
        cookie = dma_cookie_assign(tx);
 
-       list_move_tail(&vd->node, &vc->desc_submitted);
+       list_add_tail(&vd->node, &vc->desc_submitted);
        spin_unlock_irqrestore(&vc->lock, flags);
 
        dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
                cb_data = vd->tx.callback_param;
 
                list_del(&vd->node);
-               if (async_tx_test_ack(&vd->tx))
-                       list_add(&vd->node, &vc->desc_allocated);
-               else
-                       vc->desc_free(vd);
+
+               vc->desc_free(vd);
 
                if (cb)
                        cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
        while (!list_empty(head)) {
                struct virt_dma_desc *vd = list_first_entry(head,
                        struct virt_dma_desc, node);
-               if (async_tx_test_ack(&vd->tx)) {
-                       list_move_tail(&vd->node, &vc->desc_allocated);
-               } else {
-                       dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-                       list_del(&vd->node);
-                       vc->desc_free(vd);
-               }
+               list_del(&vd->node);
+               dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+               vc->desc_free(vd);
        }
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
        dma_cookie_init(&vc->chan);
 
        spin_lock_init(&vc->lock);
-       INIT_LIST_HEAD(&vc->desc_allocated);
        INIT_LIST_HEAD(&vc->desc_submitted);
        INIT_LIST_HEAD(&vc->desc_issued);
        INIT_LIST_HEAD(&vc->desc_completed);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 189e75dbcb15f95876a81848ecde8196a4c82905..181b95267866b605f521860f973aa3860d694fa0 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
        spinlock_t lock;
 
        /* protected by vc.lock */
-       struct list_head desc_allocated;
        struct list_head desc_submitted;
        struct list_head desc_issued;
        struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
        struct virt_dma_desc *vd, unsigned long tx_flags)
 {
        extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
-       unsigned long flags;
 
        dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
        vd->tx.flags = tx_flags;
        vd->tx.tx_submit = vchan_tx_submit;
 
-       spin_lock_irqsave(&vc->lock, flags);
-       list_add_tail(&vd->node, &vc->desc_allocated);
-       spin_unlock_irqrestore(&vc->lock, flags);
-
        return &vd->tx;
 }
 
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 }
 
 /**
- * vchan_get_all_descriptors - obtain all allocated, submitted and issued
- *                             descriptors
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
  * vc: virtual channel to get descriptors from
  * head: list of descriptors found
  *
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
        struct list_head *head)
 {
-       list_splice_tail_init(&vc->desc_allocated, head);
        list_splice_tail_init(&vc->desc_submitted, head);
        list_splice_tail_init(&vc->desc_issued, head);
        list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
-       struct virt_dma_desc *vd;
        unsigned long flags;
        LIST_HEAD(head);
 
        spin_lock_irqsave(&vc->lock, flags);
        vchan_get_all_descriptors(vc, &head);
-       list_for_each_entry(vd, &head, node)
-               async_tx_clear_ack(&vd->tx);
        spin_unlock_irqrestore(&vc->lock, flags);
 
        vchan_dma_desc_free_list(vc, &head);
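
For reference, with this revert in place vchan_complete() once again
hands every completed descriptor straight to vc->desc_free(). In
drivers built on virt-dma that callback is typically just a
container_of() plus kfree(); the sketch below uses a hypothetical
driver prefix "foo" purely to show the shape.

#include <linux/slab.h>		/* kfree() */
#include "virt-dma.h"		/* struct virt_dma_desc, vchan_init() */

/* Hypothetical driver "foo": only the shape of the callback matters. */
struct foo_desc {
	struct virt_dma_desc vd;	/* virt-dma bookkeeping, must be embedded */
	/* hardware-specific fields would follow */
};

static void foo_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct foo_desc, vd));
}

/*
 * Registered once per channel, e.g. during probe:
 *
 *	foo_chan->vc.desc_free = foo_desc_free;
 *	vchan_init(&foo_chan->vc, &dmadev);
 */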