bus: mhi: core: Fix MHI runtime_pm behavior
author Loic Poulain <loic.poulain@linaro.org>
Tue, 6 Apr 2021 09:11:54 +0000 (11:11 +0200)
committer Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Wed, 7 Apr 2021 06:48:28 +0000 (12:18 +0530)
This change ensures that a PM reference is always taken during packet
queueing and released either once queueing completes (RX) or once the
buffer has been consumed (TX). This guarantees that the runtime status
of the underlying MHI controller (e.g. the last_busy timestamp) is
properly updated and prevents suspend from being triggered while TX
packets are in flight or before the RX ring update has completed.
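
For reference, a minimal sketch of the controller-side runtime_get()/
runtime_put() callbacks this balance relies on, assuming the controller
device (cntrl_dev) uses runtime PM with autosuspend; the function names
and the use of pm_runtime_put_autosuspend() are illustrative only and
not part of this patch:

    #include <linux/mhi.h>
    #include <linux/pm_runtime.h>

    /* Called by the core in mhi_queue(): resume (exit M3) if suspended */
    static int my_mhi_runtime_get(struct mhi_controller *mhi_cntrl)
    {
            return pm_runtime_get(mhi_cntrl->cntrl_dev);
    }

    /* Balanced put: refresh last_busy so autosuspend is deferred, drop the ref */
    static void my_mhi_runtime_put(struct mhi_controller *mhi_cntrl)
    {
            pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
            pm_runtime_put_autosuspend(mhi_cntrl->cntrl_dev);
    }

A controller driver would assign such callbacks to mhi_cntrl->runtime_get
and mhi_cntrl->runtime_put before registering the controller.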

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/1617700315-12492-1-git-send-email-loic.poulain@linaro.org
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
drivers/bus/mhi/core/main.c

index 58b8111ce66cb84dc38eb7560d30b0bb4e56c98e..7eed2741fcf9c17b1e06536fa325168954a67afa 100644 (file)
@@ -617,8 +617,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
                        /* notify client */
                        mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 
-                       if (mhi_chan->dir == DMA_TO_DEVICE)
+                       if (mhi_chan->dir == DMA_TO_DEVICE) {
                                atomic_dec(&mhi_cntrl->pending_pkts);
+                               /* Release the reference taken in mhi_queue() */
+                               mhi_cntrl->runtime_put(mhi_cntrl);
+                       }
 
                        /*
                         * Recycle the buffer if buffer is pre-allocated,
@@ -1054,9 +1057,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
        if (unlikely(ret))
                goto exit_unlock;
 
-       /* trigger M3 exit if necessary */
-       if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-               mhi_trigger_resume(mhi_cntrl);
+       /* Packet is queued, take a usage ref to exit M3 if necessary.
+        * For a host->device buffer the balanced put is done on buffer
+        * completion; for a device->host buffer it is done after ringing the DB.
+        */
+       mhi_cntrl->runtime_get(mhi_cntrl);
 
        /* Assert dev_wake (to exit/prevent M1/M2)*/
        mhi_cntrl->wake_toggle(mhi_cntrl);
@@ -1067,6 +1072,9 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
        if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
                mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 
+       if (dir == DMA_FROM_DEVICE)
+               mhi_cntrl->runtime_put(mhi_cntrl);
+
 exit_unlock:
        read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
 
@@ -1449,8 +1457,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
        while (tre_ring->rp != tre_ring->wp) {
                struct mhi_buf_info *buf_info = buf_ring->rp;
 
-               if (mhi_chan->dir == DMA_TO_DEVICE)
+               if (mhi_chan->dir == DMA_TO_DEVICE) {
                        atomic_dec(&mhi_cntrl->pending_pkts);
+                       /* Release the reference taken in mhi_queue() */
+                       mhi_cntrl->runtime_put(mhi_cntrl);
+               }
 
                if (!buf_info->pre_mapped)
                        mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
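
Taken together, every runtime_get() taken in mhi_queue() is balanced by
exactly one runtime_put(): on TX completion in parse_xfer_event(), on
channel reset in mhi_reset_data_chan(), or immediately after ringing the
doorbell for device->host buffers. For illustration only, a rough
client-side view of the TX path, using a hypothetical uplink callback and
helper (my_ul_xfer_cb, my_send) that are not part of this patch:

    #include <linux/mhi.h>

    /* Hypothetical uplink (host->device) completion callback. mhi_queue_buf()
     * below takes the runtime PM reference; the core drops it in
     * parse_xfer_event() right after this callback returns.
     */
    static void my_ul_xfer_cb(struct mhi_device *mhi_dev,
                              struct mhi_result *result)
    {
            dev_dbg(&mhi_dev->dev, "TX done: %zu bytes, status %d\n",
                    result->bytes_xferd, result->transaction_status);
    }

    /* Queue a TX buffer: the reference is held until the completion above */
    static int my_send(struct mhi_device *mhi_dev, void *buf, size_t len)
    {
            return mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
    }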