drm/i915/guc: Add stall timer to non-blocking CTB send function
author Matthew Brost <matthew.brost@intel.com>
Thu, 8 Jul 2021 16:20:53 +0000 (09:20 -0700)
committer John Harrison <John.C.Harrison@Intel.com>
Tue, 13 Jul 2021 20:50:03 +0000 (13:50 -0700)
Implement a stall timer which fails H2G CTB sends once a period of time
with no forward progress has elapsed, to prevent deadlock.
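
For illustration only (not part of this patch; guc_ct_send_nb() is a
hypothetical wrapper around the non-blocking send path), a caller is
expected to treat the two error codes differently:

	err = guc_ct_send_nb(ct, action, len);	/* hypothetical wrapper */
	if (err == -EBUSY) {
		/* No CTB space yet, stall timer is running - retry later */
	} else if (err == -EPIPE) {
		/* Stall timer expired, CTB marked broken - stop sending */
	}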

v2:
 (Michal)
  - Improve error message in ct_deadlocked()
  - Set broken when ct_deadlocked() returns true
  - Return -EPIPE on ct_deadlocked()
v3:
 (Michal)
  - Add ms to stall timer comment
 (Matthew)
  - Move broken check to intel_guc_ct_send()

Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210708162055.129996-6-matthew.brost@intel.com
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 3d6cba8d91ad26af1b672a0f53405bdb475c79fa..db3e85b89573c4bbdf3678b490eac39c82fd3a70 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -4,6 +4,9 @@
  */
 
 #include <linux/circ_buf.h>
+#include <linux/ktime.h>
+#include <linux/time64.h>
+#include <linux/timekeeping.h>
 
 #include "i915_drv.h"
 #include "intel_guc_ct.h"
@@ -316,6 +319,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
                goto err_deregister;
 
        ct->enabled = true;
+       ct->stall_time = KTIME_MAX;
 
        return 0;
 
@@ -389,9 +393,6 @@ static int ct_write(struct intel_guc_ct *ct,
        u32 *cmds = ctb->cmds;
        unsigned int i;
 
-       if (unlikely(ctb->broken))
-               return -EPIPE;
-
        if (unlikely(desc->status))
                goto corrupted;
 
@@ -505,6 +506,25 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
        return err;
 }
 
+#define GUC_CTB_TIMEOUT_MS     1500
+static inline bool ct_deadlocked(struct intel_guc_ct *ct)
+{
+       long timeout = GUC_CTB_TIMEOUT_MS;
+       bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;
+
+       if (unlikely(ret)) {
+               struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
+               struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;
+
+               CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
+                        ktime_ms_delta(ktime_get(), ct->stall_time),
+                        send->status, recv->status);
+               ct->ctbs.send.broken = true;
+       }
+
+       return ret;
+}
+
 static inline bool h2g_has_room(struct intel_guc_ct_buffer *ctb, u32 len_dw)
 {
        struct guc_ct_buffer_desc *desc = ctb->desc;
@@ -516,6 +536,26 @@ static inline bool h2g_has_room(struct intel_guc_ct_buffer *ctb, u32 len_dw)
        return space >= len_dw;
 }
 
+static int has_room_nb(struct intel_guc_ct *ct, u32 len_dw)
+{
+       struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
+
+       lockdep_assert_held(&ct->ctbs.send.lock);
+
+       if (unlikely(!h2g_has_room(ctb, len_dw))) {
+               if (ct->stall_time == KTIME_MAX)
+                       ct->stall_time = ktime_get();
+
+               if (unlikely(ct_deadlocked(ct)))
+                       return -EPIPE;
+               else
+                       return -EBUSY;
+       }
+
+       ct->stall_time = KTIME_MAX;
+       return 0;
+}
+
 static int ct_send_nb(struct intel_guc_ct *ct,
                      const u32 *action,
                      u32 len,
@@ -528,11 +568,9 @@ static int ct_send_nb(struct intel_guc_ct *ct,
 
        spin_lock_irqsave(&ctb->lock, spin_flags);
 
-       ret = h2g_has_room(ctb, len + GUC_CTB_HDR_LEN);
-       if (unlikely(!ret)) {
-               ret = -EBUSY;
+       ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN);
+       if (unlikely(ret))
                goto out;
-       }
 
        fence = ct_get_next_fence(ct);
        ret = ct_write(ct, action, len, fence, flags);
@@ -575,8 +613,13 @@ static int ct_send(struct intel_guc_ct *ct,
 retry:
        spin_lock_irqsave(&ctb->lock, flags);
        if (unlikely(!h2g_has_room(ctb, len + GUC_CTB_HDR_LEN))) {
+               if (ct->stall_time == KTIME_MAX)
+                       ct->stall_time = ktime_get();
                spin_unlock_irqrestore(&ctb->lock, flags);
 
+               if (unlikely(ct_deadlocked(ct)))
+                       return -EPIPE;
+
                if (msleep_interruptible(sleep_period_ms))
                        return -EINTR;
                sleep_period_ms = sleep_period_ms << 1;
@@ -584,6 +627,8 @@ retry:
                goto retry;
        }
 
+       ct->stall_time = KTIME_MAX;
+
        fence = ct_get_next_fence(ct);
        request.fence = fence;
        request.status = 0;
@@ -646,6 +691,9 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
                return -ENODEV;
        }
 
+       if (unlikely(ct->ctbs.send.broken))
+               return -EPIPE;
+
        if (flags & INTEL_GUC_CT_SEND_NB)
                return ct_send_nb(ct, action, len, flags);
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 5bb8bef024c8459b6b1489f72356a671845f068f..bee03794c1eb9e100078120ead2c9727c5daa34d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
+#include <linux/ktime.h>
 
 #include "intel_guc_fwif.h"
 
@@ -68,6 +69,9 @@ struct intel_guc_ct {
                struct list_head incoming; /* incoming requests */
                struct work_struct worker; /* handler for incoming requests */
        } requests;
+
+       /** @stall_time: time at which a CTB submission first stalled */
+       ktime_t stall_time;
 };
 
 void intel_guc_ct_init_early(struct intel_guc_ct *ct);