drm/i915: Make the i915_engine_info pretty printer standalone
author	Chris Wilson <chris@chris-wilson.co.uk>
Mon, 9 Oct 2017 11:02:57 +0000 (12:02 +0100)
committer	Chris Wilson <chris@chris-wilson.co.uk>
Mon, 9 Oct 2017 16:07:28 +0000 (17:07 +0100)
We can use drm_printer to hide the difference between printk and
seq_printf, and so make the i915_engine_info pretty printer callable
from contexts other than debugfs. For instance, I want to use the
pretty printer to debug kselftests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009110301.21705-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
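
For illustration only, here is a minimal sketch (not part of this patch) of
how the now-standalone dump could be driven from a non-debugfs context such
as a selftest failure path. It assumes a hypothetical helper name
dump_all_engines(); drm_info_printer() backs drm_printf() with dev_info(),
so intel_engine_dump() writes to the kernel log instead of a seq_file:

	/*
	 * Sketch: dump engine state outside debugfs, e.g. when a
	 * selftest fails. dump_all_engines() is a hypothetical helper.
	 */
	#include <drm/drm_print.h>

	#include "i915_drv.h"
	#include "intel_ringbuffer.h"

	static void dump_all_engines(struct drm_i915_private *i915)
	{
		/* drm_printf() via this printer ends up in dmesg */
		struct drm_printer p = drm_info_printer(i915->drm.dev);
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, i915, id)
			intel_engine_dump(engine, &p);
	}
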
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index f7817c66795862eaef84f837f0f7e187d805ae7d..9ec2bcd9a695a0bbd3a16e6afeef9e40b4999318 100644
@@ -3292,9 +3292,9 @@ static int i915_display_info(struct seq_file *m, void *unused)
 static int i915_engine_info(struct seq_file *m, void *unused)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct i915_gpu_error *error = &dev_priv->gpu_error;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       struct drm_printer p;
 
        intel_runtime_pm_get(dev_priv);
 
@@ -3303,149 +3303,9 @@ static int i915_engine_info(struct seq_file *m, void *unused)
        seq_printf(m, "Global active requests: %d\n",
                   dev_priv->gt.active_requests);
 
-       for_each_engine(engine, dev_priv, id) {
-               struct intel_breadcrumbs *b = &engine->breadcrumbs;
-               struct drm_i915_gem_request *rq;
-               struct rb_node *rb;
-               u64 addr;
-
-               seq_printf(m, "%s\n", engine->name);
-               seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
-                          intel_engine_get_seqno(engine),
-                          intel_engine_last_submit(engine),
-                          engine->hangcheck.seqno,
-                          jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
-                          engine->timeline->inflight_seqnos);
-               seq_printf(m, "\tReset count: %d\n",
-                          i915_reset_engine_count(error, engine));
-
-               rcu_read_lock();
-
-               seq_printf(m, "\tRequests:\n");
-
-               rq = list_first_entry(&engine->timeline->requests,
-                                     struct drm_i915_gem_request, link);
-               if (&rq->link != &engine->timeline->requests)
-                       print_request(m, rq, "\t\tfirst  ");
-
-               rq = list_last_entry(&engine->timeline->requests,
-                                    struct drm_i915_gem_request, link);
-               if (&rq->link != &engine->timeline->requests)
-                       print_request(m, rq, "\t\tlast   ");
-
-               rq = i915_gem_find_active_request(engine);
-               if (rq) {
-                       print_request(m, rq, "\t\tactive ");
-                       seq_printf(m,
-                                  "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
-                                  rq->head, rq->postfix, rq->tail,
-                                  rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
-                                  rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
-               }
-
-               seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
-                          I915_READ(RING_START(engine->mmio_base)),
-                          rq ? i915_ggtt_offset(rq->ring->vma) : 0);
-               seq_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
-                          I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
-                          rq ? rq->ring->head : 0);
-               seq_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
-                          I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
-                          rq ? rq->ring->tail : 0);
-               seq_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
-                          I915_READ(RING_CTL(engine->mmio_base)),
-                          I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
-
-               rcu_read_unlock();
-
-               addr = intel_engine_get_active_head(engine);
-               seq_printf(m, "\tACTHD:  0x%08x_%08x\n",
-                          upper_32_bits(addr), lower_32_bits(addr));
-               addr = intel_engine_get_last_batch_head(engine);
-               seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
-                          upper_32_bits(addr), lower_32_bits(addr));
-
-               if (i915_modparams.enable_execlists) {
-                       const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
-                       struct intel_engine_execlists * const execlists = &engine->execlists;
-                       u32 ptr, read, write;
-                       unsigned int idx;
-
-                       seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
-                                  I915_READ(RING_EXECLIST_STATUS_LO(engine)),
-                                  I915_READ(RING_EXECLIST_STATUS_HI(engine)));
-
-                       ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
-                       read = GEN8_CSB_READ_PTR(ptr);
-                       write = GEN8_CSB_WRITE_PTR(ptr);
-                       seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
-                                  read, execlists->csb_head,
-                                  write,
-                                  intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
-                                  yesno(test_bit(ENGINE_IRQ_EXECLIST,
-                                                 &engine->irq_posted)));
-                       if (read >= GEN8_CSB_ENTRIES)
-                               read = 0;
-                       if (write >= GEN8_CSB_ENTRIES)
-                               write = 0;
-                       if (read > write)
-                               write += GEN8_CSB_ENTRIES;
-                       while (read < write) {
-                               idx = ++read % GEN8_CSB_ENTRIES;
-                               seq_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
-                                          idx,
-                                          I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
-                                          hws[idx * 2],
-                                          I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
-                                          hws[idx * 2 + 1]);
-                       }
-
-                       rcu_read_lock();
-                       for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
-                               unsigned int count;
-
-                               rq = port_unpack(&execlists->port[idx], &count);
-                               if (rq) {
-                                       seq_printf(m, "\t\tELSP[%d] count=%d, ",
-                                                  idx, count);
-                                       print_request(m, rq, "rq: ");
-                               } else {
-                                       seq_printf(m, "\t\tELSP[%d] idle\n",
-                                                  idx);
-                               }
-                       }
-                       rcu_read_unlock();
-
-                       spin_lock_irq(&engine->timeline->lock);
-                       for (rb = execlists->first; rb; rb = rb_next(rb)) {
-                               struct i915_priolist *p =
-                                       rb_entry(rb, typeof(*p), node);
-
-                               list_for_each_entry(rq, &p->requests,
-                                                   priotree.link)
-                                       print_request(m, rq, "\t\tQ ");
-                       }
-                       spin_unlock_irq(&engine->timeline->lock);
-               } else if (INTEL_GEN(dev_priv) > 6) {
-                       seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
-                                  I915_READ(RING_PP_DIR_BASE(engine)));
-                       seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
-                                  I915_READ(RING_PP_DIR_BASE_READ(engine)));
-                       seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
-                                  I915_READ(RING_PP_DIR_DCLV(engine)));
-               }
-
-               spin_lock_irq(&b->rb_lock);
-               for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-                       struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
-                       seq_printf(m, "\t%s [%d] waiting for %x\n",
-                                  w->tsk->comm, w->tsk->pid, w->seqno);
-               }
-               spin_unlock_irq(&b->rb_lock);
-
-               seq_puts(m, "\n");
-       }
+       p = drm_seq_file_printer(m);
+       for_each_engine(engine, dev_priv, id)
+               intel_engine_dump(engine, &p);
 
        intel_runtime_pm_put(dev_priv);
 
index 807a7aafc08911cf768cf7f0ad6eb8d755147166..a59b2a30ff5ab9b99998d12d27792c4aa15defad 100644
@@ -22,6 +22,8 @@
  *
  */
 
+#include <drm/drm_print.h>
+
 #include "i915_drv.h"
 #include "intel_ringbuffer.h"
 #include "intel_lrc.h"
@@ -1616,6 +1618,164 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
        }
 }
 
+static void print_request(struct drm_printer *m,
+                         struct drm_i915_gem_request *rq,
+                         const char *prefix)
+{
+       drm_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
+                  rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
+                  rq->priotree.priority,
+                  jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+                  rq->timeline->common->name);
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct i915_gpu_error *error = &engine->i915->gpu_error;
+       struct drm_i915_private *dev_priv = engine->i915;
+       struct drm_i915_gem_request *rq;
+       struct rb_node *rb;
+       u64 addr;
+
+       drm_printf(m, "%s\n", engine->name);
+       drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+                  intel_engine_get_seqno(engine),
+                  intel_engine_last_submit(engine),
+                  engine->hangcheck.seqno,
+                  jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+                  engine->timeline->inflight_seqnos);
+       drm_printf(m, "\tReset count: %d\n",
+                  i915_reset_engine_count(error, engine));
+
+       rcu_read_lock();
+
+       drm_printf(m, "\tRequests:\n");
+
+       rq = list_first_entry(&engine->timeline->requests,
+                             struct drm_i915_gem_request, link);
+       if (&rq->link != &engine->timeline->requests)
+               print_request(m, rq, "\t\tfirst  ");
+
+       rq = list_last_entry(&engine->timeline->requests,
+                            struct drm_i915_gem_request, link);
+       if (&rq->link != &engine->timeline->requests)
+               print_request(m, rq, "\t\tlast   ");
+
+       rq = i915_gem_find_active_request(engine);
+       if (rq) {
+               print_request(m, rq, "\t\tactive ");
+               drm_printf(m,
+                          "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+                          rq->head, rq->postfix, rq->tail,
+                          rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+                          rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+       }
+
+       drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+                  I915_READ(RING_START(engine->mmio_base)),
+                  rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+       drm_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
+                  I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+                  rq ? rq->ring->head : 0);
+       drm_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
+                  I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+                  rq ? rq->ring->tail : 0);
+       drm_printf(m, "\tRING_CTL:   0x%08x [%s]\n",
+                  I915_READ(RING_CTL(engine->mmio_base)),
+                  I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+       rcu_read_unlock();
+
+       addr = intel_engine_get_active_head(engine);
+       drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
+                  upper_32_bits(addr), lower_32_bits(addr));
+       addr = intel_engine_get_last_batch_head(engine);
+       drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+                  upper_32_bits(addr), lower_32_bits(addr));
+
+       if (i915_modparams.enable_execlists) {
+               const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+               struct intel_engine_execlists * const execlists = &engine->execlists;
+               u32 ptr, read, write;
+               unsigned int idx;
+
+               drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
+                          I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+                          I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+               ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+               read = GEN8_CSB_READ_PTR(ptr);
+               write = GEN8_CSB_WRITE_PTR(ptr);
+               drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
+                          read, execlists->csb_head,
+                          write,
+                          intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
+                          yesno(test_bit(ENGINE_IRQ_EXECLIST,
+                                         &engine->irq_posted)));
+               if (read >= GEN8_CSB_ENTRIES)
+                       read = 0;
+               if (write >= GEN8_CSB_ENTRIES)
+                       write = 0;
+               if (read > write)
+                       write += GEN8_CSB_ENTRIES;
+               while (read < write) {
+                       idx = ++read % GEN8_CSB_ENTRIES;
+                       drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
+                                  idx,
+                                  I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+                                  hws[idx * 2],
+                                  I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
+                                  hws[idx * 2 + 1]);
+               }
+
+               rcu_read_lock();
+               for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
+                       unsigned int count;
+
+                       rq = port_unpack(&execlists->port[idx], &count);
+                       if (rq) {
+                               drm_printf(m, "\t\tELSP[%d] count=%d, ",
+                                          idx, count);
+                               print_request(m, rq, "rq: ");
+                       } else {
+                               drm_printf(m, "\t\tELSP[%d] idle\n",
+                                          idx);
+                       }
+               }
+               rcu_read_unlock();
+
+               spin_lock_irq(&engine->timeline->lock);
+               for (rb = execlists->first; rb; rb = rb_next(rb)) {
+                       struct i915_priolist *p =
+                               rb_entry(rb, typeof(*p), node);
+
+                       list_for_each_entry(rq, &p->requests,
+                                           priotree.link)
+                               print_request(m, rq, "\t\tQ ");
+               }
+               spin_unlock_irq(&engine->timeline->lock);
+       } else if (INTEL_GEN(dev_priv) > 6) {
+               drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+                          I915_READ(RING_PP_DIR_BASE(engine)));
+               drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+                          I915_READ(RING_PP_DIR_BASE_READ(engine)));
+               drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+                          I915_READ(RING_PP_DIR_DCLV(engine)));
+       }
+
+       spin_lock_irq(&b->rb_lock);
+       for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+               struct intel_wait *w = rb_entry(rb, typeof(*w), node);
+
+               drm_printf(m, "\t%s [%d] waiting for %x\n",
+                          w->tsk->comm, w->tsk->pid, w->seqno);
+       }
+       spin_unlock_irq(&b->rb_lock);
+
+       drm_printf(m, "\n");
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #endif
index 0fedda17488c2074db40fa81fe8566545bdcc445..17186f067408587543e8f7955e00f4d86c51cb72 100644
@@ -7,6 +7,8 @@
 #include "i915_gem_timeline.h"
 #include "i915_selftest.h"
 
+struct drm_printer;
+
 #define I915_CMD_HASH_ORDER 9
 
 /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
@@ -839,4 +841,6 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915);
 
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
 
+void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
+
 #endif /* _INTEL_RINGBUFFER_H_ */