drm/i915: Introduce new macros for tracing
authorVenkata Sandeep Dhanalakota <venkata.s.dhanalakota@intel.com>
Fri, 13 Dec 2019 15:51:52 +0000 (07:51 -0800)
committerChris Wilson <chris@chris-wilson.co.uk>
Fri, 13 Dec 2019 20:16:23 +0000 (20:16 +0000)
New macros ENGINE_TRACE(), CE_TRACE(), RQ_TRACE() and
GT_TRACE() are introduced to tag the device name and engine
name in context and request tracing in i915.

Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Venkata Sandeep Dhanalakota <venkata.s.dhanalakota@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191213155152.69182-2-venkata.s.dhanalakota@intel.com
14 files changed:
drivers/gpu/drm/i915/gem/i915_gem_pm.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_context.h
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_gt.h
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h

index f88ee1317bb45f98916038c85a68680ebf707f51..3671a4e7e1cbb951adb993b3168797a3b7767c8b 100644 (file)
@@ -13,7 +13,7 @@
 
 void i915_gem_suspend(struct drm_i915_private *i915)
 {
-       GEM_TRACE("\n");
+       GEM_TRACE("%s\n", dev_name(i915->drm.dev));
 
        intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
        flush_workqueue(i915->wq);
@@ -99,7 +99,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 
 void i915_gem_resume(struct drm_i915_private *i915)
 {
-       GEM_TRACE("\n");
+       GEM_TRACE("%s\n", dev_name(i915->drm.dev));
 
        intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 
index 61c39e943f694395e911e035ba0b9f82b6d2a97b..b1e346d2d35f509ff0716386467ed321b8a2a596 100644 (file)
@@ -68,9 +68,8 @@ int __intel_context_do_pin(struct intel_context *ce)
                if (err)
                        goto err;
 
-               GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
-                         ce->engine->name, ce->timeline->fence_context,
-                         ce->ring->head, ce->ring->tail);
+               CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
+                        ce->ring->head, ce->ring->tail);
 
                i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
 
@@ -98,8 +97,7 @@ void intel_context_unpin(struct intel_context *ce)
        mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
 
        if (likely(atomic_dec_and_test(&ce->pin_count))) {
-               GEM_TRACE("%s context:%llx retire\n",
-                         ce->engine->name, ce->timeline->fence_context);
+               CE_TRACE(ce, "retire\n");
 
                ce->ops->unpin(ce);
 
@@ -141,8 +139,7 @@ static void __intel_context_retire(struct i915_active *active)
 {
        struct intel_context *ce = container_of(active, typeof(*ce), active);
 
-       GEM_TRACE("%s context:%llx retire\n",
-                 ce->engine->name, ce->timeline->fence_context);
+       CE_TRACE(ce, "retire\n");
 
        set_bit(CONTEXT_VALID_BIT, &ce->flags);
        if (ce->state)
index 68b3d317d959eefb341961bb8c64df174ff314be..b39eb1fcfbca3b84f58a742acc8390679f44fe42 100644 (file)
 #include "intel_ring_types.h"
 #include "intel_timeline_types.h"
 
+#define CE_TRACE(ce, fmt, ...) do {                                    \
+       const struct intel_context *ce__ = (ce);                        \
+       ENGINE_TRACE(ce__->engine, "context:%llx" fmt,                  \
+                    ce__->timeline->fence_context,                     \
+                    ##__VA_ARGS__);                                    \
+} while (0)
+
 void intel_context_init(struct intel_context *ce,
                        struct i915_gem_context *ctx,
                        struct intel_engine_cs *engine);
index c294ea80605e68637cf75ebb71754dbc919a73dd..0926ecea9147dc4c2352378c3e210f8235363754 100644 (file)
@@ -29,6 +29,13 @@ struct intel_gt;
 #define CACHELINE_BYTES 64
 #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
 
+/*
+ * ENGINE_TRACE() - emit a GEM_TRACE message tagged with the device name
+ * and the engine name, so per-engine traces can be attributed on
+ * systems with multiple devices.
+ *
+ * e__ is __maybe_unused because it is only referenced from within the
+ * GEM_TRACE() expansion, which presumably compiles away when tracing
+ * is disabled -- confirm against GEM_TRACE()'s definition.
+ */
+#define ENGINE_TRACE(e, fmt, ...) do {                                 \
+       const struct intel_engine_cs *e__ __maybe_unused = (e);         \
+       GEM_TRACE("%s %s: " fmt,                                        \
+                 dev_name(e__->i915->drm.dev), e__->name,              \
+                 ##__VA_ARGS__);                                       \
+} while (0)
+
 /*
  * The register defines to be used with the following macros need to accept a
  * base param, e.g:
index 49473c25916cc9cd4f27a3c5adbe2d54e46724ec..3d1d48bf90cf5901a698ccf1f8aa1f2652ab615c 100644 (file)
@@ -912,7 +912,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
        if (INTEL_GEN(engine->i915) < 3)
                return -ENODEV;
 
-       GEM_TRACE("%s\n", engine->name);
+       ENGINE_TRACE(engine, "\n");
 
        intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
 
@@ -921,7 +921,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
                                         mode, MODE_IDLE, MODE_IDLE,
                                         1000, stop_timeout(engine),
                                         NULL)) {
-               GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+               ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
                err = -ETIMEDOUT;
        }
 
@@ -933,7 +933,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
 
 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
 {
-       GEM_TRACE("%s\n", engine->name);
+       ENGINE_TRACE(engine, "\n");
 
        ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
 }
index 889eb37e386a25dce8492ae487cb849362004643..bcbda8e52d41f2727ac2c5c52a327b57c77b3158 100644 (file)
@@ -21,7 +21,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
                container_of(wf, typeof(*engine), wakeref);
        void *map;
 
-       GEM_TRACE("%s\n", engine->name);
+       ENGINE_TRACE(engine, "\n");
 
        intel_gt_pm_get(engine->gt);
 
@@ -80,7 +80,7 @@ __queue_and_release_pm(struct i915_request *rq,
 {
        struct intel_gt_timelines *timelines = &engine->gt->timelines;
 
-       GEM_TRACE("%s\n", engine->name);
+       ENGINE_TRACE(engine, "\n");
 
        /*
         * We have to serialise all potential retirement paths with our
@@ -204,7 +204,7 @@ static int __engine_park(struct intel_wakeref *wf)
        if (!switch_to_kernel_context(engine))
                return -EBUSY;
 
-       GEM_TRACE("%s\n", engine->name);
+       ENGINE_TRACE(engine, "\n");
 
        call_idle_barriers(engine); /* cleanup after wedging */
 
index 5436f8c30708b3a4deefed345c03adeb2c39ec0e..9d9e8831daeb8c8519d39c6056e7889a03e300a1 100644 (file)
 
 struct drm_i915_private;
 
+#define GT_TRACE(gt__, fmt, ...) do {                                  \
+       typecheck(struct intel_gt, *(gt__));                            \
+       GEM_TRACE("%s  " fmt, dev_name(gt->i915->drm.dev),              \
+                 ##__VA_ARGS__);                                       \
+} while (0)
+
 static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
 {
        return container_of(uc, struct intel_gt, uc);
index ecde67a75e3276f371548db6a39670f649f8c5c5..bb57e3443a50471c33bf26f2dd1fb784f5f21d19 100644 (file)
@@ -43,7 +43,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
        struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
        struct drm_i915_private *i915 = gt->i915;
 
-       GEM_TRACE("\n");
+       GT_TRACE(gt, "\n");
 
        i915_globals_unpark();
 
@@ -76,7 +76,7 @@ static int __gt_park(struct intel_wakeref *wf)
        intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
        struct drm_i915_private *i915 = gt->i915;
 
-       GEM_TRACE("\n");
+       GT_TRACE(gt, "\n");
 
        intel_gt_park_requests(gt);
 
@@ -141,7 +141,7 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
 
-       GEM_TRACE("force:%s\n", yesno(force));
+       GT_TRACE(gt, "force:%s", yesno(force));
 
        /* Use a raw wakeref to avoid calling intel_display_power_get early */
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@@ -188,7 +188,7 @@ int intel_gt_resume(struct intel_gt *gt)
        enum intel_engine_id id;
        int err = 0;
 
-       GEM_TRACE("\n");
+       GT_TRACE(gt, "\n");
 
        /*
         * After resume, we may need to poke into the pinned kernel
@@ -301,20 +301,19 @@ void intel_gt_suspend_late(struct intel_gt *gt)
 
        intel_gt_sanitize(gt, false);
 
-       GEM_TRACE("\n");
+       GT_TRACE(gt, "\n");
 }
 
 void intel_gt_runtime_suspend(struct intel_gt *gt)
 {
        intel_uc_runtime_suspend(&gt->uc);
 
-       GEM_TRACE("\n");
+       GT_TRACE(gt, "\n");
 }
 
 int intel_gt_runtime_resume(struct intel_gt *gt)
 {
-       GEM_TRACE("\n");
-
+       GT_TRACE(gt, "\n");
        intel_gt_init_swizzling(gt);
 
        return intel_uc_runtime_resume(&gt->uc);
index 929f6bae4eba30ed8f1857f7b04cec76550a417b..4ebfecd9503287c8b6e3e86d4010a8b323ac08c2 100644 (file)
@@ -1069,8 +1069,8 @@ static void reset_active(struct i915_request *rq,
         * remain correctly ordered. And we defer to __i915_request_submit()
         * so that all asynchronous waits are correctly handled.
         */
-       GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
-                 __func__, engine->name, rq->fence.context, rq->fence.seqno);
+       ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
+                    rq->fence.context, rq->fence.seqno);
 
        /* On resubmission of the active request, payload will be scrubbed */
        if (i915_request_completed(rq))
@@ -1274,15 +1274,14 @@ trace_ports(const struct intel_engine_execlists *execlists,
        if (!ports[0])
                return;
 
-       GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
-                 engine->name, msg,
-                 ports[0]->fence.context,
-                 ports[0]->fence.seqno,
-                 i915_request_completed(ports[0]) ? "!" :
-                 i915_request_started(ports[0]) ? "*" :
-                 "",
-                 ports[1] ? ports[1]->fence.context : 0,
-                 ports[1] ? ports[1]->fence.seqno : 0);
+       ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
+                    ports[0]->fence.context,
+                    ports[0]->fence.seqno,
+                    i915_request_completed(ports[0]) ? "!" :
+                    i915_request_started(ports[0]) ? "*" :
+                    "",
+                    ports[1] ? ports[1]->fence.context : 0,
+                    ports[1] ? ports[1]->fence.seqno : 0);
 }
 
 static __maybe_unused bool
@@ -1700,12 +1699,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        last = last_active(execlists);
        if (last) {
                if (need_preempt(engine, last, rb)) {
-                       GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n",
-                                 engine->name,
-                                 last->fence.context,
-                                 last->fence.seqno,
-                                 last->sched.attr.priority,
-                                 execlists->queue_priority_hint);
+                       ENGINE_TRACE(engine,
+                                    "preempting last=%llx:%lld, prio=%d, hint=%d\n",
+                                    last->fence.context,
+                                    last->fence.seqno,
+                                    last->sched.attr.priority,
+                                    execlists->queue_priority_hint);
                        record_preemption(execlists);
 
                        /*
@@ -1735,12 +1734,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                        last = NULL;
                } else if (need_timeslice(engine, last) &&
                           timer_expired(&engine->execlists.timer)) {
-                       GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
-                                 engine->name,
-                                 last->fence.context,
-                                 last->fence.seqno,
-                                 last->sched.attr.priority,
-                                 execlists->queue_priority_hint);
+                       ENGINE_TRACE(engine,
+                                    "expired last=%llx:%lld, prio=%d, hint=%d\n",
+                                    last->fence.context,
+                                    last->fence.seqno,
+                                    last->sched.attr.priority,
+                                    execlists->queue_priority_hint);
 
                        ring_set_paused(engine, 1);
                        defer_active(engine);
@@ -1817,14 +1816,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                return; /* leave this for another */
                        }
 
-                       GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
-                                 engine->name,
-                                 rq->fence.context,
-                                 rq->fence.seqno,
-                                 i915_request_completed(rq) ? "!" :
-                                 i915_request_started(rq) ? "*" :
-                                 "",
-                                 yesno(engine != ve->siblings[0]));
+                       ENGINE_TRACE(engine,
+                                    "virtual rq=%llx:%lld%s, new engine? %s\n",
+                                    rq->fence.context,
+                                    rq->fence.seqno,
+                                    i915_request_completed(rq) ? "!" :
+                                    i915_request_started(rq) ? "*" :
+                                    "",
+                                    yesno(engine != ve->siblings[0]));
 
                        ve->request = NULL;
                        ve->base.execlists.queue_priority_hint = INT_MIN;
@@ -1980,9 +1979,6 @@ done:
         * interrupt for secondary ports).
         */
        execlists->queue_priority_hint = queue_prio(execlists);
-       GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n",
-                 engine->name, execlists->queue_priority_hint,
-                 yesno(submit));
 
        if (submit) {
                *port = execlists_schedule_in(last, port - execlists->pending);
@@ -2131,7 +2127,7 @@ static void process_csb(struct intel_engine_cs *engine)
         */
        head = execlists->csb_head;
        tail = READ_ONCE(*execlists->csb_write);
-       GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
+       ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
        if (unlikely(head == tail))
                return;
 
@@ -2169,9 +2165,8 @@ static void process_csb(struct intel_engine_cs *engine)
                 * status notifier.
                 */
 
-               GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n",
-                         engine->name, head,
-                         buf[2 * head + 0], buf[2 * head + 1]);
+               ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+                            head, buf[2 * head + 0], buf[2 * head + 1]);
 
                if (INTEL_GEN(engine->i915) >= 12)
                        promote = gen12_csb_parse(execlists, buf + 2 * head);
@@ -2262,10 +2257,9 @@ static noinline void preempt_reset(struct intel_engine_cs *engine)
        /* Mark this tasklet as disabled to avoid waiting for it to complete */
        tasklet_disable_nosync(&engine->execlists.tasklet);
 
-       GEM_TRACE("%s: preempt timeout %lu+%ums\n",
-                 engine->name,
-                 READ_ONCE(engine->props.preempt_timeout_ms),
-                 jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+       ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
+                    READ_ONCE(engine->props.preempt_timeout_ms),
+                    jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
        intel_engine_reset(engine, "preemption time out");
 
        tasklet_enable(&engine->execlists.tasklet);
@@ -2971,8 +2965,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
        struct intel_engine_execlists * const execlists = &engine->execlists;
        unsigned long flags;
 
-       GEM_TRACE("%s: depth<-%d\n", engine->name,
-                 atomic_read(&execlists->tasklet.count));
+       ENGINE_TRACE(engine, "depth<-%d\n",
+                    atomic_read(&execlists->tasklet.count));
 
        /*
         * Prevent request submission to the hardware until we have
@@ -3134,8 +3128,8 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
        restore_default_state(ce, engine);
 
 out_replay:
-       GEM_TRACE("%s replay {head:%04x, tail:%04x}\n",
-                 engine->name, ce->ring->head, ce->ring->tail);
+       ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
+                    ce->ring->head, ce->ring->tail);
        intel_ring_update_space(ce->ring);
        __execlists_reset_reg_state(ce, engine);
        __execlists_update_reg_state(ce, engine);
@@ -3151,7 +3145,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 {
        unsigned long flags;
 
-       GEM_TRACE("%s\n", engine->name);
+       ENGINE_TRACE(engine, "\n");
 
        spin_lock_irqsave(&engine->active.lock, flags);
 
@@ -3172,7 +3166,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        struct rb_node *rb;
        unsigned long flags;
 
-       GEM_TRACE("%s\n", engine->name);
+       ENGINE_TRACE(engine, "\n");
 
        /*
         * Before we call engine->cancel_requests(), we should have exclusive
@@ -3259,8 +3253,8 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
        if (__tasklet_enable(&execlists->tasklet))
                /* And kick in case we missed a new request submission. */
                tasklet_hi_schedule(&execlists->tasklet);
-       GEM_TRACE("%s: depth->%d\n", engine->name,
-                 atomic_read(&execlists->tasklet.count));
+       ENGINE_TRACE(engine, "depth->%d\n",
+                    atomic_read(&execlists->tasklet.count));
 }
 
 static int gen8_emit_bb_start(struct i915_request *rq,
@@ -4309,10 +4303,9 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
                mask = ve->siblings[0]->mask;
        }
 
-       GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n",
-                 ve->base.name,
-                 rq->fence.context, rq->fence.seqno,
-                 mask, ve->base.execlists.queue_priority_hint);
+       ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
+                    rq->fence.context, rq->fence.seqno,
+                    mask, ve->base.execlists.queue_priority_hint);
 
        return mask;
 }
@@ -4403,10 +4396,9 @@ static void virtual_submit_request(struct i915_request *rq)
        struct i915_request *old;
        unsigned long flags;
 
-       GEM_TRACE("%s: rq=%llx:%lld\n",
-                 ve->base.name,
-                 rq->fence.context,
-                 rq->fence.seqno);
+       ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
+                    rq->fence.context,
+                    rq->fence.seqno);
 
        GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
 
index 8408cb84e52c66eece05ad2b05a64edf8dd39f34..f5b2e7c7e6c818f5e0c5cb4f6ad408299d47685a 100644 (file)
@@ -1089,7 +1089,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
        bool uses_guc = intel_engine_in_guc_submission_mode(engine);
        int ret;
 
-       GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
+       ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
        GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
 
        if (!intel_engine_pm_get_if_awake(engine))
index 5c22ca6f998ac486bc63fcd267c07b519737ea95..30ba67c9abe97413daf90fc1d23adfcf064b09a0 100644 (file)
@@ -632,8 +632,8 @@ static int xcs_resume(struct intel_engine_cs *engine)
        struct intel_ring *ring = engine->legacy.ring;
        int ret = 0;
 
-       GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
-                 engine->name, ring->head, ring->tail);
+       ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
+                    ring->head, ring->tail);
 
        intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
 
@@ -746,10 +746,10 @@ static void reset_prepare(struct intel_engine_cs *engine)
         *
         * FIXME: Wa for more modern gens needs to be validated
         */
-       GEM_TRACE("%s\n", engine->name);
+       ENGINE_TRACE(engine, "\n");
 
        if (intel_engine_stop_cs(engine))
-               GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
+               ENGINE_TRACE(engine, "timed out on STOP_RING\n");
 
        intel_uncore_write_fw(uncore,
                              RING_HEAD(base),
@@ -765,9 +765,8 @@ static void reset_prepare(struct intel_engine_cs *engine)
 
        /* Check acts as a post */
        if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
-               GEM_TRACE("%s: ring head [%x] not parked\n",
-                         engine->name,
-                         intel_uncore_read_fw(uncore, RING_HEAD(base)));
+               ENGINE_TRACE(engine, "ring head [%x] not parked\n",
+                            intel_uncore_read_fw(uncore, RING_HEAD(base)));
 }
 
 static void reset_ring(struct intel_engine_cs *engine, bool stalled)
index 172220e83079f895933f4871918785129c7b6f63..af04ed6e48d942e1c9d9d6873a255c1b92b74bd0 100644 (file)
@@ -375,7 +375,7 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
 
-       GEM_TRACE("%s\n", engine->name);
+       ENGINE_TRACE(engine, "\n");
 
        /*
         * Prevent request submission to the hardware until we have
@@ -434,7 +434,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
        struct rb_node *rb;
        unsigned long flags;
 
-       GEM_TRACE("%s\n", engine->name);
+       ENGINE_TRACE(engine, "\n");
 
        /*
         * Before we call engine->cancel_requests(), we should have exclusive
@@ -495,8 +495,8 @@ static void guc_reset_finish(struct intel_engine_cs *engine)
                /* And kick in case we missed a new request submission. */
                tasklet_hi_schedule(&execlists->tasklet);
 
-       GEM_TRACE("%s: depth->%d\n", engine->name,
-                 atomic_read(&execlists->tasklet.count));
+       ENGINE_TRACE(engine, "depth->%d\n",
+                    atomic_read(&execlists->tasklet.count));
 }
 
 /*
index 51bb8a0812a17f1f0183d130e0afc5d2d8d69743..c6d59d263550612aa04629639912a3e35a767c03 100644 (file)
@@ -223,10 +223,7 @@ bool i915_request_retire(struct i915_request *rq)
        if (!i915_request_completed(rq))
                return false;
 
-       GEM_TRACE("%s fence %llx:%lld, current %d\n",
-                 rq->engine->name,
-                 rq->fence.context, rq->fence.seqno,
-                 hwsp_seqno(rq));
+       RQ_TRACE(rq, "\n");
 
        GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
        trace_i915_request_retire(rq);
@@ -287,10 +284,7 @@ void i915_request_retire_upto(struct i915_request *rq)
        struct intel_timeline * const tl = i915_request_timeline(rq);
        struct i915_request *tmp;
 
-       GEM_TRACE("%s fence %llx:%lld, current %d\n",
-                 rq->engine->name,
-                 rq->fence.context, rq->fence.seqno,
-                 hwsp_seqno(rq));
+       RQ_TRACE(rq, "\n");
 
        GEM_BUG_ON(!i915_request_completed(rq));
 
@@ -351,10 +345,7 @@ bool __i915_request_submit(struct i915_request *request)
        struct intel_engine_cs *engine = request->engine;
        bool result = false;
 
-       GEM_TRACE("%s fence %llx:%lld, current %d\n",
-                 engine->name,
-                 request->fence.context, request->fence.seqno,
-                 hwsp_seqno(request));
+       RQ_TRACE(request, "\n");
 
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->active.lock);
@@ -443,10 +434,7 @@ void __i915_request_unsubmit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
 
-       GEM_TRACE("%s fence %llx:%lld, current %d\n",
-                 engine->name,
-                 request->fence.context, request->fence.seqno,
-                 hwsp_seqno(request));
+       RQ_TRACE(request, "\n");
 
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->active.lock);
@@ -1261,8 +1249,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
        struct intel_ring *ring = rq->ring;
        u32 *cs;
 
-       GEM_TRACE("%s fence %llx:%lld\n",
-                 engine->name, rq->fence.context, rq->fence.seqno);
+       RQ_TRACE(rq, "\n");
 
        /*
         * To ensure that this call will not fail, space for its emissions
index 96991d64759cfe5d239ffe1bfc6dc1cdd3b3c992..a561b8efe8698b0f27c3a7ea012a4d9c2d381fcc 100644 (file)
@@ -49,6 +49,13 @@ struct i915_capture_list {
        struct i915_vma *vma;
 };
 
+/*
+ * RQ_TRACE() - ENGINE_TRACE message prefixed with the request's fence
+ * context:seqno and the value read via hwsp_seqno() (presumably the
+ * breadcrumb currently visible in the HWSP -- confirm against
+ * hwsp_seqno()'s definition).
+ *
+ * NOTE(review): there is no separator between "current %d" and fmt;
+ * every caller in this patch passes "\n", but a longer fmt would run
+ * together with the seqno.
+ */
+#define RQ_TRACE(rq, fmt, ...) do {                                    \
+       const struct i915_request *rq__ = (rq);                         \
+       ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d" fmt,   \
+                    rq__->fence.context, rq__->fence.seqno,            \
+                    hwsp_seqno(rq__), ##__VA_ARGS__);                  \
+} while (0)
+
 enum {
        /*
         * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.