// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"
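
/*
 * On CONFIG_DRM_I915_DEBUG_GEM builds, poison the context state object with
 * CONTEXT_REDZONE so that any state we (or the HW) rely upon without first
 * rewriting it shows up as garbage.
 */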
static void dbg_poison_ce(struct intel_context *ce)
{
	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	if (ce->state) {
		struct drm_i915_gem_object *obj = ce->state->obj;
		int type = i915_coherent_map_type(ce->engine->i915, obj, true);
		void *map;

		if (!i915_gem_object_trylock(obj, NULL))
			return;

		map = i915_gem_object_pin_map(obj, type);
		if (!IS_ERR(map)) {
			memset(map, CONTEXT_REDZONE, obj->base.size);
			i915_gem_object_flush_map(obj);
			i915_gem_object_unpin_map(obj);
		}
		i915_gem_object_unlock(obj);
	}
}
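
/*
 * First reference taken on the engine wakeref: power up the GT, scrub the
 * stale kernel context image, then let the backend, breadcrumbs and
 * heartbeat resume.
 */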
static int __engine_unpark(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;

	ENGINE_TRACE(engine, "\n");

	intel_gt_pm_get(engine->gt);

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
	if (ce) {
		GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));

		/* Flush all pending HW writes before we touch the context */
		while (unlikely(intel_context_inflight(ce)))
			intel_engine_flush_submission(engine);

		/* First poison the image to verify we never fully trust it */
		dbg_poison_ce(ce);

		/* Scrub the context image after our loss of control */
		ce->ops->reset(ce);

		CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
			 ce->timeline->seqno,
			 READ_ONCE(*ce->timeline->hwsp_seqno),
			 ce->ring->emit);
		GEM_BUG_ON(ce->timeline->seqno !=
			   READ_ONCE(*ce->timeline->hwsp_seqno));
	}

	if (engine->unpark)
		engine->unpark(engine);

	intel_breadcrumbs_unpark(engine->breadcrumbs);
	intel_engine_unpark_heartbeat(engine);
	return 0;
}
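
/*
 * Fence callback for the final parking request: feed the time between
 * emitting the request and its completion into the engine latency ewma.
 */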
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}
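
/*
 * Put the timeline back on the active list, hand the parking request over
 * to the HW and unblock the engine-pm (deferring the actual park), all
 * under the timelines lock to avoid racing with retirement.
 */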
static void
__queue_and_release_pm(struct i915_request *rq,
		       struct intel_timeline *tl,
		       struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "parking\n");

	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submission to start until
	 * after we finish queueing, nor could we allow that submitter
	 * to retire us before we are ready!
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);

	/* Hand the request over to HW and so engine_retire() */
	__i915_request_queue_bh(rq);

	/* Let new submissions commence (and maybe retire this timeline) */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}
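
/*
 * Switch the engine back onto the kernel context before parking so that no
 * user state is left live on the HW. Returns true if the engine can be
 * parked right away, false if a final request had to be queued and the
 * park is deferred until that request is retired.
 */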
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;
	bool result = true;

	/*
	 * This is execlist specific behaviour intended to ensure the GPU is
	 * idle by switching to a known 'safe' context. With GuC submission, the
	 * same idle guarantee is achieved by other means (disabling
	 * scheduling). Further, switching to a 'safe' context has no effect
	 * with GuC submission as the scheduler can just switch back again.
	 *
	 * FIXME: Move this backend scheduler specific behaviour into the
	 * scheduler backend.
	 */
	if (intel_engine_uses_guc(engine))
		return true;

	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	GEM_BUG_ON(!intel_context_is_barrier(ce));
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, thus all rings should be empty and
	 * all timelines idle.
	 *
	 * For unlocking, there are 2 other parties and the GPU who have a
	 * stake here.
	 *
	 * A new GPU user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * and so intel_wakeref_defer_park() acts like a mutex_unlock of the
	 * engine->wakeref.
	 *
	 * The other party is intel_gt_retire_requests(), which is walking the
	 * list of active timelines looking for completions. Meanwhile as soon
	 * as we call __i915_request_queue(), the GPU may complete our request.
	 * Ergo, if we put ourselves on the timelines.active_list
	 * (see intel_timeline_enter()) before we increment the
	 * engine->wakeref.count, we may see the request completion and retire
	 * it causing an underflow of the engine->wakeref.
	 */
	set_bit(CONTEXT_IS_PARKING, &ce->flags);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

	rq = __i915_request_create(ce, GFP_NOWAIT);
	if (IS_ERR(rq))
		/* Context switch failed, hope for the best! Maybe reset? */
		goto out_unlock;

	/* Check again on the next retirement. */
	engine->wakeref_serial = engine->serial + 1;
	i915_request_add_active_barriers(rq);

	/* Install ourselves as a preemption barrier */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
		/*
		 * Use an interrupt for precise measurement of duration,
		 * otherwise we rely on someone else retiring all the requests
		 * which may delay the signaling (i.e. we will likely wait
		 * until the background request retirement running every
		 * second or two).
		 */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}

	/* Expose ourselves to the world */
	__queue_and_release_pm(rq, ce->timeline, engine);

	result = false;
out_unlock:
	clear_bit(CONTEXT_IS_PARKING, &ce->flags);
	return result;
}
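
/*
 * Flush any idle barriers still queued on the engine, signalling their
 * callbacks with -EAGAIN since no request will carry them (e.g. after
 * wedging).
 */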
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	struct llist_node *node, *next;

	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
}
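
/*
 * Final step of dropping the engine wakeref: make sure the GPU is back in
 * the kernel context, then quiesce the heartbeat, breadcrumbs and backend
 * before dropping our GT power reference.
 */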
static int __engine_park(struct intel_wakeref *wf)
{
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;

	/*
	 * If one and only one request is completed between pm events,
	 * we know that we are inside the kernel context and it is
	 * safe to power down. (We are paranoid in case that runtime
	 * suspend causes corruption to the active context image, and
	 * want to avoid that impacting userspace.)
	 */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "parked\n");

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_breadcrumbs_park(engine->breadcrumbs);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	/* While gt calls i915_vma_parked(), we have to break the lock cycle */
	intel_gt_pm_put_async(engine->gt);

	return 0;
}
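
/* engine->wakeref transitions: unpark on first get, park on final put */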
static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,
	.put = __engine_park,
};
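
/* Set up the engine's wakeref tracking and heartbeat at init time. */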
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	struct intel_runtime_pm *rpm = engine->uncore->rpm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
	intel_engine_init_heartbeat(engine);
}

/**
 * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
 * an engine.
 * @engine: The engine whose pinned contexts we want to reset.
 *
 * Typically the pinned context LMEM images lose their content, or have it
 * corrupted, across suspend. This function resets their images.
 */
void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	list_for_each_entry(ce, &engine->pinned_contexts_list,
			    pinned_contexts_link) {
		/* kernel context gets reset at __engine_unpark() */
		if (ce == engine->kernel_context)
			continue;
		dbg_poison_ce(ce);
		ce->ops->reset(ce);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif