/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"

static int request_sync(struct i915_request *rq)
{
	struct intel_timeline *tl = i915_request_timeline(rq);
	long timeout;
	int err = 0;

	intel_timeline_get(tl);
	i915_request_get(rq);

	/* Opencode i915_request_add() so we can keep the timeline locked. */
	__i915_request_commit(rq);
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	__i915_request_queue(rq, NULL);
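
	/*
	 * Wait briefly for completion; if the request finished, retire
	 * it and every earlier request along its timeline.
	 */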
	timeout = i915_request_wait(rq, 0, HZ / 10);
	if (timeout < 0)
		err = timeout;
	else
		i915_request_retire_upto(rq);

	lockdep_unpin_lock(&tl->mutex, rq->cookie);
	mutex_unlock(&tl->mutex);

	i915_request_put(rq);
	intel_timeline_put(tl);

	return err;
}

static int context_sync(struct intel_context *ce)
{
	struct intel_timeline *tl = ce->timeline;
	int err = 0;

	mutex_lock(&tl->mutex);
	do {
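		/*
		 * Wait upon the most recent request on the timeline,
		 * then retire it and everything submitted before it.
		 */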
		struct i915_request *rq;
		long timeout;

		if (list_empty(&tl->requests))
			break;

		rq = list_last_entry(&tl->requests, typeof(*rq), link);
		i915_request_get(rq);

		timeout = i915_request_wait(rq, 0, HZ / 10);
		if (timeout < 0)
			err = timeout;
		else
			i915_request_retire_upto(rq);

		i915_request_put(rq);
	} while (!err);
	mutex_unlock(&tl->mutex);

	return err;
}

static int __live_context_size(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;
	void *vaddr;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_pin(ce);
	if (err)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state->obj,
					i915_coherent_map_type(engine->i915));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		intel_context_unpin(ce);
		goto err;
	}

	/*
	 * Note that execlists also applies a redzone which it checks on
	 * context unpin when debugging. We are using the same location
	 * and same poison value so that our checks overlap. Despite the
	 * redundancy, we want to keep this little selftest so that we
	 * get coverage of any and all submission backends, and we can
	 * always extend this test to ensure we trick the HW into a
	 * compromising position wrt the various sections that need
	 * to be written into the context state.
	 *
	 * TLDR; this overlaps with the execlists redzone.
	 */
	vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
	memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
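
	/*
	 * Submit a do-nothing request so the HW loads, then saves, the
	 * context image around our poisoned page.
	 */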
	rq = intel_context_create_request(ce);
	intel_context_unpin(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = request_sync(rq);
	if (err)
		goto err_unpin;

	/* Force the context switch */
	rq = intel_engine_create_kernel_request(engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = request_sync(rq);
	if (err)
		goto err_unpin;
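
	/*
	 * The switch to the kernel context forces the HW to write our
	 * context image back to memory, making the redzone current.
	 */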
	if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
		pr_err("%s context overwrote trailing red-zone!\n", engine->name);
		err = -EINVAL;
	}

err_unpin:
	i915_gem_object_unpin_map(ce->state->obj);
err:
	intel_context_put(ce);
	return err;
}

static int live_context_size(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check that our context sizes are correct by seeing if the
	 * HW tries to write past the end of one.
	 */

	for_each_engine(engine, gt, id) {
		void *saved;

		if (!engine->context_size)
			continue;

		intel_engine_pm_get(engine);

		/*
		 * Hide the old default state -- we lie about the context size
		 * and get confused when the default state is smaller than
		 * expected. For our do-nothing request, inheriting the
		 * active state is sufficient, we are only checking that we
		 * don't use more than we planned.
		 */
		saved = fetch_and_zero(&engine->default_state);

		/* Overlaps with the execlists redzone */
		engine->context_size += I915_GTT_PAGE_SIZE;

		err = __live_context_size(engine);

		engine->context_size -= I915_GTT_PAGE_SIZE;
		engine->default_state = saved;

		intel_engine_pm_put(engine);

		if (err)
			break;
	}

	return err;
}

static int __live_active_context(struct intel_engine_cs *engine)
{
	unsigned long saved_heartbeat;
	struct intel_context *ce;
	int pass;
	int err;

	/*
	 * We keep active contexts alive until after a subsequent context
	 * switch as the final write from the context-save will be after
	 * we retire the final request. We track when we unpin the context,
	 * under the presumption that the final pin is from the last request,
	 * and instead of immediately unpinning the context, we add a task
	 * to unpin the context from the next idle-barrier.
	 *
	 * This test makes sure that the context is kept alive until a
	 * subsequent idle-barrier (emitted when the engine wakeref hits 0
	 * with no more outstanding requests).
	 */

	if (intel_engine_pm_is_awake(engine)) {
		pr_err("%s is awake before starting %s!\n",
		       engine->name, __func__);
		return -EINVAL;
	}

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	saved_heartbeat = engine->props.heartbeat_interval_ms;
	engine->props.heartbeat_interval_ms = 0;
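
	/*
	 * With the heartbeat disabled, no background kernel requests
	 * are submitted behind our back to keep the engine awake.
	 */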
	for (pass = 0; pass <= 2; pass++) {
		struct i915_request *rq;

		intel_engine_pm_get(engine);

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_engine;
		}

		err = request_sync(rq);
		if (err)
			goto out_engine;

		/* Context will be kept active until after an idle-barrier. */
		if (i915_active_is_idle(&ce->active)) {
			pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			goto out_engine;
		}

		if (!intel_engine_pm_is_awake(engine)) {
			pr_err("%s is asleep before idle-barrier\n",
			       engine->name);
			err = -EINVAL;
			goto out_engine;
		}

out_engine:
		intel_engine_pm_put(engine);
		if (err)
			goto err;
	}

	/* Now make sure our idle-barriers are flushed */
	err = intel_engine_flush_barriers(engine);
	if (err)
		goto err;

	/* Wait for the barrier and in the process wait for engine to park */
	err = context_sync(engine->kernel_context);
	if (err)
		goto err;

	if (!i915_active_is_idle(&ce->active)) {
		pr_err("context is still active!\n");
		err = -EINVAL;
	}

	intel_engine_pm_flush(engine);
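
	/*
	 * Parking is deferred to a worker, which intel_engine_pm_flush()
	 * above has waited upon; the engine must now be asleep.
	 */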
	if (intel_engine_pm_is_awake(engine)) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s is still awake:%d after idle-barriers\n",
				  engine->name,
				  atomic_read(&engine->wakeref.count));
		GEM_TRACE_DUMP();

		err = -EINVAL;
	}

err:
	engine->props.heartbeat_interval_ms = saved_heartbeat;
	intel_context_put(ce);
	return err;
}

static int live_active_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, gt, id) {
		err = __live_active_context(engine);
		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	return err;
}
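
/*
 * Submit a do-nothing request on @ce that is also registered with @remote's
 * active tracker, exercising intel_context_prepare_remote_request().
 */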
static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(remote);
	if (err)
		return err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	err = intel_context_prepare_remote_request(remote, rq);
	if (err) {
		i915_request_add(rq);
		goto unpin;
	}

	err = request_sync(rq);

unpin:
	intel_context_unpin(remote);
	return err;
}

static int __live_remote_context(struct intel_engine_cs *engine)
{
	struct intel_context *local, *remote;
	unsigned long saved_heartbeat;
	int pass;
	int err;

	/*
	 * Check that our idle barriers do not interfere with normal
	 * activity tracking. In particular, check that operating
	 * on the context image remotely (intel_context_prepare_remote_request),
	 * which inserts foreign fences into intel_context.active, does not
	 * clobber the idle-barrier.
	 */

	if (intel_engine_pm_is_awake(engine)) {
		pr_err("%s is awake before starting %s!\n",
		       engine->name, __func__);
		return -EINVAL;
	}

	remote = intel_context_create(engine);
	if (IS_ERR(remote))
		return PTR_ERR(remote);

	local = intel_context_create(engine);
	if (IS_ERR(local)) {
		err = PTR_ERR(local);
		goto err_remote;
	}

	saved_heartbeat = engine->props.heartbeat_interval_ms;
	engine->props.heartbeat_interval_ms = 0;
	intel_engine_pm_get(engine);
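
	/*
	 * Hold the engine wakeref across all passes so the engine cannot
	 * park and flush the idle-barriers while we are still testing.
	 */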
	for (pass = 0; pass <= 2; pass++) {
		err = __remote_sync(local, remote);
		if (err)
			break;

		err = __remote_sync(engine->kernel_context, remote);
		if (err)
			break;

		if (i915_active_is_idle(&remote->active)) {
			pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			break;
		}
	}

	intel_engine_pm_put(engine);
	engine->props.heartbeat_interval_ms = saved_heartbeat;

	intel_context_put(local);
err_remote:
	intel_context_put(remote);
	return err;
}

static int live_remote_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, gt, id) {
		err = __live_remote_context(engine);
		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	return err;
}

int intel_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_context_size),
		SUBTEST(live_active_context),
		SUBTEST(live_remote_context),
	};
	struct intel_gt *gt = &i915->gt;

	if (intel_gt_is_wedged(gt))
		return 0;

	return intel_gt_live_subtests(tests, gt);
}