/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"

#include "igt_flush_test.h"
#include "igt_reset.h"
#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"

#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                char name[REF_NAME_MAX];
                struct i915_wa_list wa_list;
        } engine[I915_NUM_ENGINES];
};

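/*
 * Snapshot the workarounds we expect the hardware to hold: one reference
 * list for the GT as a whole, plus one per engine. These are built with
 * the same init routines the driver uses, so any later divergence means
 * the workarounds were lost.
 */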
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        wa_init_start(&lists->gt_wa_list, "GT_REF");
        gt_init_workarounds(i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);

        for_each_engine(engine, i915, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;
                char *name = lists->engine[id].name;

                snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);

                wa_init_start(wal, name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);
        }
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, i915, id)
                intel_wa_list_free(&lists->engine[id].wa_list);

        intel_wa_list_free(&lists->gt_wa_list);
}

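/*
 * Submit a request that copies each RING_FORCE_TO_NONPRIV slot into a
 * scratch page using MI_STORE_REGISTER_MEM, so the whitelist programmed
 * into the hardware can be inspected from the CPU.
 */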
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
        const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
        intel_wakeref_t wakeref;
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 srm, *cs;
        int err;
        int i;

        result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(result))
                return result;

        i915_gem_object_set_cache_level(result, I915_CACHE_LLC);

        cs = i915_gem_object_pin_map(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
        }
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_unpin_map(result);

        vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_obj;

        rq = ERR_PTR(-ENODEV);
        with_intel_runtime_pm(engine->i915, wakeref)
                rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
        }

        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                *cs++ = srm;
                *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
                *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
                *cs++ = 0;
        }
        intel_ring_advance(rq, cs);

        i915_gem_object_get(result);
        i915_gem_object_set_active_reference(result);

        i915_request_add(rq);
        i915_vma_unpin(vma);

        return result;

err_req:
        i915_request_add(rq);
err_pin:
        i915_vma_unpin(vma);
err_obj:
        i915_gem_object_put(result);
        return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

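/* Dump every RING_NONPRIV slot alongside the value we expected to find. */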
static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = results[i];

                pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                        i, expected, actual);
        }
}

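/*
 * Read the whitelist back through the engine and compare each slot
 * against the expected register offset; unused slots should point at
 * RING_NOPID.
 */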
static int check_whitelist(struct i915_gem_context *ctx,
                           struct intel_engine_cs *engine)
{
        struct drm_i915_gem_object *results;
        struct igt_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;

        results = read_nonprivs(ctx, engine);
        if (IS_ERR(results))
                return PTR_ERR(results);

        err = 0;
        igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
        if (i915_terminally_wedged(&ctx->i915->gpu_error))
                err = -EIO;
        if (err)
                goto out_put;

        vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);

                        err = -EINVAL;
                        break;
                }
        }

        i915_gem_object_unpin_map(results);
out_put:
        i915_gem_object_put(results);
        return err;
}

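/*
 * Reset helpers: do_device_reset performs a full device reset with this
 * engine marked as stalled, while do_engine_reset resets only the one
 * engine.
 */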
static int do_device_reset(struct intel_engine_cs *engine)
{
        set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
        i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
        return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return i915_reset_engine(engine, "live_workarounds");
}

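/*
 * Switch the engine onto a throwaway kernel context, optionally keeping
 * it busy with a spinner, so that the subsequent reset does not land on
 * the context under test.
 */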
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
{
        struct i915_gem_context *ctx;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        int err = 0;

        ctx = kernel_context(engine->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        rq = ERR_PTR(-ENODEV);
        with_intel_runtime_pm(engine->i915, wakeref) {
                if (spin)
                        rq = igt_spinner_create_request(spin,
                                                        ctx, engine,
                                                        MI_NOOP);
                else
                        rq = i915_request_alloc(engine, ctx);
        }

        kernel_context_close(ctx);

        if (IS_ERR(rq)) {
                spin = NULL;
                err = PTR_ERR(rq);
                goto err;
        }

        i915_request_add(rq);

        if (spin && !igt_wait_for_spinner(spin, rq)) {
                pr_err("Spinner failed to start\n");
                err = -ETIMEDOUT;
        }

err:
        if (err && spin)
                igt_spinner_end(spin);

        return err;
}

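/*
 * Core of the whitelist test: verify the RING_NONPRIV registers before a
 * reset, perform the reset (engine resets are done from under a busy
 * spinner), then verify the whitelist survived in both the original and
 * a freshly created context.
 */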
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct drm_i915_private *i915 = engine->i915;
        bool want_spin = reset == do_engine_reset;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err;

        pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
                engine->whitelist.count, name);

        if (want_spin) {
                err = igt_spinner_init(&spin, i915);
                if (err)
                        return err;
        }

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
                goto out;
        }

        err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
        if (err)
                goto out;

        with_intel_runtime_pm(i915, wakeref)
                err = reset(engine);

        if (want_spin) {
                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);
        }

        if (err) {
                pr_err("%s reset failed\n", name);
                goto out;
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Whitelist not preserved in context across %s reset!\n",
                       name);
                goto out;
        }

        kernel_context_close(ctx);

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
                       name);
                goto out;
        }

out:
        kernel_context_close(ctx);
        return err;
}

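/* Exercise the RING_NONPRIV whitelist across both flavours of reset. */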
static int live_reset_whitelist(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine = i915->engine[RCS];
        int err = 0;

        /* If we reset the gpu, we should not lose the RING_NONPRIV */

        if (!engine || engine->whitelist.count == 0)
                return 0;

        igt_global_reset_lock(i915);

        if (intel_has_reset_engine(i915)) {
                err = check_whitelist_across_reset(engine,
                                                   do_engine_reset,
                                                   "engine");
                if (err)
                        goto out;
        }

        if (intel_has_gpu_reset(i915)) {
                err = check_whitelist_across_reset(engine,
                                                   do_device_reset,
                                                   "device");
                if (err)
                        goto out;
        }

out:
        igt_global_reset_unlock(i915);
        return err;
}

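/* Check all reference lists (GT and per-engine) against the hardware. */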
static bool verify_gt_engine_wa(struct drm_i915_private *i915,
                                struct wa_lists *lists, const char *str)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool ok = true;

        ok &= wa_list_verify(i915, &lists->gt_wa_list, str);

        for_each_engine(engine, i915, id)
                ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);

        return ok;
}

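/* Verify that the GT and engine workarounds survive a full GPU reset. */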
static int
live_gpu_reset_gt_engine_workarounds(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gpu_error *error = &i915->gpu_error;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;

        if (!intel_has_gpu_reset(i915))
                return 0;

        pr_info("Verifying after GPU reset...\n");

        igt_global_reset_lock(i915);
        wakeref = intel_runtime_pm_get(i915);

        reference_lists_init(i915, &lists);

        ok = verify_gt_engine_wa(i915, &lists, "before reset");
        if (!ok)
                goto out;

        set_bit(I915_RESET_HANDOFF, &error->flags);
        i915_reset(i915, ALL_ENGINES, "live_workarounds");

        ok = verify_gt_engine_wa(i915, &lists, "after reset");

out:
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(i915, wakeref);
        igt_global_reset_unlock(i915);

        return ok ? 0 : -ESRCH;
}

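/*
 * Verify that the GT and engine workarounds survive per-engine resets,
 * both while the engine is idle and while it is busy with a spinner.
 */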
static int
live_engine_reset_gt_engine_workarounds(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
        enum intel_engine_id id;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        int ret = 0;

        if (!intel_has_reset_engine(i915))
                return 0;

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        igt_global_reset_lock(i915);
        wakeref = intel_runtime_pm_get(i915);

        reference_lists_init(i915, &lists);

        for_each_engine(engine, i915, id) {
                bool ok;

                pr_info("Verifying after %s reset...\n", engine->name);

                ok = verify_gt_engine_wa(i915, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                i915_reset_engine(engine, "live_workarounds");

                ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = igt_spinner_init(&spin, i915);
                if (ret)
                        goto err;

                rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                i915_request_add(rq);

                if (!igt_wait_for_spinner(&spin, rq)) {
                        pr_err("Spinner failed to start\n");
                        igt_spinner_fini(&spin);
                        ret = -ETIMEDOUT;
                        goto err;
                }

                i915_reset_engine(engine, "live_workarounds");

                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);

                ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }
        }

err:
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(i915, wakeref);
        igt_global_reset_unlock(i915);
        kernel_context_close(ctx);

        igt_flush_test(i915, I915_WAIT_LOCKED);

        return ret;
}

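/* Selftest entry point: run each live test with struct_mutex held. */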
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_gpu_reset_gt_engine_workarounds),
                SUBTEST(live_engine_reset_gt_engine_workarounds),
        };
        int err;

        if (i915_terminally_wedged(&i915->gpu_error))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        err = i915_subtests(tests, i915);
        mutex_unlock(&i915->drm.struct_mutex);

        return err;
}