// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

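/*
 * Small helpers for the tests below: request_add_sync() submits a request
 * and waits briefly (HZ / 5) for it to complete, folding a timeout into the
 * returned error; request_add_spin() instead waits for the accompanying
 * igt_spinner to start executing on the GPU.
 */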
static int request_add_sync(struct i915_request *rq, int err)
	if (i915_request_wait(rq, 0, HZ / 5) < 0)

static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
	if (spin && !igt_wait_for_spinner(spin, rq))

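/*
 * Build reference workaround lists straight from the driver's workaround
 * tables (GT-wide, per-engine and per-context), so that the values actually
 * programmed into the hardware can later be verified against them.
 */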
static void
reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(gt, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, gt, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

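/*
 * read_nonprivs() poisons a scratch page with 0xc5 and then submits a
 * request that uses MI_STORE_REGISTER_MEM to copy every
 * RING_FORCE_TO_NONPRIV slot of the engine into that page, so the whitelist
 * currently programmed in hardware can be inspected from the CPU.
 */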
static struct drm_i915_gem_object *
read_nonprivs(struct intel_context *ce)
	struct intel_engine_cs *engine = ce->engine;
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map_unlocked(result, I915_MAP_WB);

	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);

	rq = intel_context_create_request(ce);

	err = i915_request_await_object(rq, vma->obj, true);
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(engine->i915) >= 8)

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;

	intel_ring_advance(rq, cs);

	i915_request_add(rq);

	i915_request_add(rq);

	i915_gem_object_put(result);

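/*
 * check_whitelist() compares the slots captured by read_nonprivs() against
 * the expected engine whitelist: slot i must hold the i'th whitelisted
 * register, and any unused slot must point at the harmless RING_NOPID
 * register. On mismatch the whole table is dumped via print_results().
 */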
static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

static int check_whitelist(struct intel_context *ce)
	struct intel_engine_cs *engine = ce->engine;
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;

	results = read_nonprivs(ce);
		return PTR_ERR(results);

	i915_gem_object_lock(results, NULL);
	intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);

	if (intel_gt_is_wedged(engine->gt))

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
		err = PTR_ERR(vaddr);

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

	i915_gem_object_unpin_map(results);

	i915_gem_object_unlock(results);
	i915_gem_object_put(results);

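/*
 * Three flavours of reset used by the whitelist-across-reset tests: a full
 * GT reset, an individual engine reset, and a GuC-based reset (where the
 * reset itself is triggered by the GuC, so the helper is a no-op).
 */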
static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

static int do_guc_reset(struct intel_engine_cs *engine)
{
	/* Currently a no-op as the reset is handled by GuC */
	return 0;
}

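/*
 * Start a spinner on a throwaway context so that the engine is busy with
 * *another* context when the reset is triggered; the whitelist must still
 * be preserved for the original context afterwards.
 */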
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin,
			  struct i915_request **rq)
{
	struct intel_context *ce;

	ce = intel_context_create(engine);

	*rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	intel_context_put(ce);

	err = request_add_spin(*rq, spin);

		igt_spinner_end(spin);

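/*
 * Verify that the RING_NONPRIV whitelist survives a reset: check it before
 * the reset, after the reset in the same context, and again in a freshly
 * created context.
 */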
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct intel_context *ce, *tmp;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ce = intel_context_create(engine);

	err = igt_spinner_init(&spin, engine->gt);

	err = check_whitelist(ce);
		pr_err("Invalid whitelist *before* %s reset!\n", name);

	err = switch_to_scratch_context(engine, &spin, &rq);

	/* Ensure the spinner hasn't aborted */
	if (i915_request_completed(rq)) {
		pr_err("%s spinner failed to start\n", name);

	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
		err = reset(engine);

	/* Ensure the reset happens and kills the engine */
	if (err == 0)
		err = intel_selftest_wait_for_rq(rq);

	igt_spinner_end(&spin);

		pr_err("%s reset failed\n", name);

	err = check_whitelist(ce);
		pr_err("Whitelist not preserved in context across %s reset!\n",

	tmp = intel_context_create(engine);

	intel_context_put(ce);
	ce = tmp;

	err = check_whitelist(ce);
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",

	igt_spinner_fini(&spin);

	intel_context_put(ce);

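/*
 * create_batch() allocates and pins a 16-page internal object into the
 * context's address space for building user batches. reg_write() models
 * what a write should do to a register value: a rsvd pattern of 0x0000ffff
 * denotes a masked register, where the upper 16 bits of the written value
 * select which of the lower 16 bits are updated.
 */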
static struct i915_vma *create_batch(struct i915_address_space *vm)
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);

	i915_gem_object_put(obj);

static u32 reg_write(u32 old, u32 new, u32 rsvd)
	if (rsvd == 0x0000ffff) {
		old |= new & (new >> 16);

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool timestamp(const struct intel_engine_cs *engine, u32 reg)
	reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

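/*
 * check_dirty_whitelist() exercises each whitelisted register from a user
 * batch: it saves the original value, writes a series of garbage values
 * (and their complements) via MI_LOAD_REGISTER_IMM, stores the result after
 * every write with MI_STORE_REGISTER_MEM, and finally restores the original
 * value. The readbacks are then compared against reg_write()'s prediction,
 * using the readback after writing 0xffffffff to detect write masking.
 * Write-only and auto-incrementing (timestamp) registers are skipped.
 */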
static int check_dirty_whitelist(struct intel_context *ce)
	const u32 values[] = {

	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v, sz;

	sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
	scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
		return PTR_ERR(scratch);

	batch = create_batch(ce->vm);
		err = PTR_ERR(batch);

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		struct i915_gem_ww_ctx ww;
		u64 addr = scratch->node.start;
		struct i915_request *rq;

		if (wo_register(engine, reg))

		if (timestamp(engine, reg))
			continue; /* timestamps are expected to autoincrement */

		ro_reg = ro_register(reg);

		i915_gem_ww_ctx_init(&ww, false);

		err = i915_gem_object_lock(scratch->obj, &ww);
			err = i915_gem_object_lock(batch->obj, &ww);
			err = intel_context_pin_ww(ce, &ww);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_unmap_batch;

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (GRAPHICS_VER(engine->i915) >= 8)

		pr_debug("%s: Writing garbage to %x\n",

		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		for (v = 0; v < ARRAY_SIZE(values); v++) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);

			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);

		for (v = 0; v < ARRAY_SIZE(values); v++) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);

			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);

		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = i915_request_create(ce);
			goto out_unmap_scratch;

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);

		err = i915_request_await_object(rq, batch->obj, false);
			err = i915_vma_move_to_active(batch, rq, 0);

		err = i915_request_await_object(rq, scratch->obj, true);
			err = i915_vma_move_to_active(scratch, rq,

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,

		err = request_add_sync(rq, err);
			pr_err("%s: Futzing %x timedout; cancelling test\n",
			intel_gt_set_wedged(engine->gt);
			goto out_unmap_scratch;

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);

		/* detect write masking */
		rsvd = results[ARRAY_SIZE(values)];
			pr_err("%s: Unable to write to whitelisted register %x\n",
			goto out_unmap_scratch;

		for (v = 0; v < ARRAY_SIZE(values); v++) {
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)

		for (v = 0; v < ARRAY_SIZE(values); v++) {
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)

			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
				engine->name, reg, results[0]);

			pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
				engine->name, reg, results[0], rsvd);

			for (v = 0; v < ARRAY_SIZE(values); v++) {
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);

			for (v = 0; v < ARRAY_SIZE(values); v++) {
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);

		i915_gem_object_unpin_map(scratch->obj);

		i915_gem_object_unpin_map(batch->obj);

		intel_context_unpin(ce);

		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);

		i915_gem_ww_ctx_fini(&ww);

	if (igt_flush_test(engine->i915))

	i915_vma_unpin_and_release(&batch, 0);

	i915_vma_unpin_and_release(&scratch, 0);

static int live_dirty_whitelist(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Can the user write to the whitelisted registers? */

	if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		int err;

		if (engine->whitelist.count == 0)
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		err = check_dirty_whitelist(ce);
		intel_context_put(ce);
		if (err)
			return err;
	}

	return 0;
}

static int live_reset_whitelist(void *arg)
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */
	igt_global_reset_lock(gt);

	for_each_engine(engine, gt, id) {
		if (engine->whitelist.count == 0)

		if (intel_has_reset_engine(gt)) {
			if (intel_engine_uses_guc(engine)) {
				struct intel_selftest_saved_policy saved;

				err = intel_selftest_modify_policy(engine, &saved,
								   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);

				err = check_whitelist_across_reset(engine,

				err2 = intel_selftest_restore_policy(engine, &saved);

				err = check_whitelist_across_reset(engine,

		if (intel_has_gpu_reset(gt)) {
			err = check_whitelist_across_reset(engine,

	igt_global_reset_unlock(gt);

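/*
 * read_whitelisted_registers() copies the current value of every
 * whitelisted register into @results using MI_STORE_REGISTER_MEM, one u32
 * slot per whitelist entry.
 */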
static int read_whitelisted_registers(struct intel_context *ce,
				      struct i915_vma *results)
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;

	rq = intel_context_create_request(ce);

	i915_vma_lock(results);
	err = i915_request_await_object(rq, results->obj, true);
		err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(results);

	srm = MI_STORE_REGISTER_MEM;
	if (GRAPHICS_VER(engine->i915) >= 8)

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);

	intel_ring_advance(rq, cs);

	return request_add_sync(rq, err);

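/*
 * scrub_whitelisted_registers() emits an unprivileged user batch that
 * overwrites every writable whitelisted register (read-only entries are
 * skipped) with a throwaway value, so a second context can later be checked
 * for any leakage of those writes.
 */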
static int scrub_whitelisted_registers(struct intel_context *ce)
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq;
	struct i915_vma *batch;

	batch = create_batch(ce->vm);
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))

		/* Clear non priv flags */
		reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;

	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = intel_context_create_request(ce);

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

	err = request_add_sync(rq, err);

	i915_gem_object_unpin_map(batch->obj);

	i915_vma_unpin_and_release(&batch, 0);

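/*
 * Lookup helpers for known-problematic registers. The struct regmask
 * entries pair a register with the graphics version they apply to:
 * pardon_reg() lists whitelist mistakes that are not context saved, and
 * writeonly_reg() lists registers whose writes cannot be read back.
 */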
static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (GRAPHICS_VER(i915) == tbl->graphics_ver &&
		    i915_mmio_reg_offset(tbl->reg) == offset)

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, 9 },
		{ GEN8_L3SQCREG4, 9 },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, and our writes are unreadable */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, 9 },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);

	b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);

	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)

		if (!fn(engine, a[i], b[i], wa->reg))

	i915_gem_object_unpin_map(B->obj);

	i915_gem_object_unpin_map(A->obj);

static int live_isolated_whitelist(void *arg)
	struct intel_gt *gt = arg;
	struct {
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Check that a write into a whitelisted register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(gt->i915))

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		client[i].scratch[0] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);

		client[i].scratch[1] =
			__vm_create_scratch_for_read_pinned(gt->vm, 4096);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce[2];

		if (!engine->kernel_context->vm)

		if (!whitelist_writable_count(engine))

		ce[0] = intel_context_create(engine);
		if (IS_ERR(ce[0])) {
			err = PTR_ERR(ce[0]);

		ce[1] = intel_context_create(engine);
		if (IS_ERR(ce[1])) {
			err = PTR_ERR(ce[1]);
			intel_context_put(ce[0]);

		/* Read default values */
		err = read_whitelisted_registers(ce[0], client[0].scratch[0]);

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(ce[0]);

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(ce[1], client[1].scratch[0]);

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(ce[0], client[0].scratch[1]);

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);

		intel_context_put(ce[1]);
		intel_context_put(ce[0]);

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);

	if (igt_flush_test(gt->i915))

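/*
 * verify_wa_lists() checks every value in the reference workaround lists
 * (GT, engine and context) against what is currently programmed in the
 * hardware, reporting via @str which phase of the test is being verified.
 */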
static bool
verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
		const char *str)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool ok = true;

	ok &= wa_list_verify(gt, &lists->gt_wa_list, str);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return false;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;

		intel_context_put(ce);
	}

	return ok;
}

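/*
 * Verify that the GT and engine workarounds are still applied after a full
 * GPU reset.
 */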
static int
live_gpu_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;

	if (!intel_has_gpu_reset(gt))

	lists = kzalloc(sizeof(*lists), GFP_KERNEL);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	ok = verify_wa_lists(gt, lists, "before reset");

	intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(gt, lists, "after reset");

	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return ok ? 0 : -ESRCH;

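/*
 * Verify that the workarounds survive a per-engine reset, both when the
 * engine is idle and while it is busy running a spinner, including when the
 * reset is handled by the GuC.
 */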
static int
live_engine_reset_workarounds(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists *lists;

	if (!intel_has_reset_engine(gt))

	lists = kzalloc(sizeof(*lists), GFP_KERNEL);

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reference_lists_init(gt, lists);

	for_each_engine(engine, gt, id) {
		struct intel_selftest_saved_policy saved;
		bool using_guc = intel_engine_uses_guc(engine);

		pr_info("Verifying after %s reset...\n", engine->name);
		ret = intel_selftest_modify_policy(engine, &saved,
						   SELFTEST_SCHEDULER_MODIFY_FAST_RESET);

		ce = intel_context_create(engine);

		ok = verify_wa_lists(gt, lists, "before reset");

			ret = intel_engine_reset(engine, "live_workarounds:idle");
				pr_err("%s: Reset failed while idle\n", engine->name);

			ok = verify_wa_lists(gt, lists, "after idle reset");

		ret = igt_spinner_init(&spin, engine->gt);

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
			igt_spinner_fini(&spin);

		ret = request_add_spin(rq, &spin);
			pr_err("%s: Spinner failed to start\n", engine->name);
			igt_spinner_fini(&spin);

		/* Ensure the spinner hasn't aborted */
		if (i915_request_completed(rq)) {

			ret = intel_engine_reset(engine, "live_workarounds:active");
				pr_err("%s: Reset failed on an active spinner\n",
				igt_spinner_fini(&spin);

		/* Ensure the reset happens and kills the engine */
		ret = intel_selftest_wait_for_rq(rq);

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(gt, lists, "after busy reset");

		intel_context_put(ce);

		ret2 = intel_selftest_restore_policy(engine, &saved);

	reference_lists_fini(gt, lists);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	igt_flush_test(gt->i915);

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}