/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_wedge_me.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

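/* Whitelisted registers that are write-only on the listed platform */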
static const struct wo_register {
        enum intel_platform platform;
        u32 reg;
} wo_registers[] = {
        { INTEL_GEMINILAKE, 0x731c }
};

#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                char name[REF_NAME_MAX];
                struct i915_wa_list wa_list;
                struct i915_wa_list ctx_wa_list;
        } engine[I915_NUM_ENGINES];
};

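/*
 * Snapshot the GT, per-engine and per-context workaround lists so that the
 * applied hardware state can be compared against them after a reset.
 */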
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        wa_init_start(&lists->gt_wa_list, "GT_REF");
        gt_init_workarounds(i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);

        for_each_engine(engine, i915, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;
                char *name = lists->engine[id].name;

                snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);

                wa_init_start(wal, name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);

                snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);

                __intel_engine_init_ctx_wa(engine,
                                           &lists->engine[id].ctx_wa_list,
                                           name);
        }
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, i915, id)
                intel_wa_list_free(&lists->engine[id].wa_list);

        intel_wa_list_free(&lists->gt_wa_list);
}

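/*
 * From within a request, SRM each RING_FORCE_TO_NONPRIV slot into a scratch
 * object so that the whitelist contents can be inspected from the CPU.
 */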
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
        const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 srm, *cs;
        int err;
        int i;

        result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(result))
                return result;

        i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

        cs = i915_gem_object_pin_map(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
        }
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_flush_map(result);
        i915_gem_object_unpin_map(result);

        vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_obj;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
        }

        i915_vma_lock(vma);
        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto err_req;

        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                *cs++ = srm;
                *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
                *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
                *cs++ = 0;
        }
        intel_ring_advance(rq, cs);

        i915_request_add(rq);
        i915_vma_unpin(vma);

        return result;

err_req:
        i915_request_add(rq);
err_pin:
        i915_vma_unpin(vma);
err_obj:
        i915_gem_object_put(result);
        return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = results[i];

                pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                        i, expected, actual);
        }
}

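/*
 * Read back the RING_NONPRIV slots and check that each one still selects
 * the register we expect from the engine's whitelist.
 */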
static int check_whitelist(struct i915_gem_context *ctx,
                           struct intel_engine_cs *engine)
{
        struct drm_i915_gem_object *results;
        struct igt_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;

        results = read_nonprivs(ctx, engine);
        if (IS_ERR(results))
                return PTR_ERR(results);

        err = 0;
        i915_gem_object_lock(results);
        igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
        i915_gem_object_unlock(results);
        if (i915_terminally_wedged(ctx->i915))
                err = -EIO;
        if (err)
                goto out_put;

        vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);

                        err = -EINVAL;
                        break;
                }
        }

        i915_gem_object_unpin_map(results);
out_put:
        i915_gem_object_put(results);
        return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
        i915_reset(engine->i915, engine->mask, "live_workarounds");
        return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return i915_reset_engine(engine, "live_workarounds");
}

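/*
 * Switch the engine away to a disposable kernel context, keeping it busy
 * with a spinner, before the caller performs its reset.
 */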
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
{
        struct i915_gem_context *ctx;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        int err = 0;

        ctx = kernel_context(engine->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

        rq = ERR_PTR(-ENODEV);
        with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
                rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);

        kernel_context_close(ctx);

        if (IS_ERR(rq)) {
                spin = NULL;
                err = PTR_ERR(rq);
                goto err;
        }

        i915_request_add(rq);

        if (spin && !igt_wait_for_spinner(spin, rq)) {
                pr_err("Spinner failed to start\n");
                err = -ETIMEDOUT;
        }

err:
        if (err && spin)
                igt_spinner_end(spin);

        return err;
}

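/*
 * Check the whitelist before a reset, again afterwards in the same context,
 * and finally in a freshly created context.
 */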
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct drm_i915_private *i915 = engine->i915;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err;

        pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
                engine->whitelist.count, name);

        err = igt_spinner_init(&spin, i915);
        if (err)
                return err;

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
                goto out;
        }

        err = switch_to_scratch_context(engine, &spin);
        if (err)
                goto out;

        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                err = reset(engine);

        igt_spinner_end(&spin);
        igt_spinner_fini(&spin);

        if (err) {
                pr_err("%s reset failed\n", name);
                goto out;
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Whitelist not preserved in context across %s reset!\n",
                       name);
                goto out;
        }

        kernel_context_close(ctx);

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
                       name);
                goto out;
        }

out:
        kernel_context_close(ctx);
        return err;
}

static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int err;

        obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, ctx->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err_obj;

        return vma;

err_obj:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

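/*
 * Compute the value we expect to read back after writing 'new' over 'old':
 * rsvd == 0x0000ffff denotes a masked register, where the upper 16 bits of
 * the write select which of the lower 16 bits are updated; otherwise rsvd
 * is a plain mask of the writable bits.
 */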
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
        if (rsvd == 0x0000ffff) {
                old &= ~(new >> 16);
                old |= new & (new >> 16);
        } else {
                old &= ~rsvd;
                old |= new & rsvd;
        }

        return old;
}

static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
        enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
        int i;

        for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
                if (wo_registers[i].platform == platform &&
                    wo_registers[i].reg == reg)
                        return true;
        }

        return false;
}

static bool ro_register(u32 reg)
{
        if (reg & RING_FORCE_TO_NONPRIV_RD)
                return true;

        return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
        int count = engine->whitelist.count;
        int i;

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        count--;
        }

        return count;
}

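/*
 * From a user batch, write a series of garbage values to every writable
 * whitelisted register and verify that each value read back matches what
 * reg_write() predicts for the register's writable bits.
 */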
static int check_dirty_whitelist(struct i915_gem_context *ctx,
                                 struct intel_engine_cs *engine)
{
        const u32 values[] = {
                0x00000000,
                0x01010101,
                0x10100101,
                0x03030303,
                0x30300303,
                0x05050505,
                0x50500505,
                0x0f0f0f0f,
                0xf00ff00f,
                0x10101010,
                0xf0f01010,
                0x30303030,
                0xa0a03030,
                0x50505050,
                0xc0c05050,
                0xf0f0f0f0,
                0x11111111,
                0x33333333,
                0x55555555,
                0x0000ffff,
                0x00ff00ff,
                0xff0000ff,
                0xffff00ff,
                0xffffffff,
        };
        struct i915_vma *scratch;
        struct i915_vma *batch;
        int err = 0, i, v;
        u32 *cs, *results;

        scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);

        batch = create_batch(ctx);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_scratch;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
                u64 addr = scratch->node.start;
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
                u32 expect;
                int idx;

                if (wo_register(engine, reg))
                        continue;

                if (ro_register(reg))
                        continue;

                srm = MI_STORE_REGISTER_MEM;
                lrm = MI_LOAD_REGISTER_MEM;
                if (INTEL_GEN(ctx->i915) >= 8)
                        lrm++, srm++;

                pr_debug("%s: Writing garbage to %x\n",
                         engine->name, reg);

                cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        goto out_batch;
                }

                /* SRM original */
                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        /* LRI garbage */
                        *cs++ = MI_LOAD_REGISTER_IMM(1);
                        *cs++ = reg;
                        *cs++ = ~values[v];

                        /* SRM result */
                        *cs++ = srm;
                        *cs++ = reg;
                        *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
                        *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
                        idx++;
                }
                GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

                /* LRM original -- don't leave garbage in the context! */
                *cs++ = lrm;
                *cs++ = reg;
                *cs++ = lower_32_bits(addr);
                *cs++ = upper_32_bits(addr);

                *cs++ = MI_BATCH_BUFFER_END;

                i915_gem_object_flush_map(batch->obj);
                i915_gem_object_unpin_map(batch->obj);
                i915_gem_chipset_flush(ctx->i915);

                rq = igt_request_alloc(ctx, engine);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto out_batch;
                }

                if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                        err = engine->emit_init_breadcrumb(rq);
                        if (err)
                                goto err_request;
                }

                err = engine->emit_bb_start(rq,
                                            batch->node.start, PAGE_SIZE,
                                            0);
                if (err)
                        goto err_request;

err_request:
                i915_request_add(rq);
                if (err)
                        goto out_batch;

                if (i915_request_wait(rq, 0, HZ / 5) < 0) {
                        pr_err("%s: Futzing %x timedout; cancelling test\n",
                               engine->name, reg);
                        i915_gem_set_wedged(ctx->i915);
                        err = -EIO;
                        goto out_batch;
                }

                results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
                if (IS_ERR(results)) {
                        err = PTR_ERR(results);
                        goto out_batch;
                }

                GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
                rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
                if (!rsvd) {
                        pr_err("%s: Unable to write to whitelisted register %x\n",
                               engine->name, reg);
                        err = -EINVAL;
                        goto out_unpin;
                }

                expect = results[0];
                idx = 1;
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        expect = reg_write(expect, values[v], rsvd);
                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                for (v = 0; v < ARRAY_SIZE(values); v++) {
                        expect = reg_write(expect, ~values[v], rsvd);
                        if (results[idx] != expect)
                                err++;
                        idx++;
                }
                if (err) {
                        pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
                               engine->name, err, reg);

                        pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
                                engine->name, reg, results[0], rsvd);

                        expect = results[0];
                        idx = 1;
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = values[v];

                                expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }
                        for (v = 0; v < ARRAY_SIZE(values); v++) {
                                u32 w = ~values[v];

                                expect = reg_write(expect, w, rsvd);
                                pr_info("Wrote %08x, read %08x, expect %08x\n",
                                        w, results[idx], expect);
                                idx++;
                        }

                        err = -EINVAL;
                }
out_unpin:
                i915_gem_object_unpin_map(scratch->obj);
                if (err)
                        break;
        }

        if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
                err = -EIO;
out_batch:
        i915_vma_unpin_and_release(&batch, 0);
out_scratch:
        i915_vma_unpin_and_release(&scratch, 0);
        return err;
}

static int live_dirty_whitelist(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        struct drm_file *file;
        int err = 0;

        /* Can the user write to the whitelisted registers? */

        if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
                return 0;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        mutex_unlock(&i915->drm.struct_mutex);
        file = mock_file(i915);
        mutex_lock(&i915->drm.struct_mutex);
        if (IS_ERR(file)) {
                err = PTR_ERR(file);
                goto out_rpm;
        }

        ctx = live_context(i915, file);
        if (IS_ERR(ctx)) {
                err = PTR_ERR(ctx);
                goto out_file;
        }

        for_each_engine(engine, i915, id) {
                if (engine->whitelist.count == 0)
                        continue;

                err = check_dirty_whitelist(ctx, engine);
                if (err)
                        goto out_file;
        }

out_file:
        mutex_unlock(&i915->drm.struct_mutex);
        mock_file_free(i915, file);
        mutex_lock(&i915->drm.struct_mutex);
out_rpm:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        return err;
}

static int live_reset_whitelist(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine = i915->engine[RCS0];
        int err = 0;

        /* If we reset the gpu, we should not lose the RING_NONPRIV */

        if (!engine || engine->whitelist.count == 0)
                return 0;

        igt_global_reset_lock(i915);

        if (intel_has_reset_engine(i915)) {
                err = check_whitelist_across_reset(engine,
                                                   do_engine_reset,
                                                   "engine");
                if (err)
                        goto out;
        }

        if (intel_has_gpu_reset(i915)) {
                err = check_whitelist_across_reset(engine,
                                                   do_device_reset,
                                                   "device");
                if (err)
                        goto out;
        }

out:
        igt_global_reset_unlock(i915);
        return err;
}

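/*
 * SRM each whitelisted register (with the RD/WR privilege flags stripped
 * from the offset) into the supplied results buffer for later comparison.
 */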
static int read_whitelisted_registers(struct i915_gem_context *ctx,
                                      struct intel_engine_cs *engine,
                                      struct i915_vma *results)
{
        struct i915_request *rq;
        int i, err = 0;
        u32 srm, *cs;

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        srm = MI_STORE_REGISTER_MEM;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < engine->whitelist.count; i++) {
                u64 offset = results->node.start + sizeof(u32) * i;
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                /* Clear RD only and WR only flags */
                reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR);

                *cs++ = srm;
                *cs++ = reg;
                *cs++ = lower_32_bits(offset);
                *cs++ = upper_32_bits(offset);
        }
        intel_ring_advance(rq, cs);

err_req:
        i915_request_add(rq);

        if (i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -EIO;

        return err;
}

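/*
 * From an unprivileged user batch, write 0xffffffff into every writable
 * whitelisted register.
 */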
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
                                       struct intel_engine_cs *engine)
{
        struct i915_request *rq;
        struct i915_vma *batch;
        int i, err = 0;
        u32 *cs;

        batch = create_batch(ctx);
        if (IS_ERR(batch))
                return PTR_ERR(batch);

        cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
        }

        *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        continue;

                *cs++ = reg;
                *cs++ = 0xffffffff;
        }
        *cs++ = MI_BATCH_BUFFER_END;

        i915_gem_object_flush_map(batch->obj);
        i915_gem_chipset_flush(ctx->i915);

        rq = igt_request_alloc(ctx, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_unpin;
        }

        if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        goto err_request;
        }

        /* Perform the writes from an unprivileged "user" batch */
        err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
        i915_request_add(rq);
        if (i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -EIO;

err_unpin:
        i915_gem_object_unpin_map(batch->obj);
err_batch:
        i915_vma_unpin_and_release(&batch, 0);
        return err;
}

struct regmask {
        i915_reg_t reg;
        unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
                     i915_reg_t reg,
                     const struct regmask *tbl,
                     unsigned long count)
{
        u32 offset = i915_mmio_reg_offset(reg);

        while (count--) {
                if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
                    i915_mmio_reg_offset(tbl->reg) == offset)
                        return true;
                tbl++;
        }

        return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Alas, we must pardon some whitelists. Mistakes already made */
        static const struct regmask pardon[] = {
                { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
                { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
                      u32 a, u32 b, i915_reg_t reg)
{
        if (a != b && !pardon_reg(engine->i915, reg)) {
                pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
                       i915_mmio_reg_offset(reg), a, b);
                return false;
        }

        return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
        /* Some registers do not seem to behave and our writes unreadable */
        static const struct regmask wo[] = {
                { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
        };

        return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
                       u32 a, u32 b, i915_reg_t reg)
{
        if (a == b && !writeonly_reg(engine->i915, reg)) {
                pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
                       i915_mmio_reg_offset(reg), a);
                return false;
        }

        return true;
}

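/*
 * Compare two snapshots of the whitelisted registers with the given
 * predicate, skipping entries that are marked read-only.
 */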
static int
check_whitelisted_registers(struct intel_engine_cs *engine,
                            struct i915_vma *A,
                            struct i915_vma *B,
                            bool (*fn)(struct intel_engine_cs *engine,
                                       u32 a, u32 b,
                                       i915_reg_t reg))
{
        u32 *a, *b;
        int i, err;

        a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
        if (IS_ERR(a))
                return PTR_ERR(a);

        b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
        if (IS_ERR(b)) {
                err = PTR_ERR(b);
                goto err_a;
        }

        err = 0;
        for (i = 0; i < engine->whitelist.count; i++) {
                const struct i915_wa *wa = &engine->whitelist.list[i];

                if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
                        continue;

                if (!fn(engine, a[i], b[i], wa->reg))
                        err = -EINVAL;
        }

        i915_gem_object_unpin_map(B->obj);
err_a:
        i915_gem_object_unpin_map(A->obj);
        return err;
}

static int live_isolated_whitelist(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct {
                struct i915_gem_context *ctx;
                struct i915_vma *scratch[2];
        } client[2] = {};
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int i, err = 0;

        /*
         * Check that a write into a whitelist register works, but is
         * invisible to a second context.
         */

        if (!intel_engines_has_context_isolation(i915))
                return 0;

        if (!i915->kernel_context->vm)
                return 0;

        for (i = 0; i < ARRAY_SIZE(client); i++) {
                struct i915_gem_context *c;

                c = kernel_context(i915);
                if (IS_ERR(c)) {
                        err = PTR_ERR(c);
                        goto err;
                }

                client[i].scratch[0] = create_scratch(c->vm, 1024);
                if (IS_ERR(client[i].scratch[0])) {
                        err = PTR_ERR(client[i].scratch[0]);
                        kernel_context_close(c);
                        goto err;
                }

                client[i].scratch[1] = create_scratch(c->vm, 1024);
                if (IS_ERR(client[i].scratch[1])) {
                        err = PTR_ERR(client[i].scratch[1]);
                        i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                        kernel_context_close(c);
                        goto err;
                }

                client[i].ctx = c;
        }

        for_each_engine(engine, i915, id) {
                if (!whitelist_writable_count(engine))
                        continue;

                /* Read default values */
                err = read_whitelisted_registers(client[0].ctx, engine,
                                                 client[0].scratch[0]);
                if (err)
                        goto err;

                /* Try to overwrite registers (should only affect ctx0) */
                err = scrub_whitelisted_registers(client[0].ctx, engine);
                if (err)
                        goto err;

                /* Read values from ctx1, we expect these to be defaults */
                err = read_whitelisted_registers(client[1].ctx, engine,
                                                 client[1].scratch[0]);
                if (err)
                        goto err;

                /* Verify that both reads return the same default values */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[1].scratch[0],
                                                  result_eq);
                if (err)
                        goto err;

                /* Read back the updated values in ctx0 */
                err = read_whitelisted_registers(client[0].ctx, engine,
                                                 client[0].scratch[1]);
                if (err)
                        goto err;

                /* User should be granted privilege to overwrite regs */
                err = check_whitelisted_registers(engine,
                                                  client[0].scratch[0],
                                                  client[0].scratch[1],
                                                  result_neq);
                if (err)
                        goto err;
        }

err:
        for (i = 0; i < ARRAY_SIZE(client); i++) {
                if (!client[i].ctx)
                        break;

                i915_vma_unpin_and_release(&client[i].scratch[1], 0);
                i915_vma_unpin_and_release(&client[i].scratch[0], 0);
                kernel_context_close(client[i].ctx);
        }

        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;

        return err;
}

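/*
 * Check the currently applied GT, engine and context workaround lists
 * against the reference copies taken in reference_lists_init().
 */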
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
                const char *str)
{
        struct drm_i915_private *i915 = ctx->i915;
        struct i915_gem_engines_iter it;
        struct intel_context *ce;
        bool ok = true;

        ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
                enum intel_engine_id id = ce->engine->id;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].wa_list,
                                            str) == 0;

                ok &= engine_wa_list_verify(ce,
                                            &lists->engine[id].ctx_wa_list,
                                            str) == 0;
        }
        i915_gem_context_unlock_engines(ctx);

        return ok;
}

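/*
 * Verify that the GT, engine and context workarounds are still applied
 * after a full GPU reset.
 */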
static int
live_gpu_reset_workarounds(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;

        if (!intel_has_gpu_reset(i915))
                return 0;

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        pr_info("Verifying after GPU reset...\n");

        igt_global_reset_lock(i915);
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        reference_lists_init(i915, &lists);

        ok = verify_wa_lists(ctx, &lists, "before reset");
        if (!ok)
                goto out;

        i915_reset(i915, ALL_ENGINES, "live_workarounds");

        ok = verify_wa_lists(ctx, &lists, "after reset");

out:
        kernel_context_close(ctx);
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        igt_global_reset_unlock(i915);

        return ok ? 0 : -ESRCH;
}

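/*
 * Verify that the workarounds survive a per-engine reset, both while the
 * engine is idle and while it is busy running a spinner.
 */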
static int
live_engine_reset_workarounds(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
        enum intel_engine_id id;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        int ret = 0;

        if (!intel_has_reset_engine(i915))
                return 0;

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        igt_global_reset_lock(i915);
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        reference_lists_init(i915, &lists);

        for_each_engine(engine, i915, id) {
                bool ok;

                pr_info("Verifying after %s reset...\n", engine->name);

                ok = verify_wa_lists(ctx, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                i915_reset_engine(engine, "live_workarounds");

                ok = verify_wa_lists(ctx, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = igt_spinner_init(&spin, i915);
                if (ret)
                        goto err;

                rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                i915_request_add(rq);

                if (!igt_wait_for_spinner(&spin, rq)) {
                        pr_err("Spinner failed to start\n");
                        igt_spinner_fini(&spin);
                        ret = -ETIMEDOUT;
                        goto err;
                }

                i915_reset_engine(engine, "live_workarounds");

                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);

                ok = verify_wa_lists(ctx, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }
        }

err:
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        igt_global_reset_unlock(i915);
        kernel_context_close(ctx);

        igt_flush_test(i915, I915_WAIT_LOCKED);

        return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_dirty_whitelist),
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_isolated_whitelist),
                SUBTEST(live_gpu_reset_workarounds),
                SUBTEST(live_engine_reset_workarounds),
        };
        int err;

        if (i915_terminally_wedged(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        err = i915_subtests(tests, i915);
        mutex_unlock(&i915->drm.struct_mutex);

        return err;
}