drm/i915: Pass intel_gt to has-reset?
[linux-2.6-block.git] drivers/gpu/drm/i915/gt/selftest_workarounds.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"

#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static const struct wo_register {
	enum intel_platform platform;
	u32 reg;
} wo_registers[] = {
	{ INTEL_GEMINILAKE, 0x731c }
};

struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		struct i915_wa_list wa_list;
		struct i915_wa_list ctx_wa_list;
	} engine[I915_NUM_ENGINES];
};

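/*
 * Build reference copies of the GT, engine and context workaround lists so
 * that they can be compared against the live lists after a reset.
 */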
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	memset(lists, 0, sizeof(*lists));

	wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
	gt_init_workarounds(i915, &lists->gt_wa_list);
	wa_init_finish(&lists->gt_wa_list);

	for_each_engine(engine, i915, id) {
		struct i915_wa_list *wal = &lists->engine[id].wa_list;

		wa_init_start(wal, "REF", engine->name);
		engine_init_workarounds(engine, wal);
		wa_init_finish(wal);

		__intel_engine_init_ctx_wa(engine,
					   &lists->engine[id].ctx_wa_list,
					   "CTX_REF");
	}
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}

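/*
 * Submit a request that stores the current contents of every
 * RING_FORCE_TO_NONPRIV slot into a freshly created buffer object.
 */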
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);

	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_flush_map(result);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto err_req;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}

static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
	i915_reg_t reg = i < engine->whitelist.count ?
			 engine->whitelist.list[i].reg :
			 RING_NOPID(engine->mmio_base);

	return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
	unsigned int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = results[i];

		pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
			i, expected, actual);
	}
}

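/*
 * Read back the RING_FORCE_TO_NONPRIV slots via read_nonprivs() and compare
 * them against the whitelist we expect the engine to carry.
 */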
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct intel_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	i915_gem_object_lock(results);
	intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	i915_gem_object_unlock(results);
	if (intel_gt_is_wedged(&ctx->i915->gt))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}

static int do_device_reset(struct intel_engine_cs *engine)
{
	intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
	return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
	return intel_engine_reset(engine, "live_workarounds");
}

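/*
 * Move the engine away from the test context: start a spinner on a
 * throwaway kernel context so that the subsequent reset hits the engine
 * while a different context is active.
 */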
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(ctx));

	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	GEM_BUG_ON(IS_ERR(ce));

	rq = ERR_PTR(-ENODEV);
	with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
		rq = igt_spinner_create_request(spin, ce, MI_NOOP);

	intel_context_put(ce);
	kernel_context_close(ctx);

	if (IS_ERR(rq)) {
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	i915_request_add(rq);

	if (spin && !igt_wait_for_spinner(spin, rq)) {
		pr_err("Spinner failed to start\n");
		err = -ETIMEDOUT;
	}

err:
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}

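/*
 * Verify that the whitelist survives a reset: check it before the reset,
 * immediately afterwards in the same context, and then again from a freshly
 * created context.
 */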
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
					int (*reset)(struct intel_engine_cs *),
					const char *name)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_gem_context *ctx, *tmp;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err;

	pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
		engine->whitelist.count, engine->name, name);

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		goto out_ctx;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *before* %s reset!\n", name);
		goto out_spin;
	}

	err = switch_to_scratch_context(engine, &spin);
	if (err)
		goto out_spin;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = reset(engine);

	igt_spinner_end(&spin);

	if (err) {
		pr_err("%s reset failed\n", name);
		goto out_spin;
	}

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Whitelist not preserved in context across %s reset!\n",
		       name);
		goto out_spin;
	}

	tmp = kernel_context(i915);
	if (IS_ERR(tmp)) {
		err = PTR_ERR(tmp);
		goto out_spin;
	}
	kernel_context_close(ctx);
	ctx = tmp;

	err = check_whitelist(ctx, engine);
	if (err) {
		pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
		       name);
		goto out_spin;
	}

out_spin:
	igt_spinner_fini(&spin);
out_ctx:
	kernel_context_close(ctx);
	return err;
}

static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

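/*
 * Model the effect of a write to a register with reserved bits: when rsvd
 * is 0x0000ffff the register follows the masked-write convention (the high
 * 16 bits select which of the low 16 bits are updated), otherwise only the
 * bits set in rsvd take the new value and the rest are preserved.
 */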
static u32 reg_write(u32 old, u32 new, u32 rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(new >> 16);
		old |= new & (new >> 16);
	} else {
		old &= ~rsvd;
		old |= new & rsvd;
	}

	return old;
}

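/*
 * Treat a whitelist entry as write-only if it is tagged with WR access bits
 * or listed in the per-platform wo_registers table above.
 */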
static bool wo_register(struct intel_engine_cs *engine, u32 reg)
{
	enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
	int i;

	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_WR)
		return true;

	for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
		if (wo_registers[i].platform == platform &&
		    wo_registers[i].reg == reg)
			return true;
	}

	return false;
}

static bool ro_register(u32 reg)
{
	if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	     RING_FORCE_TO_NONPRIV_ACCESS_RD)
		return true;

	return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
	int count = engine->whitelist.count;
	int i;

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			count--;
	}

	return count;
}

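/*
 * For each whitelisted register, write a series of garbage values from an
 * unprivileged batch, read each result back into a scratch buffer and check
 * that the writes actually stuck (modulo read-only and write-only entries).
 */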
static int check_dirty_whitelist(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	const u32 values[] = {
		0x00000000,
		0x01010101,
		0x10100101,
		0x03030303,
		0x30300303,
		0x05050505,
		0x50500505,
		0x0f0f0f0f,
		0xf00ff00f,
		0x10101010,
		0xf0f01010,
		0x30303030,
		0xa0a03030,
		0x50505050,
		0xc0c05050,
		0xf0f0f0f0,
		0x11111111,
		0x33333333,
		0x55555555,
		0x0000ffff,
		0x00ff00ff,
		0xff0000ff,
		0xffff00ff,
		0xffffffff,
	};
	struct i915_vma *scratch;
	struct i915_vma *batch;
	int err = 0, i, v;
	u32 *cs, *results;

	scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
	if (IS_ERR(scratch))
		return PTR_ERR(scratch);

	batch = create_batch(ctx);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_scratch;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
		u64 addr = scratch->node.start;
		struct i915_request *rq;
		u32 srm, lrm, rsvd;
		u32 expect;
		int idx;
		bool ro_reg;

		if (wo_register(engine, reg))
			continue;

		ro_reg = ro_register(reg);

		srm = MI_STORE_REGISTER_MEM;
		lrm = MI_LOAD_REGISTER_MEM;
		if (INTEL_GEN(ctx->i915) >= 8)
			lrm++, srm++;

		pr_debug("%s: Writing garbage to %x\n",
			 engine->name, reg);

		cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			goto out_batch;
		}

		/* SRM original */
		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			/* LRI garbage */
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = reg;
			*cs++ = ~values[v];

			/* SRM result */
			*cs++ = srm;
			*cs++ = reg;
			*cs++ = lower_32_bits(addr + sizeof(u32) * idx);
			*cs++ = upper_32_bits(addr + sizeof(u32) * idx);
			idx++;
		}
		GEM_BUG_ON(idx * sizeof(u32) > scratch->size);

		/* LRM original -- don't leave garbage in the context! */
		*cs++ = lrm;
		*cs++ = reg;
		*cs++ = lower_32_bits(addr);
		*cs++ = upper_32_bits(addr);

		*cs++ = MI_BATCH_BUFFER_END;

		i915_gem_object_flush_map(batch->obj);
		i915_gem_object_unpin_map(batch->obj);
		intel_gt_chipset_flush(engine->gt);

		rq = igt_request_alloc(ctx, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_batch;
		}

		if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
			err = engine->emit_init_breadcrumb(rq);
			if (err)
				goto err_request;
		}

		i915_vma_lock(batch);
		err = i915_request_await_object(rq, batch->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(batch, rq, 0);
		i915_vma_unlock(batch);
		if (err)
			goto err_request;

		err = engine->emit_bb_start(rq,
					    batch->node.start, PAGE_SIZE,
					    0);
		if (err)
			goto err_request;

err_request:
		i915_request_add(rq);
		if (err)
			goto out_batch;

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("%s: Futzing %x timedout; cancelling test\n",
			       engine->name, reg);
			intel_gt_set_wedged(&ctx->i915->gt);
			err = -EIO;
			goto out_batch;
		}

		results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
		if (IS_ERR(results)) {
			err = PTR_ERR(results);
			goto out_batch;
		}

		GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
		if (!ro_reg) {
			/* detect write masking */
			rsvd = results[ARRAY_SIZE(values)];
			if (!rsvd) {
				pr_err("%s: Unable to write to whitelisted register %x\n",
				       engine->name, reg);
				err = -EINVAL;
				goto out_unpin;
			}
		}

		expect = results[0];
		idx = 1;
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		for (v = 0; v < ARRAY_SIZE(values); v++) {
			if (ro_reg)
				expect = results[0];
			else
				expect = reg_write(expect, ~values[v], rsvd);

			if (results[idx] != expect)
				err++;
			idx++;
		}
		if (err) {
			pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
			       engine->name, err, reg);

			if (ro_reg)
				pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
					engine->name, reg, results[0]);
			else
				pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
					engine->name, reg, results[0], rsvd);

			expect = results[0];
			idx = 1;
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}
			for (v = 0; v < ARRAY_SIZE(values); v++) {
				u32 w = ~values[v];

				if (ro_reg)
					expect = results[0];
				else
					expect = reg_write(expect, w, rsvd);
				pr_info("Wrote %08x, read %08x, expect %08x\n",
					w, results[idx], expect);
				idx++;
			}

			err = -EINVAL;
		}
out_unpin:
		i915_gem_object_unpin_map(scratch->obj);
		if (err)
			break;
	}

	if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
		err = -EIO;
out_batch:
	i915_vma_unpin_and_release(&batch, 0);
out_scratch:
	i915_vma_unpin_and_release(&scratch, 0);
	return err;
}

static int live_dirty_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct drm_file *file;
	int err = 0;

	/* Can the user write to the whitelisted registers? */

	if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
		return 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	mutex_unlock(&i915->drm.struct_mutex);
	file = mock_file(i915);
	mutex_lock(&i915->drm.struct_mutex);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto out_rpm;
	}

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	for_each_engine(engine, i915, id) {
		if (engine->whitelist.count == 0)
			continue;

		err = check_dirty_whitelist(ctx, engine);
		if (err)
			goto out_file;
	}

out_file:
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	mutex_lock(&i915->drm.struct_mutex);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return err;
}

static int live_reset_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine = i915->engine[RCS0];
	int err = 0;

	/* If we reset the gpu, we should not lose the RING_NONPRIV */

	if (!engine || engine->whitelist.count == 0)
		return 0;

	igt_global_reset_lock(&i915->gt);

	if (intel_has_reset_engine(&i915->gt)) {
		err = check_whitelist_across_reset(engine,
						   do_engine_reset,
						   "engine");
		if (err)
			goto out;
	}

	if (intel_has_gpu_reset(&i915->gt)) {
		err = check_whitelist_across_reset(engine,
						   do_device_reset,
						   "device");
		if (err)
			goto out;
	}

out:
	igt_global_reset_unlock(&i915->gt);
	return err;
}

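/*
 * Store the current value of every whitelisted register (with the access
 * permission field masked out) into the supplied results buffer.
 */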
static int read_whitelisted_registers(struct i915_gem_context *ctx,
				      struct intel_engine_cs *engine,
				      struct i915_vma *results)
{
	struct i915_request *rq;
	int i, err = 0;
	u32 srm, *cs;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	srm = MI_STORE_REGISTER_MEM;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	for (i = 0; i < engine->whitelist.count; i++) {
		u64 offset = results->node.start + sizeof(u32) * i;
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Clear access permission field */
		reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;

		*cs++ = srm;
		*cs++ = reg;
		*cs++ = lower_32_bits(offset);
		*cs++ = upper_32_bits(offset);
	}
	intel_ring_advance(rq, cs);

err_req:
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;

	return err;
}

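/*
 * From an unprivileged user batch, write 0xffffffff into every writable
 * whitelisted register, deliberately dirtying the whitelist for this
 * context.
 */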
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
				       struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int i, err = 0;
	u32 *cs;

	batch = create_batch(ctx);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		if (ro_register(reg))
			continue;

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch->obj);
	intel_gt_chipset_flush(engine->gt);

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto err_request;
	}

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto err_request;

	/* Perform the writes from an unprivileged "user" batch */
	err = engine->emit_bb_start(rq, batch->node.start, 0, 0);

err_request:
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;

err_unpin:
	i915_gem_object_unpin_map(batch->obj);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}

struct regmask {
	i915_reg_t reg;
	unsigned long gen_mask;
};

static bool find_reg(struct drm_i915_private *i915,
		     i915_reg_t reg,
		     const struct regmask *tbl,
		     unsigned long count)
{
	u32 offset = i915_mmio_reg_offset(reg);

	while (count--) {
		if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
		    i915_mmio_reg_offset(tbl->reg) == offset)
			return true;
		tbl++;
	}

	return false;
}

static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Alas, we must pardon some whitelists. Mistakes already made */
	static const struct regmask pardon[] = {
		{ GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
		{ GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
}

static bool result_eq(struct intel_engine_cs *engine,
		      u32 a, u32 b, i915_reg_t reg)
{
	if (a != b && !pardon_reg(engine->i915, reg)) {
		pr_err("Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
		       i915_mmio_reg_offset(reg), a, b);
		return false;
	}

	return true;
}

static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
{
	/* Some registers do not seem to behave, and our writes cannot be read back */
	static const struct regmask wo[] = {
		{ GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
	};

	return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
}

static bool result_neq(struct intel_engine_cs *engine,
		       u32 a, u32 b, i915_reg_t reg)
{
	if (a == b && !writeonly_reg(engine->i915, reg)) {
		pr_err("Whitelist register 0x%4x:%08x was unwritable\n",
		       i915_mmio_reg_offset(reg), a);
		return false;
	}

	return true;
}

static int
check_whitelisted_registers(struct intel_engine_cs *engine,
			    struct i915_vma *A,
			    struct i915_vma *B,
			    bool (*fn)(struct intel_engine_cs *engine,
				       u32 a, u32 b,
				       i915_reg_t reg))
{
	u32 *a, *b;
	int i, err;

	a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
	if (IS_ERR(b)) {
		err = PTR_ERR(b);
		goto err_a;
	}

	err = 0;
	for (i = 0; i < engine->whitelist.count; i++) {
		const struct i915_wa *wa = &engine->whitelist.list[i];

		if (i915_mmio_reg_offset(wa->reg) &
		    RING_FORCE_TO_NONPRIV_ACCESS_RD)
			continue;

		if (!fn(engine, a[i], b[i], wa->reg))
			err = -EINVAL;
	}

	i915_gem_object_unpin_map(B->obj);
err_a:
	i915_gem_object_unpin_map(A->obj);
	return err;
}

static int live_isolated_whitelist(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct {
		struct i915_gem_context *ctx;
		struct i915_vma *scratch[2];
	} client[2] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int i, err = 0;

	/*
	 * Check that a write into a whitelisted register works, but is
	 * invisible to a second context.
	 */

	if (!intel_engines_has_context_isolation(i915))
		return 0;

	if (!i915->kernel_context->vm)
		return 0;

	for (i = 0; i < ARRAY_SIZE(client); i++) {
		struct i915_gem_context *c;

		c = kernel_context(i915);
		if (IS_ERR(c)) {
			err = PTR_ERR(c);
			goto err;
		}

		client[i].scratch[0] = create_scratch(c->vm, 1024);
		if (IS_ERR(client[i].scratch[0])) {
			err = PTR_ERR(client[i].scratch[0]);
			kernel_context_close(c);
			goto err;
		}

		client[i].scratch[1] = create_scratch(c->vm, 1024);
		if (IS_ERR(client[i].scratch[1])) {
			err = PTR_ERR(client[i].scratch[1]);
			i915_vma_unpin_and_release(&client[i].scratch[0], 0);
			kernel_context_close(c);
			goto err;
		}

		client[i].ctx = c;
	}

	for_each_engine(engine, i915, id) {
		if (!whitelist_writable_count(engine))
			continue;

		/* Read default values */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[0]);
		if (err)
			goto err;

		/* Try to overwrite registers (should only affect ctx0) */
		err = scrub_whitelisted_registers(client[0].ctx, engine);
		if (err)
			goto err;

		/* Read values from ctx1, we expect these to be defaults */
		err = read_whitelisted_registers(client[1].ctx, engine,
						 client[1].scratch[0]);
		if (err)
			goto err;

		/* Verify that both reads return the same default values */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[1].scratch[0],
						  result_eq);
		if (err)
			goto err;

		/* Read back the updated values in ctx0 */
		err = read_whitelisted_registers(client[0].ctx, engine,
						 client[0].scratch[1]);
		if (err)
			goto err;

		/* User should be granted privilege to overwrite regs */
		err = check_whitelisted_registers(engine,
						  client[0].scratch[0],
						  client[0].scratch[1],
						  result_neq);
		if (err)
			goto err;
	}

err:
	for (i = 0; i < ARRAY_SIZE(client); i++) {
		if (!client[i].ctx)
			break;

		i915_vma_unpin_and_release(&client[i].scratch[1], 0);
		i915_vma_unpin_and_release(&client[i].scratch[0], 0);
		kernel_context_close(client[i].ctx);
	}

	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	return err;
}

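/*
 * Compare the live GT, engine and context workaround lists against the
 * reference copies captured by reference_lists_init().
 */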
static bool
verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
		const char *str)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	bool ok = true;

	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);

	for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) {
		enum intel_engine_id id = ce->engine->id;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].wa_list,
					    str) == 0;

		ok &= engine_wa_list_verify(ce,
					    &lists->engine[id].ctx_wa_list,
					    str) == 0;
	}

	return ok;
}

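/*
 * Check that the GT and engine workarounds are still applied after a full
 * GPU reset.
 */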
static int
live_gpu_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	bool ok;

	if (!intel_has_gpu_reset(&i915->gt))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_lock_engines(ctx);

	pr_info("Verifying after GPU reset...\n");

	igt_global_reset_lock(&i915->gt);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	ok = verify_wa_lists(ctx, &lists, "before reset");
	if (!ok)
		goto out;

	intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");

	ok = verify_wa_lists(ctx, &lists, "after reset");

out:
	i915_gem_context_unlock_engines(ctx);
	kernel_context_close(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(&i915->gt);

	return ok ? 0 : -ESRCH;
}

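/*
 * Check that the workarounds survive per-engine resets, both while the
 * engine is idle and while it is busy running a spinner.
 */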
static int
live_engine_reset_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct igt_spinner spin;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(&i915->gt))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(&i915->gt);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	reference_lists_init(i915, &lists);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct intel_engine_cs *engine = ce->engine;
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_wa_lists(ctx, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		ok = verify_wa_lists(ctx, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		ret = igt_spinner_init(&spin, engine->gt);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			ret = -ETIMEDOUT;
			goto err;
		}

		intel_engine_reset(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_wa_lists(ctx, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}
err:
	i915_gem_context_unlock_engines(ctx);
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	igt_global_reset_unlock(&i915->gt);
	kernel_context_close(ctx);

	igt_flush_test(i915, I915_WAIT_LOCKED);

	return ret;
}

int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_dirty_whitelist),
		SUBTEST(live_reset_whitelist),
		SUBTEST(live_isolated_whitelist),
		SUBTEST(live_gpu_reset_workarounds),
		SUBTEST(live_engine_reset_workarounds),
	};
	int err;

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}