/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"
#include "../i915_reset.h"

#include "igt_flush_test.h"
#include "igt_reset.h"
#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
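
/*
 * Reference copies of the GT and per-engine workaround lists, built
 * once up front so that the register state can be re-verified against
 * them after each reset.
 */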
#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
struct wa_lists {
        struct i915_wa_list gt_wa_list;
        struct {
                char name[REF_NAME_MAX];
                struct i915_wa_list wa_list;
        } engine[I915_NUM_ENGINES];
};

static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        wa_init_start(&lists->gt_wa_list, "GT_REF");
        gt_init_workarounds(i915, &lists->gt_wa_list);
        wa_init_finish(&lists->gt_wa_list);

        for_each_engine(engine, i915, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;
                char *name = lists->engine[id].name;

                snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);

                wa_init_start(wal, name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);
        }
}

static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, i915, id)
                intel_wa_list_free(&lists->engine[id].wa_list);

        intel_wa_list_free(&lists->gt_wa_list);
}
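
/*
 * Read back every RING_FORCE_TO_NONPRIV slot from the GPU's point of
 * view: emit one MI_STORE_REGISTER_MEM per slot so that the engine
 * itself dumps the live register values into a scratch object.
 */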
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
        const u32 base = engine->mmio_base;
        struct drm_i915_gem_object *result;
        intel_wakeref_t wakeref;
        struct i915_request *rq;
        struct i915_vma *vma;
        u32 srm, *cs;
        int err;
        int i;

        result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(result))
                return result;

        i915_gem_object_set_cache_level(result, I915_CACHE_LLC);

        cs = i915_gem_object_pin_map(result, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_obj;
        }
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_unpin_map(result);

        vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_obj;

        rq = ERR_PTR(-ENODEV);
        with_intel_runtime_pm(engine->i915, wakeref)
                rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto err_pin;
        }

        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto err_req;
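
        /*
         * On gen8+ MI_STORE_REGISTER_MEM takes a 64-bit address, so the
         * command is one dword longer; bumping the opcode's length field
         * accounts for that, and the trailing zero emitted below doubles
         * as the upper address dword.
         */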
        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;

        cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_req;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                *cs++ = srm;
                *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
                *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
                *cs++ = 0;
        }
        intel_ring_advance(rq, cs);

        i915_gem_object_get(result);
        i915_gem_object_set_active_reference(result);

        i915_request_add(rq);
        i915_vma_unpin(vma);

        return result;

err_req:
        i915_request_add(rq);
err_pin:
        i915_vma_unpin(vma);
err_obj:
        i915_gem_object_put(result);
        return ERR_PTR(err);
}
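
/*
 * Expected value for slot i: the whitelisted register if the slot is in
 * use, otherwise RING_NOPID, which unused slots are expected to read
 * back as.
 */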
static u32
get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

static void
print_results(const struct intel_engine_cs *engine, const u32 *results)
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = results[i];

                pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                        i, expected, actual);
        }
}
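
/*
 * Compare the GPU-visible contents of every RING_FORCE_TO_NONPRIV slot
 * against the expected whitelist, dumping all slots on the first
 * mismatch.
 */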
static int check_whitelist(struct i915_gem_context *ctx,
                           struct intel_engine_cs *engine)
{
        struct drm_i915_gem_object *results;
        struct igt_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;

        results = read_nonprivs(ctx, engine);
        if (IS_ERR(results))
                return PTR_ERR(results);

        err = 0;
        igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
                err = i915_gem_object_set_to_cpu_domain(results, false);
        if (i915_terminally_wedged(&ctx->i915->gpu_error))
                err = -EIO;
        if (err)
                goto out_put;

        vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto out_put;
        }

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);

                        err = -EINVAL;
                        break;
                }
        }

        i915_gem_object_unpin_map(results);
out_put:
        i915_gem_object_put(results);
        return err;
}
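
/*
 * Reset wrappers sharing one signature so that
 * check_whitelist_across_reset() can exercise either a full device
 * reset or a single engine reset.
 */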
static int do_device_reset(struct intel_engine_cs *engine)
{
        i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
        return 0;
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return i915_reset_engine(engine, "live_workarounds");
}
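
/*
 * Switch the engine away from the test context before the reset; when a
 * spinner is supplied, it keeps the scratch context busy so that the
 * reset is performed on an active, rather than idle, engine.
 */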
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
                          struct igt_spinner *spin)
{
        struct i915_gem_context *ctx;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        int err = 0;

        ctx = kernel_context(engine->i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        rq = ERR_PTR(-ENODEV);
        with_intel_runtime_pm(engine->i915, wakeref) {
                if (spin)
                        rq = igt_spinner_create_request(spin,
                                                        ctx, engine,
                                                        MI_NOOP);
                else
                        rq = i915_request_alloc(engine, ctx);
        }

        kernel_context_close(ctx);

        if (IS_ERR(rq)) {
                spin = NULL;
                err = PTR_ERR(rq);
                goto err;
        }

        i915_request_add(rq);

        if (spin && !igt_wait_for_spinner(spin, rq)) {
                pr_err("Spinner failed to start\n");
                err = -ETIMEDOUT;
        }

err:
        if (err && spin)
                igt_spinner_end(spin);

        return err;
}
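
/*
 * Verify that the whitelist survives a reset: check it before the
 * reset, reset while running a scratch context (kept busy with a
 * spinner for an engine reset), then check that both the original
 * context and a freshly created one still see the full whitelist.
 */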
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct drm_i915_private *i915 = engine->i915;
        bool want_spin = reset == do_engine_reset;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err;

        pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
                engine->whitelist.count, name);

        if (want_spin) {
                err = igt_spinner_init(&spin, i915);
                if (err)
                        return err;
        }

        ctx = kernel_context(i915);
        if (IS_ERR(ctx)) {
                /* Don't leak the spinner prepared above. */
                if (want_spin)
                        igt_spinner_fini(&spin);
                return PTR_ERR(ctx);
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
                goto out;
        }

        err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
        if (err)
                goto out;

        with_intel_runtime_pm(i915, wakeref)
                err = reset(engine);

        if (want_spin) {
                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);
        }

        if (err) {
                pr_err("%s reset failed\n", name);
                goto out;
        }

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Whitelist not preserved in context across %s reset!\n",
                       name);
                goto out;
        }

        kernel_context_close(ctx);

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = check_whitelist(ctx, engine);
        if (err) {
                pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
                       name);
                goto out;
        }

out:
        kernel_context_close(ctx);
        return err;
}

static int live_reset_whitelist(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine = i915->engine[RCS];
        int err = 0;

        /* If we reset the gpu, we should not lose the RING_NONPRIV */

        if (!engine || engine->whitelist.count == 0)
                return 0;

        igt_global_reset_lock(i915);

        if (intel_has_reset_engine(i915)) {
                err = check_whitelist_across_reset(engine,
                                                   do_engine_reset,
                                                   "engine");
                if (err)
                        goto out;
        }

        if (intel_has_gpu_reset(i915)) {
                err = check_whitelist_across_reset(engine,
                                                   do_device_reset,
                                                   "device");
                if (err)
                        goto out;
        }

out:
        igt_global_reset_unlock(i915);
        return err;
}
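
/*
 * Check the GT and every engine's workaround list against the current
 * hardware state, returning true only if every register still holds its
 * expected value.
 */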
static bool verify_gt_engine_wa(struct drm_i915_private *i915,
                                struct wa_lists *lists, const char *str)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool ok = true;

        ok &= wa_list_verify(i915, &lists->gt_wa_list, str);

        for_each_engine(engine, i915, id)
                ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);

        return ok;
}
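
/*
 * Check that the GT and engine workarounds are still applied after a
 * full GPU reset.
 */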
static int
live_gpu_reset_gt_engine_workarounds(void *arg)
{
        struct drm_i915_private *i915 = arg;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        bool ok;

        if (!intel_has_gpu_reset(i915))
                return 0;

        pr_info("Verifying after GPU reset...\n");

        igt_global_reset_lock(i915);
        wakeref = intel_runtime_pm_get(i915);

        reference_lists_init(i915, &lists);

        ok = verify_gt_engine_wa(i915, &lists, "before reset");
        if (!ok)
                goto out;

        i915_reset(i915, ALL_ENGINES, "live_workarounds");

        ok = verify_gt_engine_wa(i915, &lists, "after reset");

out:
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(i915, wakeref);
        igt_global_reset_unlock(i915);

        return ok ? 0 : -ESRCH;
}
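
/*
 * Check that the workarounds are still applied after a per-engine
 * reset, both when the engine is idle and when it is busy running a
 * spinner.
 */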
static int
live_engine_reset_gt_engine_workarounds(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        struct igt_spinner spin;
        enum intel_engine_id id;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
        struct wa_lists lists;
        int ret = 0;

        if (!intel_has_reset_engine(i915))
                return 0;

        ctx = kernel_context(i915);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        igt_global_reset_lock(i915);
        wakeref = intel_runtime_pm_get(i915);

        reference_lists_init(i915, &lists);

        for_each_engine(engine, i915, id) {
                bool ok;

                pr_info("Verifying after %s reset...\n", engine->name);

                ok = verify_gt_engine_wa(i915, &lists, "before reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                i915_reset_engine(engine, "live_workarounds");

                ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }

                ret = igt_spinner_init(&spin, i915);
                if (ret)
                        goto err;

                rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        igt_spinner_fini(&spin);
                        goto err;
                }

                i915_request_add(rq);

                if (!igt_wait_for_spinner(&spin, rq)) {
                        pr_err("Spinner failed to start\n");
                        igt_spinner_fini(&spin);
                        ret = -ETIMEDOUT;
                        goto err;
                }

                i915_reset_engine(engine, "live_workarounds");

                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);

                ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
                if (!ok) {
                        ret = -ESRCH;
                        goto err;
                }
        }

err:
        reference_lists_fini(i915, &lists);
        intel_runtime_pm_put(i915, wakeref);
        igt_global_reset_unlock(i915);
        kernel_context_close(ctx);

        igt_flush_test(i915, I915_WAIT_LOCKED);

        return ret;
}
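
/*
 * Entry point for the live workaround selftests; skipped entirely if
 * the GPU is already terminally wedged.
 */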
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_reset_whitelist),
                SUBTEST(live_gpu_reset_gt_engine_workarounds),
                SUBTEST(live_engine_reset_gt_engine_workarounds),
        };
        int err;

        if (i915_terminally_wedged(&i915->gpu_error))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        err = i915_subtests(tests, i915);
        mutex_unlock(&i915->drm.struct_mutex);

        return err;
}