drm/i915: Syntactic sugar for using intel_runtime_pm
[linux-block.git] / drivers / gpu / drm / i915 / selftests / intel_workarounds.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6
7 #include "../i915_selftest.h"
8
9 #include "igt_flush_test.h"
10 #include "igt_reset.h"
11 #include "igt_spinner.h"
12 #include "igt_wedge_me.h"
13 #include "mock_context.h"
14
/* Room for "<engine name>_REF": engine name plus the "_REF" suffix. */
#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)

/*
 * Reference copies of the GT and per-engine workaround lists, captured
 * before a reset so the live lists can be verified against them afterwards.
 */
struct wa_lists {
	struct i915_wa_list gt_wa_list;
	struct {
		char name[REF_NAME_MAX]; /* backing storage for wa_list's name */
		struct i915_wa_list wa_list;
	} engine[I915_NUM_ENGINES];
};
23
24 static void
25 reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
26 {
27         struct intel_engine_cs *engine;
28         enum intel_engine_id id;
29
30         memset(lists, 0, sizeof(*lists));
31
32         wa_init_start(&lists->gt_wa_list, "GT_REF");
33         gt_init_workarounds(i915, &lists->gt_wa_list);
34         wa_init_finish(&lists->gt_wa_list);
35
36         for_each_engine(engine, i915, id) {
37                 struct i915_wa_list *wal = &lists->engine[id].wa_list;
38                 char *name = lists->engine[id].name;
39
40                 snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
41
42                 wa_init_start(wal, name);
43                 engine_init_workarounds(engine, wal);
44                 wa_init_finish(wal);
45         }
46 }
47
/* Release the reference workaround lists built by reference_lists_init(). */
static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		intel_wa_list_free(&lists->engine[id].wa_list);

	intel_wa_list_free(&lists->gt_wa_list);
}
59
/*
 * Submit a batch on @engine that uses MI_STORE_REGISTER_MEM to copy each
 * RING_FORCE_TO_NONPRIV (whitelist) slot into a scratch object, and return
 * that object so the caller can inspect the values read back from hardware.
 * Returns the object on success, or an ERR_PTR on failure.
 */
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	const u32 base = engine->mmio_base;
	struct drm_i915_gem_object *result;
	intel_wakeref_t wakeref;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 srm, *cs;
	int err;
	int i;

	result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(result))
		return result;

	i915_gem_object_set_cache_level(result, I915_CACHE_LLC);

	/* Poison the destination so stale/unwritten slots are easy to spot. */
	cs = i915_gem_object_pin_map(result, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_obj;
	}
	memset(cs, 0xc5, PAGE_SIZE);
	i915_gem_object_unpin_map(result);

	vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	/* Hold a runtime-pm wakeref just around the request allocation. */
	rq = ERR_PTR(-ENODEV);
	with_intel_runtime_pm(engine->i915, wakeref)
		rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_pin;
	}

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto err_req;

	/* On gen8+ the SRM command is one dword longer (64-bit address). */
	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (INTEL_GEN(ctx->i915) >= 8)
		srm++;

	cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_req;
	}

	/* One SRM per whitelist slot: dump the register into result[i]. */
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;
		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	/* Keep the object alive until the request retires. */
	i915_gem_object_get(result);
	i915_gem_object_set_active_reference(result);

	i915_request_add(rq);
	i915_vma_unpin(vma);

	return result;

err_req:
	i915_request_add(rq);
err_pin:
	i915_vma_unpin(vma);
err_obj:
	i915_gem_object_put(result);
	return ERR_PTR(err);
}
142
143 static u32
144 get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
145 {
146         i915_reg_t reg = i < engine->whitelist.count ?
147                          engine->whitelist.list[i].reg :
148                          RING_NOPID(engine->mmio_base);
149
150         return i915_mmio_reg_offset(reg);
151 }
152
153 static void
154 print_results(const struct intel_engine_cs *engine, const u32 *results)
155 {
156         unsigned int i;
157
158         for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
159                 u32 expected = get_whitelist_reg(engine, i);
160                 u32 actual = results[i];
161
162                 pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
163                         i, expected, actual);
164         }
165 }
166
/*
 * Read back every RING_NONPRIV slot via read_nonprivs() and compare the
 * values against the expected whitelist for @engine. Returns 0 when all
 * slots match, -EINVAL on a mismatch, or another negative error code.
 */
static int check_whitelist(struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *results;
	struct igt_wedge_me wedge;
	u32 *vaddr;
	int err;
	int i;

	results = read_nonprivs(ctx, engine);
	if (IS_ERR(results))
		return PTR_ERR(results);

	err = 0;
	/* Wedge the GPU if the results never land, rather than hang here. */
	igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
		err = i915_gem_object_set_to_cpu_domain(results, false);
	if (i915_terminally_wedged(&ctx->i915->gpu_error))
		err = -EIO;
	if (err)
		goto out_put;

	vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	/* Compare each slot; dump the full table on the first mismatch. */
	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		u32 expected = get_whitelist_reg(engine, i);
		u32 actual = vaddr[i];

		if (expected != actual) {
			print_results(engine, vaddr);
			pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
			       i, expected, actual);

			err = -EINVAL;
			break;
		}
	}

	i915_gem_object_unpin_map(results);
out_put:
	i915_gem_object_put(results);
	return err;
}
213
214 static int do_device_reset(struct intel_engine_cs *engine)
215 {
216         set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
217         i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
218         return 0;
219 }
220
/* Per-engine reset; returns the i915_reset_engine() error code. */
static int do_engine_reset(struct intel_engine_cs *engine)
{
	return i915_reset_engine(engine, "live_workarounds");
}
225
/*
 * Switch @engine away to a throwaway kernel context, so the subsequent
 * reset is not observed from the context under test. When @spin is
 * non-NULL, the scratch context keeps the engine busy with a spinner so
 * an engine reset hits an active request.
 */
static int
switch_to_scratch_context(struct intel_engine_cs *engine,
			  struct igt_spinner *spin)
{
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	int err = 0;

	ctx = kernel_context(engine->i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* Hold a runtime-pm wakeref just around the request creation. */
	rq = ERR_PTR(-ENODEV);
	with_intel_runtime_pm(engine->i915, wakeref) {
		if (spin)
			rq = igt_spinner_create_request(spin,
							ctx, engine,
							MI_NOOP);
		else
			rq = i915_request_alloc(engine, ctx);
	}

	kernel_context_close(ctx);

	if (IS_ERR(rq)) {
		/* No request was started, so there is no spinner to end. */
		spin = NULL;
		err = PTR_ERR(rq);
		goto err;
	}

	i915_request_add(rq);

	if (spin && !igt_wait_for_spinner(spin, rq)) {
		pr_err("Spinner failed to start\n");
		err = -ETIMEDOUT;
	}

err:
	/* Stop a running spinner on any error before returning. */
	if (err && spin)
		igt_spinner_end(spin);

	return err;
}
270
271 static int check_whitelist_across_reset(struct intel_engine_cs *engine,
272                                         int (*reset)(struct intel_engine_cs *),
273                                         const char *name)
274 {
275         struct drm_i915_private *i915 = engine->i915;
276         bool want_spin = reset == do_engine_reset;
277         struct i915_gem_context *ctx;
278         struct igt_spinner spin;
279         intel_wakeref_t wakeref;
280         int err;
281
282         pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
283                 engine->whitelist.count, name);
284
285         if (want_spin) {
286                 err = igt_spinner_init(&spin, i915);
287                 if (err)
288                         return err;
289         }
290
291         ctx = kernel_context(i915);
292         if (IS_ERR(ctx))
293                 return PTR_ERR(ctx);
294
295         err = check_whitelist(ctx, engine);
296         if (err) {
297                 pr_err("Invalid whitelist *before* %s reset!\n", name);
298                 goto out;
299         }
300
301         err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
302         if (err)
303                 goto out;
304
305         with_intel_runtime_pm(i915, wakeref)
306                 err = reset(engine);
307
308         if (want_spin) {
309                 igt_spinner_end(&spin);
310                 igt_spinner_fini(&spin);
311         }
312
313         if (err) {
314                 pr_err("%s reset failed\n", name);
315                 goto out;
316         }
317
318         err = check_whitelist(ctx, engine);
319         if (err) {
320                 pr_err("Whitelist not preserved in context across %s reset!\n",
321                        name);
322                 goto out;
323         }
324
325         kernel_context_close(ctx);
326
327         ctx = kernel_context(i915);
328         if (IS_ERR(ctx))
329                 return PTR_ERR(ctx);
330
331         err = check_whitelist(ctx, engine);
332         if (err) {
333                 pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
334                        name);
335                 goto out;
336         }
337
338 out:
339         kernel_context_close(ctx);
340         return err;
341 }
342
343 static int live_reset_whitelist(void *arg)
344 {
345         struct drm_i915_private *i915 = arg;
346         struct intel_engine_cs *engine = i915->engine[RCS];
347         int err = 0;
348
349         /* If we reset the gpu, we should not lose the RING_NONPRIV */
350
351         if (!engine || engine->whitelist.count == 0)
352                 return 0;
353
354         igt_global_reset_lock(i915);
355
356         if (intel_has_reset_engine(i915)) {
357                 err = check_whitelist_across_reset(engine,
358                                                    do_engine_reset,
359                                                    "engine");
360                 if (err)
361                         goto out;
362         }
363
364         if (intel_has_gpu_reset(i915)) {
365                 err = check_whitelist_across_reset(engine,
366                                                    do_device_reset,
367                                                    "device");
368                 if (err)
369                         goto out;
370         }
371
372 out:
373         igt_global_reset_unlock(i915);
374         return err;
375 }
376
377 static bool verify_gt_engine_wa(struct drm_i915_private *i915,
378                                 struct wa_lists *lists, const char *str)
379 {
380         struct intel_engine_cs *engine;
381         enum intel_engine_id id;
382         bool ok = true;
383
384         ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
385
386         for_each_engine(engine, i915, id)
387                 ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
388
389         return ok;
390 }
391
392 static int
393 live_gpu_reset_gt_engine_workarounds(void *arg)
394 {
395         struct drm_i915_private *i915 = arg;
396         struct i915_gpu_error *error = &i915->gpu_error;
397         intel_wakeref_t wakeref;
398         struct wa_lists lists;
399         bool ok;
400
401         if (!intel_has_gpu_reset(i915))
402                 return 0;
403
404         pr_info("Verifying after GPU reset...\n");
405
406         igt_global_reset_lock(i915);
407         wakeref = intel_runtime_pm_get(i915);
408
409         reference_lists_init(i915, &lists);
410
411         ok = verify_gt_engine_wa(i915, &lists, "before reset");
412         if (!ok)
413                 goto out;
414
415         set_bit(I915_RESET_HANDOFF, &error->flags);
416         i915_reset(i915, ALL_ENGINES, "live_workarounds");
417
418         ok = verify_gt_engine_wa(i915, &lists, "after reset");
419
420 out:
421         reference_lists_fini(i915, &lists);
422         intel_runtime_pm_put(i915, wakeref);
423         igt_global_reset_unlock(i915);
424
425         return ok ? 0 : -ESRCH;
426 }
427
/*
 * Check that GT and engine workarounds are reapplied over per-engine
 * resets, both while the engine is idle and while it is busy running
 * a spinner.
 */
static int
live_engine_reset_gt_engine_workarounds(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct igt_spinner spin;
	enum intel_engine_id id;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	struct wa_lists lists;
	int ret = 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	ctx = kernel_context(i915);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	igt_global_reset_lock(i915);
	wakeref = intel_runtime_pm_get(i915);

	reference_lists_init(i915, &lists);

	for_each_engine(engine, i915, id) {
		bool ok;

		pr_info("Verifying after %s reset...\n", engine->name);

		ok = verify_gt_engine_wa(i915, &lists, "before reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		/* First, reset the engine while it is idle. */
		i915_reset_engine(engine, "live_workarounds");

		ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}

		/* Then repeat the reset while the engine is busy spinning. */
		ret = igt_spinner_init(&spin, i915);
		if (ret)
			goto err;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			igt_spinner_fini(&spin);
			goto err;
		}

		i915_request_add(rq);

		if (!igt_wait_for_spinner(&spin, rq)) {
			pr_err("Spinner failed to start\n");
			igt_spinner_fini(&spin);
			ret = -ETIMEDOUT;
			goto err;
		}

		i915_reset_engine(engine, "live_workarounds");

		igt_spinner_end(&spin);
		igt_spinner_fini(&spin);

		ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
		if (!ok) {
			ret = -ESRCH;
			goto err;
		}
	}

err:
	reference_lists_fini(i915, &lists);
	intel_runtime_pm_put(i915, wakeref);
	igt_global_reset_unlock(i915);
	kernel_context_close(ctx);

	igt_flush_test(i915, I915_WAIT_LOCKED);

	return ret;
}
514
515 int intel_workarounds_live_selftests(struct drm_i915_private *i915)
516 {
517         static const struct i915_subtest tests[] = {
518                 SUBTEST(live_reset_whitelist),
519                 SUBTEST(live_gpu_reset_gt_engine_workarounds),
520                 SUBTEST(live_engine_reset_gt_engine_workarounds),
521         };
522         int err;
523
524         if (i915_terminally_wedged(&i915->gpu_error))
525                 return 0;
526
527         mutex_lock(&i915->drm.struct_mutex);
528         err = i915_subtests(tests, i915);
529         mutex_unlock(&i915->drm.struct_mutex);
530
531         return err;
532 }