Merge tag 'batadv-next-pullrequest-20210408' of git://git.open-mesh.org/linux-merge
[linux-2.6-block.git] / drivers / gpu / drm / i915 / gt / selftest_reset.c
CommitLineData
932309fb
MW
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2018 Intel Corporation
4 */
5
3da3c5c1
CW
6#include <linux/crc32.h>
7
8#include "gem/i915_gem_stolen.h"
9
10#include "i915_memcpy.h"
932309fb 11#include "i915_selftest.h"
45233ab2 12#include "intel_gpu_commands.h"
932309fb 13#include "selftests/igt_reset.h"
f6470c9b 14#include "selftests/igt_atomic.h"
3da3c5c1
CW
15#include "selftests/igt_spinner.h"
16
/*
 * Check whether a GPU reset corrupts the contents of stolen memory.
 *
 * Strategy: CRC every page of the stolen region (DSM) before the reset,
 * keep the engines in @mask busy with spinner batches while the reset is
 * performed, then CRC every page again and compare. Pages tracked as
 * in-use in i915->mm.stolen are expected to survive; unused pages are
 * seeded with STACK_MAGIC so any scribbling by the reset is detectable.
 * Stolen memory is not CPU-mappable directly, so each page is accessed
 * by binding it into the GGTT at the reserved error_capture PTE and
 * mapping that slot write-combined.
 *
 * @gt:   the GT to reset
 * @mask: engines to reset; ALL_ENGINES selects a full-device reset,
 *        otherwise each selected engine is reset individually
 * @msg:  label used in the log messages (e.g. "device" or engine name)
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL if the
 * reset clobbered stolen memory above the reserved bias.
 */
static int
__igt_reset_stolen(struct intel_gt *gt,
		   intel_engine_mask_t mask,
		   const char *msg)
{
	struct i915_ggtt *ggtt = &gt->i915->ggtt;
	const struct resource *dsm = &gt->i915->dsm;
	resource_size_t num_pages, page;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct igt_spinner spin;
	long max, count;
	void *tmp;
	u32 *crc;
	int err;

	/* We reuse the error_capture PTE as our window into stolen. */
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return 0;

	num_pages = resource_size(dsm) >> PAGE_SHIFT;
	if (!num_pages)
		return 0;

	/* One CRC per stolen page, computed before and after the reset. */
	crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);
	if (!crc)
		return -ENOMEM;

	/* Bounce buffer for reading WC-mapped pages via i915_memcpy_from_wc. */
	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!tmp) {
		err = -ENOMEM;
		goto err_crc;
	}

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	err = igt_spinner_init(&spin, gt);
	if (err)
		goto err_lock;

	/* Keep each selected engine busy so the reset has work to kill. */
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct i915_request *rq;

		if (!(mask & engine->mask))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto err_spin;
		}
		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_spin;
		}
		i915_request_add(rq);
	}

	/* Pass 1: record the CRC of every stolen page before the reset. */
	for (page = 0; page < num_pages; page++) {
		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
		void __iomem *s;
		void *in;

		/* Point the error_capture PTE at this stolen page. */
		ggtt->vm.insert_page(&ggtt->vm, dma,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);

		/*
		 * Unused pages (no allocation overlapping this page in the
		 * stolen drm_mm) are filled with a known pattern so that a
		 * reset writing into them changes the CRC.
		 */
		if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
					     page << PAGE_SHIFT,
					     ((page + 1) << PAGE_SHIFT) - 1))
			memset_io(s, STACK_MAGIC, PAGE_SIZE);

		/* Prefer the accelerated WC read; fall back to direct access. */
		in = (void __force *)s;
		if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
			in = tmp;
		crc[page] = crc32_le(0, in, PAGE_SIZE);

		io_mapping_unmap(s);
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);

	/* Perform the reset with the spinners still executing. */
	if (mask == ALL_ENGINES) {
		intel_gt_reset(gt, mask, NULL);
	} else {
		for_each_engine(engine, gt, id) {
			if (mask & engine->mask)
				intel_engine_reset(engine, NULL);
		}
	}

	/* Pass 2: re-CRC every page and note unexpected modifications. */
	max = -1;
	count = 0;
	for (page = 0; page < num_pages; page++) {
		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
		void __iomem *s;
		void *in;
		u32 x;

		ggtt->vm.insert_page(&ggtt->vm, dma,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);

		in = (void __force *)s;
		if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
			in = tmp;
		x = crc32_le(0, in, PAGE_SIZE);

		/*
		 * Only complain about pages that are NOT in use: in-use pages
		 * may legitimately change (e.g. ringbuffers live in stolen).
		 */
		if (x != crc[page] &&
		    !__drm_mm_interval_first(&gt->i915->mm.stolen,
					     page << PAGE_SHIFT,
					     ((page + 1) << PAGE_SHIFT) - 1)) {
			pr_debug("unused stolen page %pa modified by GPU reset\n",
				 &page);
			if (count++ == 0)
				igt_hexdump(in, PAGE_SIZE);
			max = page;
		}

		io_mapping_unmap(s);
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);

	if (count > 0) {
		pr_info("%s reset clobbered %ld pages of stolen, last clobber at page %ld\n",
			msg, count, max);
	}
	/*
	 * Clobbers below the bias are tolerated (reserved for HW use);
	 * anything above it would corrupt driver-owned allocations.
	 */
	if (max >= I915_GEM_STOLEN_BIAS >> PAGE_SHIFT) {
		pr_err("%s reset clobbered unreserved area [above %x] of stolen; may cause severe faults\n",
		       msg, I915_GEM_STOLEN_BIAS);
		err = -EINVAL;
	}

err_spin:
	igt_spinner_fini(&spin);

err_lock:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	kfree(tmp);
err_crc:
	kfree(crc);
	return err;
}
180
181static int igt_reset_device_stolen(void *arg)
182{
183 return __igt_reset_stolen(arg, ALL_ENGINES, "device");
184}
185
186static int igt_reset_engines_stolen(void *arg)
187{
188 struct intel_gt *gt = arg;
189 struct intel_engine_cs *engine;
190 enum intel_engine_id id;
191 int err;
192
193 if (!intel_has_reset_engine(gt))
194 return 0;
195
196 for_each_engine(engine, gt, id) {
197 err = __igt_reset_stolen(gt, engine->mask, engine->name);
198 if (err)
199 return err;
200 }
201
202 return 0;
203}
932309fb
MW
204
/*
 * Check that we can issue a global GPU reset and that it is recorded.
 *
 * Returns 0 on success, -EINVAL if no reset was counted, or -EIO if the
 * device is left wedged afterwards.
 */
static int igt_global_reset(void *arg)
{
	struct intel_gt *gt = arg;
	unsigned int reset_count;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Serialise against other resets and keep the device awake. */
	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	/* Snapshot the global reset counter so we can see it advance. */
	reset_count = i915_reset_count(&gt->i915->gpu_error);

	intel_gt_reset(gt, ALL_ENGINES, NULL);

	if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
		pr_err("No GPU reset recorded!\n");
		err = -EINVAL;
	}

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	/* A reset that wedges the device is itself a failure. */
	if (intel_gt_is_wedged(gt))
		err = -EIO;

	return err;
}
234
/*
 * Check that we can recover a wedged device with a GPU reset.
 *
 * Deliberately wedges the GT, then performs a full reset and verifies
 * the device is no longer wedged. Returns -EIO on failure to recover.
 */
static int igt_wedged_reset(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	/* Force the wedged state we are trying to recover from. */
	intel_gt_set_wedged(gt);

	GEM_BUG_ON(!intel_gt_is_wedged(gt));
	intel_gt_reset(gt, ALL_ENGINES, NULL);

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	/* Success means the reset cleared the wedged flag. */
	return intel_gt_is_wedged(gt) ? -EIO : 0;
}
255
f6470c9b
MW
/*
 * Check that a full GPU reset is usable from atomic context.
 *
 * Runs the low-level __intel_gt_reset() inside each of the atomic
 * "phases" from igt_atomic_phases (hardirq, softirq, preempt-disabled,
 * etc. — see selftests/igt_atomic.h), sandwiched between the usual
 * reset_prepare()/reset_finish() bookkeeping which must stay outside
 * the critical section.
 */
static int igt_atomic_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	int err = 0;

	/* Keep the GT powered for the duration; resets need the HW awake. */
	intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto unlock;

	for (p = igt_atomic_phases; p->name; p++) {
		intel_engine_mask_t awake;

		GEM_TRACE("__intel_gt_reset under %s\n", p->name);

		/* prepare/finish may sleep, so they bracket the atomic phase */
		awake = reset_prepare(gt);
		p->critical_section_begin();

		err = __intel_gt_reset(gt, ALL_ENGINES);

		p->critical_section_end();
		reset_finish(gt, awake);

		if (err) {
			pr_err("__intel_gt_reset failed under %s\n", p->name);
			break;
		}
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt);

	return err;
}
299
faaa2902
CW
/*
 * Check that per-engine resets are usable from atomic context.
 *
 * For every engine and every atomic phase, performs a raw
 * __intel_engine_reset_bh() inside the phase's critical section. The
 * execlists submission tasklet is disabled around each engine's runs so
 * the reset does not race with submission.
 */
static int igt_atomic_engine_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!intel_has_reset_engine(gt))
		return 0;

	/* GuC performs engine resets itself; this exercise is meaningless. */
	if (intel_uc_uses_guc_submission(&gt->uc))
		return 0;

	intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto out_unlock;

	for_each_engine(engine, gt, id) {
		struct tasklet_struct *t = &engine->execlists.tasklet;

		/* t->func is NULL when the engine has no submission tasklet. */
		if (t->func)
			tasklet_disable(t);
		intel_engine_pm_get(engine);

		for (p = igt_atomic_phases; p->name; p++) {
			GEM_TRACE("intel_engine_reset(%s) under %s\n",
				  engine->name, p->name);
			/*
			 * _bh reset requires bottom halves disabled; the
			 * "softirq" phase already provides that in its
			 * critical section, so only disable them ourselves
			 * for the other phases.
			 */
			if (strcmp(p->name, "softirq"))
				local_bh_disable();

			p->critical_section_begin();
			err = __intel_engine_reset_bh(engine, NULL);
			p->critical_section_end();

			if (strcmp(p->name, "softirq"))
				local_bh_enable();

			if (err) {
				pr_err("intel_engine_reset(%s) failed under %s\n",
				       engine->name, p->name);
				break;
			}
		}

		intel_engine_pm_put(engine);
		if (t->func) {
			/* Re-enable and kick the tasklet to resume submission. */
			tasklet_enable(t);
			tasklet_hi_schedule(t);
		}
		if (err)
			break;
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

out_unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt);

	return err;
}
368
932309fb
MW
/*
 * Entry point for the live GPU-reset selftests.
 *
 * Skips entirely when the hardware lacks reset support, and bails with
 * -EIO if the device is already wedged before we start. The subtest
 * order is deliberate: igt_global_reset runs first to try to recover
 * the GPU before the more invasive tests.
 */
int intel_reset_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
		SUBTEST(igt_reset_device_stolen),
		SUBTEST(igt_reset_engines_stolen),
		SUBTEST(igt_wedged_reset),
		SUBTEST(igt_atomic_reset),
		SUBTEST(igt_atomic_engine_reset),
	};
	struct intel_gt *gt = &i915->gt;

	if (!intel_has_gpu_reset(gt))
		return 0;

	if (intel_gt_is_wedged(gt))
		return -EIO; /* we're long past hope of a successful reset */

	return intel_gt_live_subtests(tests, gt);
}