/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_print.h>

#include "display/intel_dmc.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"

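/*
 * Error capture can run when the machine is wedged and memory is scarce,
 * so every allocation on this path must be allowed to fail quietly
 * instead of invoking the OOM killer or spamming the log with warnings.
 */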
#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)

static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}

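/*
 * The formatted error state is streamed into a chain of kmalloc'd buffers
 * tracked by a scatterlist, so a large capture never needs one contiguous
 * allocation and can later be read out piecewise through
 * i915_gpu_coredump_copy_to_buffer().
 */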
static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}

__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len < 0) {
		e->err = len;
		return;
	}
	e->bytes += len;
}

static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)

static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}

/* single threaded page allocator with a reserved stash for emergencies */
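/*
 * pool_alloc() prefers a freshly allocated page and only dips into the
 * stash when the system has none to give; pool_free() tops the stash back
 * up before releasing pages, so a small reserve remains for captures taken
 * from atomic context.
 */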
static void pool_fini(struct pagevec *pv)
{
	pagevec_release(pv);
}

static int pool_refill(struct pagevec *pv, gfp_t gfp)
{
	while (pagevec_space(pv)) {
		struct page *p;

		p = alloc_page(gfp);
		if (!p)
			return -ENOMEM;

		pagevec_add(pv, p);
	}

	return 0;
}

static int pool_init(struct pagevec *pv, gfp_t gfp)
{
	int err;

	pagevec_init(pv);

	err = pool_refill(pv, gfp);
	if (err)
		pool_fini(pv);

	return err;
}

static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
{
	struct page *p;

	p = alloc_page(gfp);
	if (!p && pagevec_count(pv))
		p = pv->pages[--pv->nr];

	return p ? page_address(p) : NULL;
}

static void pool_free(struct pagevec *pv, void *addr)
{
	struct page *p = virt_to_page(addr);

	if (pagevec_space(pv))
		pagevec_add(pv, p);
	else
		__free_page(p);
}

#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct pagevec pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page;

	if (dst->page_count >= dst->num_pages)
		return ERR_PTR(-ENOSPC);

	page = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page)
		return ERR_PTR(-ENOMEM);

	return dst->pages[dst->page_count++] = page;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
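	/*
	 * Reads from a write-combining mapping are uncached and slow; when a
	 * staging page is available, pull the data across with the accelerated
	 * WC memcpy first and let zlib consume the copy instead.
	 */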
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

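/*
 * The single character emitted ahead of each object dump lets userspace
 * decoders tell how to interpret the ascii85 payload: ':' for the
 * zlib-deflated stream produced above, '~' for the plain copy made when
 * CONFIG_DRM_I915_COMPRESS_ERROR is disabled.
 */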
static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct pagevec pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	dst->pages[dst->page_count++] = ptr;
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif

d636951e 428static void error_print_instdone(struct drm_i915_error_state_buf *m,
742379c0 429 const struct intel_engine_coredump *ee)
d636951e 430{
0b6613c6 431 const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
f9e61372
BW
432 int slice;
433 int subslice;
89f2e7ab 434 int iter;
f9e61372 435
d636951e
BW
436 err_printf(m, " INSTDONE: 0x%08x\n",
437 ee->instdone.instdone);
438
651e7d48 439 if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
d636951e
BW
440 return;
441
442 err_printf(m, " SC_INSTDONE: 0x%08x\n",
443 ee->instdone.slice_common);
444
651e7d48 445 if (GRAPHICS_VER(m->i915) <= 6)
d636951e
BW
446 return;
447
fa9899da 448 if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 50)) {
fa9899da
MR
449 for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
450 err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
451 slice, subslice,
452 ee->instdone.sampler[slice][subslice]);
453
454 for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
455 err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
456 slice, subslice,
457 ee->instdone.row[slice][subslice]);
458 } else {
459 for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
460 err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
461 slice, subslice,
462 ee->instdone.sampler[slice][subslice]);
463
464 for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
465 err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
466 slice, subslice,
467 ee->instdone.row[slice][subslice]);
468 }
f7043102 469
651e7d48 470 if (GRAPHICS_VER(m->i915) < 12)
f7043102
LL
471 return;
472
89f2e7ab
MR
473 if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
474 for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
475 err_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
476 slice, subslice,
477 ee->instdone.geom_svg[slice][subslice]);
478 }
479
f7043102
LL
480 err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
481 ee->instdone.slice_common_extra[0]);
482 err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",
483 ee->instdone.slice_common_extra[1]);
d636951e
BW
484}
485
35ca039e
CW
486static void error_print_request(struct drm_i915_error_state_buf *m,
487 const char *prefix,
742379c0 488 const struct i915_request_coredump *erq)
35ca039e
CW
489{
490 if (!erq->seqno)
491 return;
492
9669a507 493 err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
7f4127c4 494 prefix, erq->pid, erq->context, erq->seqno,
52c0fdb2
CW
495 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
496 &erq->flags) ? "!" : "",
497 test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
498 &erq->flags) ? "+" : "",
499 erq->sched_attr.priority,
9669a507 500 erq->head, erq->tail);
35ca039e
CW
501}
502
4fa6053e
CW
503static void error_print_context(struct drm_i915_error_state_buf *m,
504 const char *header,
742379c0 505 const struct i915_gem_context_coredump *ctx)
4fa6053e 506{
f170523a 507 const u32 period = m->i915->gt.clock_period_ns;
1883a0a4
TU
508
509 err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
2935ed53 510 header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
1883a0a4
TU
511 ctx->guilty, ctx->active,
512 ctx->total_runtime * period,
513 mul_u32_u32(ctx->avg_runtime, period));
4fa6053e
CW
514}
515
742379c0
CW
516static struct i915_vma_coredump *
517__find_vma(struct i915_vma_coredump *vma, const char *name)
518{
519 while (vma) {
520 if (strcmp(vma->name, name) == 0)
521 return vma;
522 vma = vma->next;
523 }
524
525 return NULL;
526}
527
528static struct i915_vma_coredump *
529find_batch(const struct intel_engine_coredump *ee)
530{
531 return __find_vma(ee->vma, "batch");
532}
533
6361f4ba 534static void error_print_engine(struct drm_i915_error_state_buf *m,
742379c0 535 const struct intel_engine_coredump *ee)
84734a04 536{
742379c0 537 struct i915_vma_coredump *batch;
76e70087
MK
538 int n;
539
c990b4c3 540 err_printf(m, "%s command stream:\n", ee->engine->name);
742379c0 541 err_printf(m, " CCID: 0x%08x\n", ee->ccid);
6361f4ba 542 err_printf(m, " START: 0x%08x\n", ee->start);
06392e3b 543 err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
cdb324bd
CW
544 err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
545 ee->tail, ee->rq_post, ee->rq_tail);
6361f4ba 546 err_printf(m, " CTL: 0x%08x\n", ee->ctl);
21a2c58a 547 err_printf(m, " MODE: 0x%08x\n", ee->mode);
6361f4ba
CW
548 err_printf(m, " HWS: 0x%08x\n", ee->hws);
549 err_printf(m, " ACTHD: 0x%08x %08x\n",
550 (u32)(ee->acthd>>32), (u32)ee->acthd);
551 err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
552 err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
70a76a9b 553 err_printf(m, " ESR: 0x%08x\n", ee->esr);
d636951e
BW
554
555 error_print_instdone(m, ee);
556
742379c0
CW
557 batch = find_batch(ee);
558 if (batch) {
559 u64 start = batch->gtt_offset;
560 u64 end = start + batch->gtt_size;
03382dfb
CW
561
562 err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
563 upper_32_bits(start), lower_32_bits(start),
564 upper_32_bits(end), lower_32_bits(end));
565 }
651e7d48 566 if (GRAPHICS_VER(m->i915) >= 4) {
03382dfb 567 err_printf(m, " BBADDR: 0x%08x_%08x\n",
6361f4ba
CW
568 (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
569 err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
570 err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
3dda20a9 571 }
6361f4ba
CW
572 err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
573 err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
574 lower_32_bits(ee->faddr));
651e7d48 575 if (GRAPHICS_VER(m->i915) >= 6) {
6361f4ba
CW
576 err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
577 err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
84734a04 578 }
4bdafb9d 579 if (HAS_PPGTT(m->i915)) {
6361f4ba 580 err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
6c7a01ec 581
651e7d48 582 if (GRAPHICS_VER(m->i915) >= 8) {
6c7a01ec
BW
583 int i;
584 for (i = 0; i < 4; i++)
585 err_printf(m, " PDP%d: 0x%016llx\n",
6361f4ba 586 i, ee->vm_info.pdp[i]);
6c7a01ec
BW
587 } else {
588 err_printf(m, " PP_DIR_BASE: 0x%08x\n",
6361f4ba 589 ee->vm_info.pp_dir_base);
6c7a01ec
BW
590 }
591 }
bda30024 592 err_printf(m, " hung: %u\n", ee->hung);
702c8f8e 593 err_printf(m, " engine reset count: %u\n", ee->reset_count);
3fe3b030 594
76e70087
MK
595 for (n = 0; n < ee->num_ports; n++) {
596 err_printf(m, " ELSP[%d]:", n);
742379c0 597 error_print_request(m, " ", &ee->execlist[n]);
76e70087
MK
598 }
599
4fa6053e 600 error_print_context(m, " Active context: ", &ee->context);
84734a04
MK
601}
602
603void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
604{
605 va_list args;
606
607 va_start(args, f);
608 i915_error_vprintf(e, f, args);
609 va_end(args);
610}
611
742379c0 612static void print_error_vma(struct drm_i915_error_state_buf *m,
c990b4c3 613 const struct intel_engine_cs *engine,
742379c0 614 const struct i915_vma_coredump *vma)
ab0e7ff9 615{
489cae63 616 char out[ASCII85_BUFSZ];
0a97015d 617 int page;
ab0e7ff9 618
742379c0 619 if (!vma)
fc4c79c3
CW
620 return;
621
742379c0
CW
622 err_printf(m, "%s --- %s = 0x%08x %08x\n",
623 engine ? engine->name : "global", vma->name,
624 upper_32_bits(vma->gtt_offset),
625 lower_32_bits(vma->gtt_offset));
fc4c79c3 626
742379c0
CW
627 if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
628 err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);
fd521d3b 629
0a97015d 630 err_compression_marker(m);
742379c0 631 for (page = 0; page < vma->page_count; page++) {
0a97015d
CW
632 int i, len;
633
634 len = PAGE_SIZE;
742379c0
CW
635 if (page == vma->page_count - 1)
636 len -= vma->unused;
0a97015d
CW
637 len = ascii85_encode_len(len);
638
489cae63 639 for (i = 0; i < len; i++)
742379c0 640 err_puts(m, ascii85_encode(vma->pages[page][i], out));
ab0e7ff9 641 }
0a97015d 642 err_puts(m, "\n");
ab0e7ff9
CW
643}
644
2bd160a1 645static void err_print_capabilities(struct drm_i915_error_state_buf *m,
792592e7 646 struct i915_gpu_coredump *error)
2bd160a1 647{
a8c9b849
MW
648 struct drm_printer p = i915_error_printer(m);
649
792592e7
DCS
650 intel_device_info_print_static(&error->device_info, &p);
651 intel_device_info_print_runtime(&error->runtime_info, &p);
792592e7 652 intel_driver_caps_print(&error->driver_caps, &p);
2bd160a1
CW
653}
654
642c8a72 655static void err_print_params(struct drm_i915_error_state_buf *m,
acfb9973 656 const struct i915_params *params)
642c8a72 657{
acfb9973
MW
658 struct drm_printer p = i915_error_printer(m);
659
660 i915_params_dump(params, &p);
642c8a72
CW
661}
662
5a4c6f1b
CW
663static void err_print_pciid(struct drm_i915_error_state_buf *m,
664 struct drm_i915_private *i915)
665{
8ff5446a 666 struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
5a4c6f1b
CW
667
668 err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
669 err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
670 err_printf(m, "PCI Subsystem: %04x:%04x\n",
671 pdev->subsystem_vendor,
672 pdev->subsystem_device);
673}
674
7d41ef34 675static void err_print_uc(struct drm_i915_error_state_buf *m,
742379c0 676 const struct intel_uc_coredump *error_uc)
7d41ef34
MW
677{
678 struct drm_printer p = i915_error_printer(m);
7d41ef34
MW
679
680 intel_uc_fw_dump(&error_uc->guc_fw, &p);
681 intel_uc_fw_dump(&error_uc->huc_fw, &p);
742379c0 682 print_error_vma(m, NULL, error_uc->guc_log);
7d41ef34
MW
683}
684
0e39037b 685static void err_free_sgl(struct scatterlist *sgl)
84734a04 686{
0e39037b
CW
687 while (sgl) {
688 struct scatterlist *sg;
84734a04 689
0e39037b
CW
690 for (sg = sgl; !sg_is_chain(sg); sg++) {
691 kfree(sg_virt(sg));
692 if (sg_is_last(sg))
693 break;
694 }
695
696 sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
697 free_page((unsigned long)sgl);
698 sgl = sg;
84734a04 699 }
0e39037b 700}
84734a04 701
68172f2c
CW
702static void err_print_gt_info(struct drm_i915_error_state_buf *m,
703 struct intel_gt_coredump *gt)
704{
705 struct drm_printer p = i915_error_printer(m);
706
707 intel_gt_info_print(&gt->info, &p);
708 intel_sseu_print_topology(&gt->info.sseu, &p);
709}
710
742379c0
CW
711static void err_print_gt(struct drm_i915_error_state_buf *m,
712 struct intel_gt_coredump *gt)
713{
714 const struct intel_engine_coredump *ee;
1a8585bd 715 int i;
742379c0
CW
716
717 err_printf(m, "GT awake: %s\n", yesno(gt->awake));
718 err_printf(m, "EIR: 0x%08x\n", gt->eir);
719 err_printf(m, "IER: 0x%08x\n", gt->ier);
720 for (i = 0; i < gt->ngtier; i++)
721 err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
722 err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
723 err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
724 err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
725
726 for (i = 0; i < gt->nfence; i++)
727 err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
728
651e7d48 729 if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
742379c0
CW
730 err_printf(m, "ERROR: 0x%08x\n", gt->error);
731 err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
732 }
733
651e7d48 734 if (GRAPHICS_VER(m->i915) >= 8)
742379c0
CW
735 err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
736 gt->fault_data1, gt->fault_data0);
737
651e7d48 738 if (GRAPHICS_VER(m->i915) == 7)
742379c0
CW
739 err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
740
651e7d48 741 if (IS_GRAPHICS_VER(m->i915, 8, 11))
742379c0
CW
742 err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);
743
651e7d48 744 if (GRAPHICS_VER(m->i915) == 12)
742379c0
CW
745 err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);
746
651e7d48 747 if (GRAPHICS_VER(m->i915) >= 12) {
742379c0
CW
748 int i;
749
24d032e2
MR
750 for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
751 /*
752 * SFC_DONE resides in the VD forcewake domain, so it
753 * only exists if the corresponding VCS engine is
754 * present.
755 */
45f63790
MR
756 if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
757 !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
24d032e2
MR
758 continue;
759
742379c0
CW
760 err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
761 gt->sfc_done[i]);
24d032e2 762 }
742379c0
CW
763
764 err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
765 }
766
767 for (ee = gt->engine; ee; ee = ee->next) {
768 const struct i915_vma_coredump *vma;
769
770 error_print_engine(m, ee);
742379c0
CW
771 for (vma = ee->vma; vma; vma = vma->next)
772 print_error_vma(m, ee->engine, vma);
742379c0
CW
773 }
774
775 if (gt->uc)
776 err_print_uc(m, gt->uc);
68172f2c
CW
777
778 err_print_gt_info(m, gt);
742379c0
CW
779}
780
0e39037b 781static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
742379c0 782 struct i915_gpu_coredump *error)
0e39037b 783{
742379c0 784 const struct intel_engine_coredump *ee;
0e39037b 785 struct timespec64 ts;
fb6f0b64 786
5a4c6f1b
CW
787 if (*error->error_msg)
788 err_printf(m, "%s\n", error->error_msg);
57428bcc
CW
789 err_printf(m, "Kernel: %s %s\n",
790 init_utsname()->release,
791 init_utsname()->machine);
d71c4b03 792 err_printf(m, "Driver: %s\n", DRIVER_DATE);
c6270dbc
AB
793 ts = ktime_to_timespec64(error->time);
794 err_printf(m, "Time: %lld s %ld us\n",
795 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
796 ts = ktime_to_timespec64(error->boottime);
797 err_printf(m, "Boottime: %lld s %ld us\n",
798 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
799 ts = ktime_to_timespec64(error->uptime);
800 err_printf(m, "Uptime: %lld s %ld us\n",
801 (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
058179e7
CW
802 err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
803 error->capture, jiffies_to_msecs(jiffies - error->capture));
3fe3b030 804
742379c0 805 for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
7f4127c4 806 err_printf(m, "Active process (on ring %s): %s [%d]\n",
c990b4c3
CW
807 ee->engine->name,
808 ee->context.comm,
809 ee->context.pid);
810
48b031e3 811 err_printf(m, "Reset count: %u\n", error->reset_count);
62d5d69b 812 err_printf(m, "Suspend count: %u\n", error->suspend_count);
2e0d26f8 813 err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
805446c8
TU
814 err_printf(m, "Subplatform: 0x%x\n",
815 intel_subplatform(&error->runtime_info,
816 error->device_info.platform));
0e39037b 817 err_print_pciid(m, m->i915);
642c8a72 818
eb5be9d0 819 err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
0ac7655c 820
ec2b1485 821 if (HAS_DMC(m->i915)) {
c24760cf 822 struct intel_dmc *dmc = &m->i915->dmc;
0ac7655c
MK
823
824 err_printf(m, "DMC loaded: %s\n",
03256487 825 yesno(intel_dmc_has_payload(m->i915) != 0));
0ac7655c 826 err_printf(m, "DMC fw version: %d.%d\n",
0633cdcb
AS
827 DMC_VERSION_MAJOR(dmc->version),
828 DMC_VERSION_MINOR(dmc->version));
0ac7655c
MK
829 }
830
e5aac87e
CW
831 err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
832 err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
84734a04 833
742379c0
CW
834 if (error->gt)
835 err_print_gt(m, error->gt);
84734a04
MK
836
837 if (error->overlay)
838 intel_overlay_print_error_state(m, error->overlay);
839
792592e7 840 err_print_capabilities(m, error);
642c8a72 841 err_print_params(m, &error->params);
0e39037b
CW
842}
843
742379c0 844static int err_print_to_sgl(struct i915_gpu_coredump *error)
0e39037b
CW
845{
846 struct drm_i915_error_state_buf m;
847
848 if (IS_ERR(error))
849 return PTR_ERR(error);
850
851 if (READ_ONCE(error->sgl))
852 return 0;
853
854 memset(&m, 0, sizeof(m));
855 m.i915 = error->i915;
856
857 __err_print_to_sgl(&m, error);
858
859 if (m.buf) {
860 __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
861 m.bytes = 0;
862 m.buf = NULL;
863 }
864 if (m.cur) {
865 GEM_BUG_ON(m.end < m.cur);
866 sg_mark_end(m.cur - 1);
867 }
868 GEM_BUG_ON(m.sgl && !m.cur);
869
870 if (m.err) {
871 err_free_sgl(m.sgl);
872 return m.err;
873 }
642c8a72 874
0e39037b
CW
875 if (cmpxchg(&error->sgl, NULL, m.sgl))
876 err_free_sgl(m.sgl);
84734a04
MK
877
878 return 0;
879}
880
742379c0
CW
881ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
882 char *buf, loff_t off, size_t rem)
84734a04 883{
0e39037b
CW
884 struct scatterlist *sg;
885 size_t count;
886 loff_t pos;
887 int err;
84734a04 888
0e39037b
CW
889 if (!error || !rem)
890 return 0;
84734a04 891
0e39037b
CW
892 err = err_print_to_sgl(error);
893 if (err)
894 return err;
84734a04 895
0e39037b
CW
896 sg = READ_ONCE(error->fit);
897 if (!sg || off < sg->dma_address)
898 sg = error->sgl;
899 if (!sg)
900 return 0;
84734a04 901
0e39037b
CW
902 pos = sg->dma_address;
903 count = 0;
904 do {
905 size_t len, start;
906
907 if (sg_is_chain(sg)) {
908 sg = sg_chain_ptr(sg);
909 GEM_BUG_ON(sg_is_chain(sg));
910 }
84734a04 911
0e39037b
CW
912 len = sg->length;
913 if (pos + len <= off) {
914 pos += len;
915 continue;
916 }
84734a04 917
0e39037b
CW
918 start = sg->offset;
919 if (pos < off) {
920 GEM_BUG_ON(off - pos > len);
921 len -= off - pos;
922 start += off - pos;
923 pos = off;
924 }
925
926 len = min(len, rem);
927 GEM_BUG_ON(!len || len > sg->length);
928
929 memcpy(buf, page_address(sg_page(sg)) + start, len);
930
931 count += len;
932 pos += len;
933
934 buf += len;
935 rem -= len;
936 if (!rem) {
937 WRITE_ONCE(error->fit, sg);
938 break;
939 }
940 } while (!sg_is_last(sg++));
941
942 return count;
84734a04
MK
943}
944
742379c0 945static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
84734a04 946{
742379c0
CW
947 while (vma) {
948 struct i915_vma_coredump *next = vma->next;
949 int page;
84734a04 950
742379c0
CW
951 for (page = 0; page < vma->page_count; page++)
952 free_page((unsigned long)vma->pages[page]);
84734a04 953
742379c0
CW
954 kfree(vma);
955 vma = next;
956 }
84734a04
MK
957}
958
742379c0 959static void cleanup_params(struct i915_gpu_coredump *error)
84a20a8a 960{
16cabb12 961 i915_params_free(&error->params);
84a20a8a
MW
962}
963
742379c0 964static void cleanup_uc(struct intel_uc_coredump *uc)
7d41ef34 965{
742379c0
CW
966 kfree(uc->guc_fw.path);
967 kfree(uc->huc_fw.path);
968 i915_vma_coredump_free(uc->guc_log);
7d41ef34 969
742379c0 970 kfree(uc);
7d41ef34
MW
971}
972
742379c0 973static void cleanup_gt(struct intel_gt_coredump *gt)
84734a04 974{
742379c0
CW
975 while (gt->engine) {
976 struct intel_engine_coredump *ee = gt->engine;
977
978 gt->engine = ee->next;
84734a04 979
742379c0 980 i915_vma_coredump_free(ee->vma);
742379c0
CW
981 kfree(ee);
982 }
6361f4ba 983
742379c0
CW
984 if (gt->uc)
985 cleanup_uc(gt->uc);
c990b4c3 986
742379c0
CW
987 kfree(gt);
988}
b0fd47ad 989
742379c0
CW
990void __i915_gpu_coredump_free(struct kref *error_ref)
991{
992 struct i915_gpu_coredump *error =
993 container_of(error_ref, typeof(*error), ref);
6361f4ba 994
742379c0
CW
995 while (error->gt) {
996 struct intel_gt_coredump *gt = error->gt;
997
998 error->gt = gt->next;
999 cleanup_gt(gt);
84734a04
MK
1000 }
1001
84734a04 1002 kfree(error->overlay);
1d6aa7a3 1003
84a20a8a 1004 cleanup_params(error);
7d41ef34 1005
0e39037b 1006 err_free_sgl(error->sgl);
84734a04
MK
1007 kfree(error);
1008}
1009
742379c0
CW
1010static struct i915_vma_coredump *
1011i915_vma_coredump_create(const struct intel_gt *gt,
1012 const struct i915_vma *vma,
1013 const char *name,
1014 struct i915_vma_compress *compress)
84734a04 1015{
742379c0 1016 struct i915_ggtt *ggtt = gt->ggtt;
95374d75 1017 const u64 slot = ggtt->error_capture.start;
742379c0 1018 struct i915_vma_coredump *dst;
95374d75
CW
1019 unsigned long num_pages;
1020 struct sgt_iter iter;
83bc0f5b 1021 int ret;
84734a04 1022
79c7a28e
CW
1023 might_sleep();
1024
742379c0 1025 if (!vma || !vma->pages || !compress)
058d88c4
CW
1026 return NULL;
1027
95374d75 1028 num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
0a97015d 1029 num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
79c7a28e 1030 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
058d88c4 1031 if (!dst)
84734a04
MK
1032 return NULL;
1033
3bdd4f84
CW
1034 if (!compress_start(compress)) {
1035 kfree(dst);
1036 return NULL;
1037 }
1038
742379c0
CW
1039 strcpy(dst->name, name);
1040 dst->next = NULL;
1041
03382dfb
CW
1042 dst->gtt_offset = vma->node.start;
1043 dst->gtt_size = vma->node.size;
fd521d3b 1044 dst->gtt_page_sizes = vma->page_sizes.gtt;
83bc0f5b 1045 dst->num_pages = num_pages;
95374d75 1046 dst->page_count = 0;
0a97015d
CW
1047 dst->unused = 0;
1048
83bc0f5b 1049 ret = -EINVAL;
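	/*
	 * Three ways to read the object back: if the GGTT has a reserved
	 * error-capture slot, bind each page there and read it through the
	 * aperture; local-memory objects are read through the region's io
	 * mapping; anything else is kmap'd (with clflush) from system memory.
	 */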
895d8ebe 1050 if (drm_mm_node_allocated(&ggtt->error_capture)) {
95374d75 1051 void __iomem *s;
895d8ebe 1052 dma_addr_t dma;
b3c3f5e6 1053
895d8ebe 1054 for_each_sgt_daddr(dma, iter, vma->pages) {
f2acf740 1055 mutex_lock(&ggtt->error_mutex);
895d8ebe
DCS
1056 ggtt->vm.insert_page(&ggtt->vm, dma, slot,
1057 I915_CACHE_NONE, 0);
742379c0 1058 mb();
b3c3f5e6 1059
895d8ebe 1060 s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
742379c0
CW
1061 ret = compress_page(compress,
1062 (void __force *)s, dst,
1063 true);
895d8ebe 1064 io_mapping_unmap(s);
f2acf740
CW
1065
1066 mb();
1067 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
1068 mutex_unlock(&ggtt->error_mutex);
895d8ebe
DCS
1069 if (ret)
1070 break;
1071 }
0ff37575 1072 } else if (__i915_gem_object_is_lmem(vma->obj)) {
895d8ebe
DCS
1073 struct intel_memory_region *mem = vma->obj->mm.region;
1074 dma_addr_t dma;
1075
1076 for_each_sgt_daddr(dma, iter, vma->pages) {
1077 void __iomem *s;
1078
c9749836
CT
1079 s = io_mapping_map_wc(&mem->iomap,
1080 dma - mem->region.start,
1081 PAGE_SIZE);
742379c0
CW
1082 ret = compress_page(compress,
1083 (void __force *)s, dst,
1084 true);
48715f70 1085 io_mapping_unmap(s);
895d8ebe
DCS
1086 if (ret)
1087 break;
1088 }
1089 } else {
1090 struct page *page;
1091
1092 for_each_sgt_page(page, iter, vma->pages) {
1093 void *s;
1094
1095 drm_clflush_pages(&page, 1);
1096
48715f70 1097 s = kmap(page);
742379c0 1098 ret = compress_page(compress, s, dst, false);
bae21dac 1099 kunmap(page);
895d8ebe
DCS
1100
1101 drm_clflush_pages(&page, 1);
1102
1103 if (ret)
1104 break;
1105 }
84734a04 1106 }
84734a04 1107
3bdd4f84 1108 if (ret || compress_flush(compress, dst)) {
83bc0f5b 1109 while (dst->page_count--)
3bdd4f84 1110 pool_free(&compress->pool, dst->pages[dst->page_count]);
83bc0f5b
CW
1111 kfree(dst);
1112 dst = NULL;
1113 }
3bdd4f84 1114 compress_finish(compress);
95374d75 1115
95374d75 1116 return dst;
84734a04 1117}
84734a04 1118
742379c0 1119static void gt_record_fences(struct intel_gt_coredump *gt)
011cf577 1120{
742379c0
CW
1121 struct i915_ggtt *ggtt = gt->_gt->ggtt;
1122 struct intel_uncore *uncore = gt->_gt->uncore;
84734a04
MK
1123 int i;
1124
651e7d48 1125 if (GRAPHICS_VER(uncore->i915) >= 6) {
742379c0
CW
1126 for (i = 0; i < ggtt->num_fences; i++)
1127 gt->fence[i] =
7f1502d9
TU
1128 intel_uncore_read64(uncore,
1129 FENCE_REG_GEN6_LO(i));
651e7d48 1130 } else if (GRAPHICS_VER(uncore->i915) >= 4) {
742379c0
CW
1131 for (i = 0; i < ggtt->num_fences; i++)
1132 gt->fence[i] =
7f1502d9
TU
1133 intel_uncore_read64(uncore,
1134 FENCE_REG_965_LO(i));
5a4c6f1b 1135 } else {
742379c0
CW
1136 for (i = 0; i < ggtt->num_fences; i++)
1137 gt->fence[i] =
7f1502d9 1138 intel_uncore_read(uncore, FENCE_REG(i));
eecf613a 1139 }
742379c0 1140 gt->nfence = i;
84734a04
MK
1141}
1142
742379c0 1143static void engine_record_registers(struct intel_engine_coredump *ee)
84734a04 1144{
742379c0
CW
1145 const struct intel_engine_cs *engine = ee->engine;
1146 struct drm_i915_private *i915 = engine->i915;
6361f4ba 1147
651e7d48 1148 if (GRAPHICS_VER(i915) >= 6) {
baba6e57 1149 ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
91b59cd9 1150
651e7d48 1151 if (GRAPHICS_VER(i915) >= 12)
742379c0
CW
1152 ee->fault_reg = intel_uncore_read(engine->uncore,
1153 GEN12_RING_FAULT_REG);
651e7d48 1154 else if (GRAPHICS_VER(i915) >= 8)
742379c0
CW
1155 ee->fault_reg = intel_uncore_read(engine->uncore,
1156 GEN8_RING_FAULT_REG);
62acc7e8 1157 else
77a302e0 1158 ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
4e5aabfd
BW
1159 }
1160
651e7d48 1161 if (GRAPHICS_VER(i915) >= 4) {
70a76a9b 1162 ee->esr = ENGINE_READ(engine, RING_ESR);
baba6e57
DCS
1163 ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
1164 ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
1165 ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
1166 ee->instps = ENGINE_READ(engine, RING_INSTPS);
1167 ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
742379c0 1168 ee->ccid = ENGINE_READ(engine, CCID);
651e7d48 1169 if (GRAPHICS_VER(i915) >= 8) {
baba6e57
DCS
1170 ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
1171 ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
13ffadd1 1172 }
baba6e57 1173 ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
84734a04 1174 } else {
baba6e57
DCS
1175 ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
1176 ee->ipeir = ENGINE_READ(engine, IPEIR);
1177 ee->ipehr = ENGINE_READ(engine, IPEHR);
84734a04
MK
1178 }
1179
0e704476 1180 intel_engine_get_instdone(engine, &ee->instdone);
d636951e 1181
baba6e57 1182 ee->instpm = ENGINE_READ(engine, RING_INSTPM);
7e37f889 1183 ee->acthd = intel_engine_get_active_head(engine);
baba6e57
DCS
1184 ee->start = ENGINE_READ(engine, RING_START);
1185 ee->head = ENGINE_READ(engine, RING_HEAD);
1186 ee->tail = ENGINE_READ(engine, RING_TAIL);
1187 ee->ctl = ENGINE_READ(engine, RING_CTL);
651e7d48 1188 if (GRAPHICS_VER(i915) > 2)
baba6e57 1189 ee->mode = ENGINE_READ(engine, RING_MI_MODE);
84734a04 1190
742379c0 1191 if (!HWS_NEEDS_PHYSICAL(i915)) {
f0f59a00 1192 i915_reg_t mmio;
f3ce3821 1193
651e7d48 1194 if (GRAPHICS_VER(i915) == 7) {
0bc40be8 1195 switch (engine->id) {
f3ce3821 1196 default:
8a68d464 1197 MISSING_CASE(engine->id);
df561f66 1198 fallthrough;
8a68d464 1199 case RCS0:
f3ce3821
CW
1200 mmio = RENDER_HWS_PGA_GEN7;
1201 break;
8a68d464 1202 case BCS0:
f3ce3821
CW
1203 mmio = BLT_HWS_PGA_GEN7;
1204 break;
8a68d464 1205 case VCS0:
f3ce3821
CW
1206 mmio = BSD_HWS_PGA_GEN7;
1207 break;
8a68d464 1208 case VECS0:
f3ce3821
CW
1209 mmio = VEBOX_HWS_PGA_GEN7;
1210 break;
1211 }
651e7d48 1212 } else if (GRAPHICS_VER(engine->i915) == 6) {
0bc40be8 1213 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
f3ce3821
CW
1214 } else {
1215 /* XXX: gen8 returns to sanity */
0bc40be8 1216 mmio = RING_HWS_PGA(engine->mmio_base);
f3ce3821
CW
1217 }
1218
742379c0 1219 ee->hws = intel_uncore_read(engine->uncore, mmio);
f3ce3821
CW
1220 }
1221
742379c0 1222 ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);
6c7a01ec 1223
742379c0 1224 if (HAS_PPGTT(i915)) {
6c7a01ec
BW
1225 int i;
1226
dbc65183 1227 ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
6c7a01ec 1228
651e7d48 1229 if (GRAPHICS_VER(i915) == 6) {
6361f4ba 1230 ee->vm_info.pp_dir_base =
baba6e57 1231 ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
651e7d48 1232 } else if (GRAPHICS_VER(i915) == 7) {
6361f4ba 1233 ee->vm_info.pp_dir_base =
6d425728 1234 ENGINE_READ(engine, RING_PP_DIR_BASE);
651e7d48 1235 } else if (GRAPHICS_VER(i915) >= 8) {
6d425728
CW
1236 u32 base = engine->mmio_base;
1237
6c7a01ec 1238 for (i = 0; i < 4; i++) {
6361f4ba 1239 ee->vm_info.pdp[i] =
742379c0
CW
1240 intel_uncore_read(engine->uncore,
1241 GEN8_RING_PDP_UDW(base, i));
6361f4ba
CW
1242 ee->vm_info.pdp[i] <<= 32;
1243 ee->vm_info.pdp[i] |=
742379c0
CW
1244 intel_uncore_read(engine->uncore,
1245 GEN8_RING_PDP_LDW(base, i));
6c7a01ec 1246 }
6d425728 1247 }
6c7a01ec 1248 }
84734a04
MK
1249}
1250
22b7a426 1251static void record_request(const struct i915_request *request,
742379c0 1252 struct i915_request_coredump *erq)
35ca039e 1253{
52c0fdb2 1254 erq->flags = request->fence.flags;
b300fde8
CW
1255 erq->context = request->fence.context;
1256 erq->seqno = request->fence.seqno;
b7268c5e 1257 erq->sched_attr = request->sched.attr;
35ca039e
CW
1258 erq->head = request->head;
1259 erq->tail = request->tail;
6a8679c0
CW
1260
1261 erq->pid = 0;
1262 rcu_read_lock();
24aac336
CW
1263 if (!intel_context_is_closed(request->context)) {
1264 const struct i915_gem_context *ctx;
1265
1266 ctx = rcu_dereference(request->context->gem_context);
1267 if (ctx)
1268 erq->pid = pid_nr(ctx->pid);
1269 }
6a8679c0 1270 rcu_read_unlock();
35ca039e
CW
1271}
1272
742379c0 1273static void engine_record_execlists(struct intel_engine_coredump *ee)
35ca039e 1274{
742379c0
CW
1275 const struct intel_engine_execlists * const el = &ee->engine->execlists;
1276 struct i915_request * const *port = el->active;
22b7a426 1277 unsigned int n = 0;
35ca039e 1278
22b7a426
CW
1279 while (*port)
1280 record_request(*port++, &ee->execlist[n++]);
76e70087
MK
1281
1282 ee->num_ports = n;
35ca039e
CW
1283}
1284
742379c0 1285static bool record_context(struct i915_gem_context_coredump *e,
c990b4c3 1286 const struct i915_request *rq)
4fa6053e 1287{
6a8679c0
CW
1288 struct i915_gem_context *ctx;
1289 struct task_struct *task;
03d0ed8a 1290 bool simulated;
6a8679c0
CW
1291
1292 rcu_read_lock();
1293 ctx = rcu_dereference(rq->context->gem_context);
1294 if (ctx && !kref_get_unless_zero(&ctx->ref))
1295 ctx = NULL;
1296 rcu_read_unlock();
9f3ccd40 1297 if (!ctx)
03d0ed8a 1298 return true;
c990b4c3 1299
6a8679c0
CW
1300 rcu_read_lock();
1301 task = pid_task(ctx->pid, PIDTYPE_PID);
1302 if (task) {
1303 strcpy(e->comm, task->comm);
1304 e->pid = task->pid;
4fa6053e 1305 }
6a8679c0 1306 rcu_read_unlock();
4fa6053e 1307
b7268c5e 1308 e->sched_attr = ctx->sched;
77b25a97
CW
1309 e->guilty = atomic_read(&ctx->guilty_count);
1310 e->active = atomic_read(&ctx->active_count);
c990b4c3 1311
1883a0a4
TU
1312 e->total_runtime = rq->context->runtime.total;
1313 e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);
1314
03d0ed8a 1315 simulated = i915_gem_context_no_error_capture(ctx);
6a8679c0
CW
1316
1317 i915_gem_context_put(ctx);
03d0ed8a 1318 return simulated;
4fa6053e
CW
1319}
1320
742379c0
CW
1321struct intel_engine_capture_vma {
1322 struct intel_engine_capture_vma *next;
1323 struct i915_vma *vma;
1324 char name[16];
79c7a28e
CW
1325};
1326
742379c0
CW
1327static struct intel_engine_capture_vma *
1328capture_vma(struct intel_engine_capture_vma *next,
79c7a28e 1329 struct i915_vma *vma,
742379c0
CW
1330 const char *name,
1331 gfp_t gfp)
79c7a28e 1332{
742379c0 1333 struct intel_engine_capture_vma *c;
79c7a28e 1334
79c7a28e
CW
1335 if (!vma)
1336 return next;
1337
742379c0 1338 c = kmalloc(sizeof(*c), gfp);
79c7a28e
CW
1339 if (!c)
1340 return next;
1341
b1e3177b 1342 if (!i915_active_acquire_if_busy(&vma->active)) {
79c7a28e
CW
1343 kfree(c);
1344 return next;
1345 }
1346
742379c0 1347 strcpy(c->name, name);
db9bc2d3 1348 c->vma = vma; /* reference held while active */
79c7a28e
CW
1349
1350 c->next = next;
1351 return c;
1352}
1353
742379c0
CW
1354static struct intel_engine_capture_vma *
1355capture_user(struct intel_engine_capture_vma *capture,
1356 const struct i915_request *rq,
1357 gfp_t gfp)
b0fd47ad 1358{
e61e0f51 1359 struct i915_capture_list *c;
b0fd47ad 1360
742379c0
CW
1361 for (c = rq->capture_list; c; c = c->next)
1362 capture = capture_vma(capture, c->vma, "user", gfp);
79c7a28e
CW
1363
1364 return capture;
b0fd47ad
CW
1365}
1366
742379c0
CW
1367static void add_vma(struct intel_engine_coredump *ee,
1368 struct i915_vma_coredump *vma)
84734a04 1369{
742379c0
CW
1370 if (vma) {
1371 vma->next = ee->vma;
1372 ee->vma = vma;
1373 }
1374}
1375
1376struct intel_engine_coredump *
1377intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
1378{
1379 struct intel_engine_coredump *ee;
c990b4c3 1380
742379c0 1381 ee = kzalloc(sizeof(*ee), gfp);
c990b4c3 1382 if (!ee)
742379c0 1383 return NULL;
84734a04 1384
742379c0 1385 ee->engine = engine;
372fbb8e 1386
742379c0
CW
1387 engine_record_registers(ee);
1388 engine_record_execlists(ee);
3bdd4f84 1389
742379c0
CW
1390 return ee;
1391}
ab0e7ff9 1392
742379c0
CW
1393struct intel_engine_capture_vma *
1394intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
1395 struct i915_request *rq,
1396 gfp_t gfp)
1397{
1398 struct intel_engine_capture_vma *vma = NULL;
79c7a28e 1399
742379c0
CW
1400 ee->simulated |= record_context(&ee->context, rq);
1401 if (ee->simulated)
1402 return NULL;
ab0e7ff9 1403
742379c0
CW
1404 /*
1405 * We need to copy these to an anonymous buffer
1406 * as the simplest method to avoid being overwritten
1407 * by userspace.
1408 */
1409 vma = capture_vma(vma, rq->batch, "batch", gfp);
1410 vma = capture_user(vma, rq, gfp);
1411 vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
1412 vma = capture_vma(vma, rq->context->state, "HW context", gfp);
79c7a28e 1413
742379c0
CW
1414 ee->rq_head = rq->head;
1415 ee->rq_post = rq->postfix;
1416 ee->rq_tail = rq->tail;
bc3d6744 1417
742379c0
CW
1418 return vma;
1419}
cdb324bd 1420
742379c0
CW
1421void
1422intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
1423 struct intel_engine_capture_vma *capture,
1424 struct i915_vma_compress *compress)
1425{
1426 const struct intel_engine_cs *engine = ee->engine;
57bc699d 1427
742379c0
CW
1428 while (capture) {
1429 struct intel_engine_capture_vma *this = capture;
1430 struct i915_vma *vma = this->vma;
c990b4c3 1431
742379c0
CW
1432 add_vma(ee,
1433 i915_vma_coredump_create(engine->gt,
1434 vma, this->name,
1435 compress));
84734a04 1436
742379c0 1437 i915_active_release(&vma->active);
c990b4c3 1438
742379c0
CW
1439 capture = this->next;
1440 kfree(this);
1441 }
79c7a28e 1442
742379c0
CW
1443 add_vma(ee,
1444 i915_vma_coredump_create(engine->gt,
1445 engine->status_page.vma,
1446 "HW Status",
1447 compress));
79c7a28e 1448
742379c0
CW
1449 add_vma(ee,
1450 i915_vma_coredump_create(engine->gt,
1451 engine->wa_ctx.vma,
1452 "WA context",
1453 compress));
742379c0
CW
1454}
1455
1456static struct intel_engine_coredump *
1457capture_engine(struct intel_engine_cs *engine,
1458 struct i915_vma_compress *compress)
1459{
1a8585bd 1460 struct intel_engine_capture_vma *capture = NULL;
742379c0 1461 struct intel_engine_coredump *ee;
573ba126
MB
1462 struct intel_context *ce;
1463 struct i915_request *rq = NULL;
742379c0 1464 unsigned long flags;
79c7a28e 1465
742379c0
CW
1466 ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
1467 if (!ee)
1468 return NULL;
c0ce4663 1469
573ba126
MB
1470 ce = intel_engine_get_hung_context(engine);
1471 if (ce) {
1472 intel_engine_clear_hung_context(engine);
1473 rq = intel_context_find_active_request(ce);
1474 if (!rq || !i915_request_started(rq))
1475 goto no_request_capture;
1476 } else {
1477 /*
1478 * Getting here with GuC enabled means it is a forced error capture
1479 * with no actual hang. So, no need to attempt the execlist search.
1480 */
1481 if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
1482 spin_lock_irqsave(&engine->sched_engine->lock, flags);
1483 rq = intel_engine_execlist_find_hung_request(engine);
1484 spin_unlock_irqrestore(&engine->sched_engine->lock,
1485 flags);
1486 }
1487 }
1a8585bd
CW
1488 if (rq)
1489 capture = intel_engine_coredump_add_request(ee, rq,
1490 ATOMIC_MAYFAIL);
1a8585bd 1491 if (!capture) {
573ba126 1492no_request_capture:
742379c0
CW
1493 kfree(ee);
1494 return NULL;
1495 }
c990b4c3 1496
742379c0 1497 intel_engine_coredump_add_vma(ee, capture, compress);
c990b4c3 1498
742379c0 1499 return ee;
84734a04
MK
1500}
1501
3bdd4f84 1502static void
742379c0 1503gt_record_engines(struct intel_gt_coredump *gt,
bda30024 1504 intel_engine_mask_t engine_mask,
742379c0 1505 struct i915_vma_compress *compress)
7d41ef34 1506{
742379c0
CW
1507 struct intel_engine_cs *engine;
1508 enum intel_engine_id id;
7d41ef34 1509
742379c0
CW
1510 for_each_engine(engine, gt->_gt, id) {
1511 struct intel_engine_coredump *ee;
1512
1513 /* Refill our page pool before entering atomic section */
1514 pool_refill(&compress->pool, ALLOW_FAIL);
1515
1516 ee = capture_engine(engine, compress);
1517 if (!ee)
1518 continue;
1519
bda30024
TU
1520 ee->hung = engine->mask & engine_mask;
1521
742379c0
CW
1522 gt->simulated |= ee->simulated;
1523 if (ee->simulated) {
1524 kfree(ee);
1525 continue;
1526 }
1527
1528 ee->next = gt->engine;
1529 gt->engine = ee;
1530 }
1531}
1532
1533static struct intel_uc_coredump *
1534gt_record_uc(struct intel_gt_coredump *gt,
1535 struct i915_vma_compress *compress)
1536{
1537 const struct intel_uc *uc = &gt->_gt->uc;
1538 struct intel_uc_coredump *error_uc;
1539
1540 error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
1541 if (!error_uc)
1542 return NULL;
7d41ef34 1543
abb042f3
MW
1544 memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
1545 memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
7d41ef34
MW
1546
	/*
	 * Non-default firmware paths will be specified by the modparam.
	 * As modparams are generally accessible from userspace, make
	 * explicit copies of the firmware paths.
	 */
3bdd4f84
CW
1551 error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
1552 error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
742379c0
CW
1553 error_uc->guc_log =
1554 i915_vma_coredump_create(gt->_gt,
1555 uc->guc.log.vma, "GuC log buffer",
1556 compress);
1557
1558 return error_uc;
1559}
1560
1d762aad 1561/* Capture all registers which don't fit into another category. */
742379c0 1562static void gt_record_regs(struct intel_gt_coredump *gt)
84734a04 1563{
742379c0
CW
1564 struct intel_uncore *uncore = gt->_gt->uncore;
1565 struct drm_i915_private *i915 = uncore->i915;
885ea5a8 1566 int i;
84734a04 1567
742379c0
CW
1568 /*
1569 * General organization
654c90c6
BW
1570 * 1. Registers specific to a single generation
1571 * 2. Registers which belong to multiple generations
1572 * 3. Feature specific registers.
1573 * 4. Everything else
1574 * Please try to follow the order.
1575 */
84734a04 1576
654c90c6 1577 /* 1: Registers specific to a single generation */
4f5fd91f 1578 if (IS_VALLEYVIEW(i915)) {
742379c0
CW
1579 gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1580 gt->ier = intel_uncore_read(uncore, VLV_IER);
1581 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
654c90c6 1582 }
84734a04 1583
651e7d48 1584 if (GRAPHICS_VER(i915) == 7)
742379c0 1585 gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
84734a04 1586
651e7d48 1587 if (GRAPHICS_VER(i915) >= 12) {
742379c0
CW
1588 gt->fault_data0 = intel_uncore_read(uncore,
1589 GEN12_FAULT_TLB_DATA0);
1590 gt->fault_data1 = intel_uncore_read(uncore,
1591 GEN12_FAULT_TLB_DATA1);
651e7d48 1592 } else if (GRAPHICS_VER(i915) >= 8) {
742379c0
CW
1593 gt->fault_data0 = intel_uncore_read(uncore,
1594 GEN8_FAULT_TLB_DATA0);
1595 gt->fault_data1 = intel_uncore_read(uncore,
1596 GEN8_FAULT_TLB_DATA1);
6c826f34
MK
1597 }
1598
651e7d48 1599 if (GRAPHICS_VER(i915) == 6) {
742379c0
CW
1600 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
1601 gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
1602 gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
91ec5d11 1603 }
84734a04 1604
654c90c6 1605 /* 2: Registers which belong to multiple generations */
651e7d48 1606 if (GRAPHICS_VER(i915) >= 7)
742379c0 1607 gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
84734a04 1608
651e7d48 1609 if (GRAPHICS_VER(i915) >= 6) {
742379c0 1610 gt->derrmr = intel_uncore_read(uncore, DERRMR);
651e7d48 1611 if (GRAPHICS_VER(i915) < 12) {
742379c0
CW
1612 gt->error = intel_uncore_read(uncore, ERROR_GEN6);
1613 gt->done_reg = intel_uncore_read(uncore, DONE_REG);
23dea051 1614 }
84734a04
MK
1615 }
1616
654c90c6 1617 /* 3: Feature specific registers */
651e7d48 1618 if (IS_GRAPHICS_VER(i915, 6, 7)) {
742379c0
CW
1619 gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
1620 gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
91ec5d11
BW
1621 }
1622
651e7d48 1623 if (IS_GRAPHICS_VER(i915, 8, 11))
742379c0 1624 gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
fd521d3b 1625
651e7d48 1626 if (GRAPHICS_VER(i915) == 12)
742379c0 1627 gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
ba1d18e3 1628
651e7d48 1629 if (GRAPHICS_VER(i915) >= 12) {
e50dbdbf 1630 for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
24d032e2
MR
1631 /*
1632 * SFC_DONE resides in the VD forcewake domain, so it
1633 * only exists if the corresponding VCS engine is
1634 * present.
1635 */
45f63790
MR
1636 if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
1637 !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
24d032e2
MR
1638 continue;
1639
742379c0 1640 gt->sfc_done[i] =
e50dbdbf
MK
1641 intel_uncore_read(uncore, GEN12_SFC_DONE(i));
1642 }
811bb3db 1643
742379c0 1644 gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
e50dbdbf
MK
1645 }
1646
91ec5d11 1647 /* 4: Everything else */
651e7d48 1648 if (GRAPHICS_VER(i915) >= 11) {
742379c0
CW
1649 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
1650 gt->gtier[0] =
4f5fd91f
TU
1651 intel_uncore_read(uncore,
1652 GEN11_RENDER_COPY_INTR_ENABLE);
742379c0 1653 gt->gtier[1] =
4f5fd91f 1654 intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
742379c0 1655 gt->gtier[2] =
4f5fd91f 1656 intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
742379c0 1657 gt->gtier[3] =
4f5fd91f
TU
1658 intel_uncore_read(uncore,
1659 GEN11_GPM_WGBOXPERF_INTR_ENABLE);
742379c0 1660 gt->gtier[4] =
4f5fd91f
TU
1661 intel_uncore_read(uncore,
1662 GEN11_CRYPTO_RSVD_INTR_ENABLE);
742379c0 1663 gt->gtier[5] =
4f5fd91f
TU
1664 intel_uncore_read(uncore,
1665 GEN11_GUNIT_CSME_INTR_ENABLE);
742379c0 1666 gt->ngtier = 6;
651e7d48 1667 } else if (GRAPHICS_VER(i915) >= 8) {
742379c0 1668 gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
885ea5a8 1669 for (i = 0; i < 4; i++)
742379c0
CW
1670 gt->gtier[i] =
1671 intel_uncore_read(uncore, GEN8_GT_IER(i));
1672 gt->ngtier = 4;
4f5fd91f 1673 } else if (HAS_PCH_SPLIT(i915)) {
742379c0
CW
1674 gt->ier = intel_uncore_read(uncore, DEIER);
1675 gt->gtier[0] = intel_uncore_read(uncore, GTIER);
1676 gt->ngtier = 1;
651e7d48 1677 } else if (GRAPHICS_VER(i915) == 2) {
742379c0 1678 gt->ier = intel_uncore_read16(uncore, GEN2_IER);
4f5fd91f 1679 } else if (!IS_VALLEYVIEW(i915)) {
742379c0 1680 gt->ier = intel_uncore_read(uncore, GEN2_IER);
654c90c6 1681 }
742379c0
CW
1682 gt->eir = intel_uncore_read(uncore, EIR);
1683 gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
1684}
1685
792592e7
DCS
1686static void gt_record_info(struct intel_gt_coredump *gt)
1687{
1688 memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
1689}
1690
742379c0
CW
/*
 * Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
	/*
	 * IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it also captures some
	 * very common synchronization commands that almost always indicate
	 * strictly a client bug. Use instdone to help differentiate those.
	 */
	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}
1711
742379c0 1712static const char *error_msg(struct i915_gpu_coredump *error)
cb383002 1713{
742379c0 1714 struct intel_engine_coredump *first = NULL;
2dae0c85 1715 unsigned int hung_classes = 0;
742379c0 1716 struct intel_gt_coredump *gt;
eb8d0f5a 1717 int len;
cb383002 1718
742379c0
CW
1719 for (gt = error->gt; gt; gt = gt->next) {
1720 struct intel_engine_coredump *cs;
1721
bda30024
TU
1722 for (cs = gt->engine; cs; cs = cs->next) {
1723 if (cs->hung) {
2dae0c85 1724 hung_classes |= BIT(cs->engine->uabi_class);
bda30024
TU
1725 if (!first)
1726 first = cs;
1727 }
1728 }
742379c0
CW
1729 }
1730
58174462 1731 len = scnprintf(error->error_msg, sizeof(error->error_msg),
742379c0 1732 "GPU HANG: ecode %d:%x:%08x",
651e7d48 1733 GRAPHICS_VER(error->i915), hung_classes,
742379c0 1734 generate_ecode(first));
29baf3ae 1735 if (first && first->context.pid) {
eb8d0f5a 1736 /* Just show the first executing process, more is confusing */
58174462
MK
1737 len += scnprintf(error->error_msg + len,
1738 sizeof(error->error_msg) - len,
1739 ", in %s [%d]",
742379c0 1740 first->context.comm, first->context.pid);
eb8d0f5a 1741 }
58174462 1742
eb8d0f5a 1743 return error->error_msg;
cb383002
MK
1744}
1745
742379c0 1746static void capture_gen(struct i915_gpu_coredump *error)
48b031e3 1747{
53b725c7
DCS
1748 struct drm_i915_private *i915 = error->i915;
1749
53b725c7
DCS
1750 error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
1751 error->suspended = i915->runtime_pm.suspended;
f73b5674 1752
eb5be9d0
CW
1753 error->iommu = -1;
1754#ifdef CONFIG_INTEL_IOMMU
1755 error->iommu = intel_iommu_gfx_mapped;
1756#endif
53b725c7
DCS
1757 error->reset_count = i915_reset_count(&i915->gpu_error);
1758 error->suspend_count = i915->suspend_count;
2bd160a1 1759
8a25c4be 1760 i915_params_copy(&error->params, &i915->params);
2bd160a1 1761 memcpy(&error->device_info,
53b725c7 1762 INTEL_INFO(i915),
2bd160a1 1763 sizeof(error->device_info));
0258404f
JN
1764 memcpy(&error->runtime_info,
1765 RUNTIME_INFO(i915),
1766 sizeof(error->runtime_info));
53b725c7 1767 error->driver_caps = i915->caps;
48b031e3
MK
1768}
1769
742379c0
CW
1770struct i915_gpu_coredump *
1771i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
84a20a8a 1772{
742379c0
CW
1773 struct i915_gpu_coredump *error;
1774
8a25c4be 1775 if (!i915->params.error_capture)
742379c0
CW
1776 return NULL;
1777
1778 error = kzalloc(sizeof(*error), gfp);
1779 if (!error)
1780 return NULL;
1781
1782 kref_init(&error->ref);
1783 error->i915 = i915;
1784
1785 error->time = ktime_get_real();
1786 error->boottime = ktime_get_boottime();
1787 error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
1788 error->capture = jiffies;
1789
1790 capture_gen(error);
1791
1792 return error;
84a20a8a
MW
1793}
1794
742379c0
CW
1795#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
1796
1797struct intel_gt_coredump *
1798intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
8f5c6fe4 1799{
742379c0 1800 struct intel_gt_coredump *gc;
8f5c6fe4 1801
742379c0
CW
1802 gc = kzalloc(sizeof(*gc), gfp);
1803 if (!gc)
1804 return NULL;
1805
1806 gc->_gt = gt;
1807 gc->awake = intel_gt_pm_is_awake(gt);
1808
1809 gt_record_regs(gc);
1810 gt_record_fences(gc);
1811
1812 return gc;
1813}
895d8ebe 1814
742379c0
CW
1815struct i915_vma_compress *
1816i915_vma_capture_prepare(struct intel_gt_coredump *gt)
1817{
1818 struct i915_vma_compress *compress;
1819
1820 compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
1821 if (!compress)
1822 return NULL;
1823
1824 if (!compress_init(compress)) {
1825 kfree(compress);
1826 return NULL;
895d8ebe 1827 }
742379c0 1828
742379c0 1829 return compress;
8f5c6fe4
CW
1830}
1831
742379c0
CW
1832void i915_vma_capture_finish(struct intel_gt_coredump *gt,
1833 struct i915_vma_compress *compress)
1834{
1835 if (!compress)
1836 return;
eafc4894 1837
742379c0
CW
1838 compress_fini(compress);
1839 kfree(compress);
1840}
1841
bda30024
TU
1842struct i915_gpu_coredump *
1843i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
5a4c6f1b 1844{
bda30024 1845 struct drm_i915_private *i915 = gt->i915;
742379c0 1846 struct i915_gpu_coredump *error;
5a4c6f1b 1847
e6154e4c
CW
1848 /* Check if GPU capture has been disabled */
1849 error = READ_ONCE(i915->gpu_error.first_error);
1850 if (IS_ERR(error))
1851 return error;
1852
742379c0
CW
1853 error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
1854 if (!error)
e6154e4c 1855 return ERR_PTR(-ENOMEM);
5a4c6f1b 1856
bda30024 1857 error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL);
742379c0
CW
1858 if (error->gt) {
1859 struct i915_vma_compress *compress;
3bdd4f84 1860
742379c0
CW
1861 compress = i915_vma_capture_prepare(error->gt);
1862 if (!compress) {
1863 kfree(error->gt);
1864 kfree(error);
1865 return ERR_PTR(-ENOMEM);
1866 }
5a4c6f1b 1867
792592e7 1868 gt_record_info(error->gt);
bda30024 1869 gt_record_engines(error->gt, engine_mask, compress);
742379c0
CW
1870
1871 if (INTEL_INFO(i915)->has_gt_uc)
1872 error->gt->uc = gt_record_uc(error->gt, compress);
3bdd4f84 1873
742379c0
CW
1874 i915_vma_capture_finish(error->gt, compress);
1875
1876 error->simulated |= error->gt->simulated;
1877 }
3bdd4f84
CW
1878
1879 error->overlay = intel_overlay_capture_error_state(i915);
3bdd4f84 1880
5a4c6f1b
CW
1881 return error;
1882}
1883
742379c0 1884void i915_error_state_store(struct i915_gpu_coredump *error)
1d762aad 1885{
742379c0 1886 struct drm_i915_private *i915;
53a4c6b2 1887 static bool warned;
1d762aad 1888
742379c0 1889 if (IS_ERR_OR_NULL(error))
98a2f411
CW
1890 return;
1891
742379c0 1892 i915 = error->i915;
58f44aad 1893 drm_info(&i915->drm, "%s\n", error_msg(error));
9777cca0 1894
742379c0
CW
1895 if (error->simulated ||
1896 cmpxchg(&i915->gpu_error.first_error, NULL, error))
1d762aad 1897 return;
1d762aad 1898
742379c0 1899 i915_gpu_coredump_get(error);
cb383002 1900
a1e37b02 1901 if (!xchg(&warned, true) &&
eafc4894 1902 ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
88f8065c 1903 pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
ddae4d7a
JN
1904 pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
1905 pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
88f8065c
CW
1906 pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1907 pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
1908 pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
1909 i915->drm.primary->index);
cb383002 1910 }
84734a04
MK
1911}
1912
742379c0
CW
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @gt: intel_gt which originated the hang
 * @engine_mask: hung engines
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
bda30024
TU
1924void i915_capture_error_state(struct intel_gt *gt,
1925 intel_engine_mask_t engine_mask)
742379c0
CW
1926{
1927 struct i915_gpu_coredump *error;
1928
bda30024 1929 error = i915_gpu_coredump(gt, engine_mask);
742379c0 1930 if (IS_ERR(error)) {
bda30024 1931 cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
742379c0
CW
1932 return;
1933 }
1934
1935 i915_error_state_store(error);
1936 i915_gpu_coredump_put(error);
1937}
1938
1939struct i915_gpu_coredump *
5a4c6f1b 1940i915_first_error_state(struct drm_i915_private *i915)
84734a04 1941{
742379c0 1942 struct i915_gpu_coredump *error;
84734a04 1943
5a4c6f1b
CW
1944 spin_lock_irq(&i915->gpu_error.lock);
1945 error = i915->gpu_error.first_error;
e6154e4c 1946 if (!IS_ERR_OR_NULL(error))
742379c0 1947 i915_gpu_coredump_get(error);
5a4c6f1b 1948 spin_unlock_irq(&i915->gpu_error.lock);
84734a04 1949
5a4c6f1b 1950 return error;
84734a04
MK
1951}
1952
5a4c6f1b 1953void i915_reset_error_state(struct drm_i915_private *i915)
84734a04 1954{
742379c0 1955 struct i915_gpu_coredump *error;
84734a04 1956
5a4c6f1b
CW
1957 spin_lock_irq(&i915->gpu_error.lock);
1958 error = i915->gpu_error.first_error;
e6154e4c
CW
1959 if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
1960 i915->gpu_error.first_error = NULL;
5a4c6f1b 1961 spin_unlock_irq(&i915->gpu_error.lock);
84734a04 1962
e6154e4c 1963 if (!IS_ERR_OR_NULL(error))
742379c0 1964 i915_gpu_coredump_put(error);
fb6f0b64
CW
1965}
1966
1967void i915_disable_error_state(struct drm_i915_private *i915, int err)
1968{
1969 spin_lock_irq(&i915->gpu_error.lock);
1970 if (!i915->gpu_error.first_error)
1971 i915->gpu_error.first_error = ERR_PTR(err);
1972 spin_unlock_irq(&i915->gpu_error.lock);
84734a04 1973}