/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 */
#include <linux/ascii85.h>
#include <linux/highmem.h>
#include <linux/nmi.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_cache.h>
#include <drm/drm_print.h>

#include "display/intel_dmc.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_guc_capture.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}
static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, ALLOW_FAIL);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	e->bytes += len;
}
static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned int len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}
static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}
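
/*
 * Example (illustrative, not part of the driver): i915_error_printer()
 * lets any drm_printer-based dumper write straight into the error buffer,
 * as the capability/parameter printers further below do:
 *
 *	struct drm_printer p = i915_error_printer(m);
 *
 *	intel_device_info_print(&error->device_info, &error->runtime_info, &p);
 */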
/* single threaded page allocator with a reserved stash for emergencies */
static void pool_fini(struct folio_batch *fbatch)
{
	folio_batch_release(fbatch);
}

static int pool_refill(struct folio_batch *fbatch, gfp_t gfp)
{
	while (folio_batch_space(fbatch)) {
		struct folio *folio;

		folio = folio_alloc(gfp, 0);
		if (!folio)
			return -ENOMEM;

		folio_batch_add(fbatch, folio);
	}

	return 0;
}

static int pool_init(struct folio_batch *fbatch, gfp_t gfp)
{
	int err;

	folio_batch_init(fbatch);

	err = pool_refill(fbatch, gfp);
	if (err)
		pool_fini(fbatch);

	return err;
}

static void *pool_alloc(struct folio_batch *fbatch, gfp_t gfp)
{
	struct folio *folio;

	folio = folio_alloc(gfp, 0);
	if (!folio && folio_batch_count(fbatch))
		folio = fbatch->folios[--fbatch->nr];

	return folio ? folio_address(folio) : NULL;
}

static void pool_free(struct folio_batch *fbatch, void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (folio_batch_space(fbatch))
		folio_batch_add(fbatch, folio);
	else
		folio_put(folio);
}
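
/*
 * Usage sketch (illustrative, not part of the driver): the pool is primed
 * with ALLOW_FAIL while sleeping is still allowed, then drawn from with
 * ATOMIC_MAYFAIL once capture enters atomic context:
 *
 *	struct folio_batch pool;
 *	void *page;
 *
 *	pool_init(&pool, ALLOW_FAIL);             // prime the emergency stash
 *	page = pool_alloc(&pool, ATOMIC_MAYFAIL); // may fall back to the stash
 *	pool_free(&pool, page);                   // refills the stash if space
 *	pool_fini(&pool);
 */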
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct i915_vma_compress {
	struct folio_batch pool;
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;

	if (pool_init(&c->pool, ALLOW_FAIL))
		return false;

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			ALLOW_FAIL);
	if (!zstream->workspace) {
		pool_fini(&c->pool);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);

	return true;
}

static bool compress_start(struct i915_vma_compress *c)
{
	struct z_stream_s *zstream = &c->zstream;
	void *workspace = zstream->workspace;

	memset(zstream, 0, sizeof(*zstream));
	zstream->workspace = workspace;

	return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
}

static void *compress_next_page(struct i915_vma_compress *c,
				struct i915_vma_coredump *dst)
{
	void *page_addr;
	struct page *page;

	page_addr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!page_addr)
		return ERR_PTR(-ENOMEM);

	page = virt_to_page(page_addr);
	list_add_tail(&page->lru, &dst->page_list);
	return page_addr;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		cond_resched();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(c, dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
	zlib_deflateEnd(&c->zstream);
}

static void compress_fini(struct i915_vma_compress *c)
{
	kfree(c->zstream.workspace);
	if (c->tmp)
		pool_free(&c->pool, c->tmp);
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

struct i915_vma_compress {
	struct folio_batch pool;
};

static bool compress_init(struct i915_vma_compress *c)
{
	return pool_init(&c->pool, ALLOW_FAIL) == 0;
}

static bool compress_start(struct i915_vma_compress *c)
{
	return true;
}

static int compress_page(struct i915_vma_compress *c,
			 void *src,
			 struct i915_vma_coredump *dst,
			 bool wc)
{
	void *ptr;

	ptr = pool_alloc(&c->pool, ALLOW_FAIL);
	if (!ptr)
		return -ENOMEM;

	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
		memcpy(ptr, src, PAGE_SIZE);
	list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list);
	cond_resched();

	return 0;
}

static int compress_flush(struct i915_vma_compress *c,
			  struct i915_vma_coredump *dst)
{
	return 0;
}

static void compress_finish(struct i915_vma_compress *c)
{
}

static void compress_fini(struct i915_vma_compress *c)
{
	pool_fini(&c->pool);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif
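
/*
 * Note on the dump framing (from the printers above and below): each
 * object body is prefixed by a one-character compression marker -- ':'
 * when CONFIG_DRM_I915_COMPRESS_ERROR deflates the pages, '~' when they
 * are stored raw -- followed by the page contents as an ascii85 stream.
 */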
static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct intel_engine_coredump *ee)
{
	int slice;
	int subslice;
	int iter;

	err_printf(m, " INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine->class != RENDER_CLASS || GRAPHICS_VER(m->i915) <= 3)
		return;

	err_printf(m, " SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (GRAPHICS_VER(m->i915) <= 6)
		return;

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
		err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);

	if (GRAPHICS_VER(m->i915) < 12)
		return;

	if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
		for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
			err_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
				   slice, subslice,
				   ee->instdone.geom_svg[slice][subslice]);
	}

	err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
		   ee->instdone.slice_common_extra[0]);
	err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",
		   ee->instdone.slice_common_extra[1]);
}
static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct i915_request_coredump *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->context, erq->seqno,
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &erq->flags) ? "!" : "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &erq->flags) ? "+" : "",
		   erq->sched_attr.priority,
		   erq->head, erq->tail);
}
static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct i915_gem_context_coredump *ctx)
{
	err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
		   header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
		   ctx->guilty, ctx->active,
		   ctx->total_runtime, ctx->avg_runtime);
	err_printf(m, " context timeline seqno %u\n", ctx->hwsp_seqno);
}
static struct i915_vma_coredump *
__find_vma(struct i915_vma_coredump *vma, const char *name)
{
	while (vma) {
		if (strcmp(vma->name, name) == 0)
			return vma;
		vma = vma->next;
	}

	return NULL;
}

struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
	return __find_vma(ee->vma, "batch");
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_coredump *ee)
{
	struct i915_vma_coredump *batch;
	int n;

	err_printf(m, "%s command stream:\n", ee->engine->name);
	err_printf(m, " CCID: 0x%08x\n", ee->ccid);
	err_printf(m, " START: 0x%08x\n", ee->start);
	err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, " CTL: 0x%08x\n", ee->ctl);
	err_printf(m, " MODE: 0x%08x\n", ee->mode);
	err_printf(m, " HWS: 0x%08x\n", ee->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
	err_printf(m, " ESR: 0x%08x\n", ee->esr);

	error_print_instdone(m, ee);

	batch = intel_gpu_error_find_batch(ee);
	if (batch) {
		u64 start = batch->gtt_offset;
		u64 end = start + batch->gtt_size;

		err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (GRAPHICS_VER(m->i915) >= 4) {
		err_printf(m, " BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (GRAPHICS_VER(m->i915) >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
	}
	if (GRAPHICS_VER(m->i915) >= 11) {
		err_printf(m, " NOPID: 0x%08x\n", ee->nopid);
		err_printf(m, " EXCC: 0x%08x\n", ee->excc);
		err_printf(m, " CMD_CCTL: 0x%08x\n", ee->cmd_cctl);
		err_printf(m, " CSCMDOP: 0x%08x\n", ee->cscmdop);
		err_printf(m, " CTX_SR_CTL: 0x%08x\n", ee->ctx_sr_ctl);
		err_printf(m, " DMA_FADDR_HI: 0x%08x\n", ee->dma_faddr_hi);
		err_printf(m, " DMA_FADDR_LO: 0x%08x\n", ee->dma_faddr_lo);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (GRAPHICS_VER(m->i915) >= 8) {
			int i;

			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, " ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n]);
	}
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
			       const struct intel_engine_cs *engine,
			       const struct i915_vma_coredump *vma)
{
	char out[ASCII85_BUFSZ];
	struct page *page;

	if (!vma)
		return;

	err_printf(m, "%s --- %s = 0x%08x %08x\n",
		   engine ? engine->name : "global", vma->name,
		   upper_32_bits(vma->gtt_offset),
		   lower_32_bits(vma->gtt_offset));

	if (vma->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
		err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes);

	err_compression_marker(m);
	list_for_each_entry(page, &vma->page_list, lru) {
		int i, len;
		const u32 *addr = page_address(page);

		len = PAGE_SIZE;
		if (page == list_last_entry(&vma->page_list, typeof(*page), lru))
			len -= vma->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(addr[i], out));
	}
	err_puts(m, "\n");
}
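
/*
 * Decoding sketch (illustrative userspace pseudo-code, not part of the
 * driver; "ascii85_decode" and "zlib_inflate" stand in for whatever
 * helpers the consumer has):
 *
 *	marker = *payload++;              // ':' or '~', see the note above
 *	data = ascii85_decode(payload);
 *	if (marker == ':')
 *		data = zlib_inflate(data); // deflated page stream
 */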
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   struct i915_gpu_coredump *error)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_print(&error->device_info, &error->runtime_info, &p);
	intel_display_device_info_print(&error->display_device_info,
					&error->display_runtime_info, &p);
	intel_driver_caps_print(&error->driver_caps, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}
static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
			      const char *name,
			      const struct intel_ctb_coredump *ctb)
{
	if (!ctb->size)
		return;

	err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
		   name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
		   ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}
static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct intel_uc_coredump *error_uc)
{
	struct drm_printer p = i915_error_printer(m);

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
	err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
	err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
	err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
	intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}
static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}
static void err_print_gt_info(struct drm_i915_error_state_buf *m,
			      struct intel_gt_coredump *gt)
{
	struct drm_printer p = i915_error_printer(m);

	intel_gt_info_print(&gt->info, &p);
	intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}
static void err_print_gt_display(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	err_printf(m, "IER: 0x%08x\n", gt->ier);
	err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
}
static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
				       struct intel_gt_coredump *gt)
{
	int i;

	err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
	err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   gt->clock_frequency, gt->clock_period_ns);
	err_printf(m, "EIR: 0x%08x\n", gt->eir);
	err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);

	for (i = 0; i < gt->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
}
static void err_print_gt_global(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);

	if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
		err_printf(m, "ERROR: 0x%08x\n", gt->error);
		err_printf(m, "DONE_REG: 0x%08x\n", gt->done_reg);
	}

	if (GRAPHICS_VER(m->i915) >= 8)
		err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
			   gt->fault_data1, gt->fault_data0);

	if (GRAPHICS_VER(m->i915) == 7)
		err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);

	if (IS_GRAPHICS_VER(m->i915, 8, 11))
		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);

	if (GRAPHICS_VER(m->i915) == 12)
		err_printf(m, "AUX_ERR_DBG: 0x%08x\n", gt->aux_err);

	if (GRAPHICS_VER(m->i915) >= 12) {
		int i;

		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
				   gt->sfc_done[i]);
		}

		err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
	}
}
static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
				struct intel_gt_coredump *gt)
{
	int i;

	for (i = 0; i < gt->nfence; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
}
static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
				 struct intel_gt_coredump *gt)
{
	const struct intel_engine_coredump *ee;

	for (ee = gt->engine; ee; ee = ee->next) {
		const struct i915_vma_coredump *vma;

		if (gt->uc && gt->uc->guc.is_guc_capture) {
			if (ee->guc_capture_node)
				intel_guc_capture_print_engine_node(m, ee);
			else
				err_printf(m, " Missing GuC capture node for %s\n",
					   ee->engine->name);
		} else {
			error_print_engine(m, ee);
		}

		err_printf(m, " hung: %u\n", ee->hung);
		err_printf(m, " engine reset count: %u\n", ee->reset_count);
		error_print_context(m, " Active context: ", &ee->context);

		for (vma = ee->vma; vma; vma = vma->next)
			intel_gpu_error_print_vma(m, ee->engine, vma);
	}
}
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_coredump *error)
{
	const struct intel_engine_coredump *ee;
	struct timespec64 ts;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s %s\n",
		   init_utsname()->release,
		   init_utsname()->machine);
	err_printf(m, "Driver: %s\n", DRIVER_DATE);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
		   error->capture, jiffies_to_msecs(jiffies - error->capture));

	for (ee = error->gt ? error->gt->engine : NULL; ee; ee = ee->next)
		err_printf(m, "Active process (on ring %s): %s [%d]\n",
			   ee->engine->name,
			   ee->context.comm,
			   ee->context.pid);

	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_printf(m, "Subplatform: 0x%x\n",
		   intel_subplatform(&error->runtime_info,
				     error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	intel_dmc_print_error_state(m, m->i915);

	err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
	err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));

	if (error->gt) {
		bool print_guc_capture = false;

		if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
			print_guc_capture = true;

		err_print_gt_display(m, error->gt);
		err_print_gt_global_nonguc(m, error->gt);
		err_print_gt_fences(m, error->gt);

		/*
		 * GuC dumped global, eng-class and eng-instance registers together
		 * as part of engine state dump so we print in err_print_gt_engines
		 */
		if (!print_guc_capture)
			err_print_gt_global(m, error->gt);

		err_print_gt_engines(m, error->gt);

		if (error->gt->uc)
			err_print_uc(m, error->gt->uc);

		err_print_gt_info(m, error->gt);
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	err_print_capabilities(m, error);
	err_print_params(m, &error->params);
}
static int err_print_to_sgl(struct i915_gpu_coredump *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}
ssize_t i915_gpu_coredump_copy_to_buffer(struct i915_gpu_coredump *error,
					 char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}
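
/*
 * Consumer sketch (illustrative, not part of the driver): the rendered
 * log is exposed through sysfs, so userspace can stream it with a plain
 * chunked read loop:
 *
 *	int fd = open("/sys/class/drm/card0/error", O_RDONLY);
 *	char chunk[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, chunk, sizeof(chunk))) > 0)
 *		fwrite(chunk, 1, n, stdout);
 *	close(fd);
 */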
static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
{
	while (vma) {
		struct i915_vma_coredump *next = vma->next;
		struct page *page, *n;

		list_for_each_entry_safe(page, n, &vma->page_list, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}

		kfree(vma);
		vma = next;
	}
}
static void cleanup_params(struct i915_gpu_coredump *error)
{
	i915_params_free(&error->params);
}
static void cleanup_uc(struct intel_uc_coredump *uc)
{
	kfree(uc->guc_fw.file_selected.path);
	kfree(uc->huc_fw.file_selected.path);
	kfree(uc->guc_fw.file_wanted.path);
	kfree(uc->huc_fw.file_wanted.path);
	i915_vma_coredump_free(uc->guc.vma_log);
	i915_vma_coredump_free(uc->guc.vma_ctb);

	kfree(uc);
}
static void cleanup_gt(struct intel_gt_coredump *gt)
{
	while (gt->engine) {
		struct intel_engine_coredump *ee = gt->engine;

		gt->engine = ee->next;

		i915_vma_coredump_free(ee->vma);
		intel_guc_capture_free_node(ee);
		kfree(ee);
	}

	if (gt->uc)
		cleanup_uc(gt->uc);

	kfree(gt);
}
void __i915_gpu_coredump_free(struct kref *error_ref)
{
	struct i915_gpu_coredump *error =
		container_of(error_ref, typeof(*error), ref);

	while (error->gt) {
		struct intel_gt_coredump *gt = error->gt;

		error->gt = gt->next;
		cleanup_gt(gt);
	}

	kfree(error->overlay);

	cleanup_params(error);

	err_free_sgl(error->sgl);
	kfree(error);
}
static struct i915_vma_coredump *
i915_vma_coredump_create(const struct intel_gt *gt,
			 const struct i915_vma_resource *vma_res,
			 struct i915_vma_compress *compress,
			 const char *name)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct i915_vma_coredump *dst;
	struct sgt_iter iter;
	int ret;

	might_sleep();

	if (!vma_res || !vma_res->bi.pages || !compress)
		return NULL;

	dst = kmalloc(sizeof(*dst), ALLOW_FAIL);
	if (!dst)
		return NULL;

	if (!compress_start(compress)) {
		kfree(dst);
		return NULL;
	}

	INIT_LIST_HEAD(&dst->page_list);
	strcpy(dst->name, name);
	dst->next = NULL;

	dst->gtt_offset = vma_res->start;
	dst->gtt_size = vma_res->node_size;
	dst->gtt_page_sizes = vma_res->page_sizes_gtt;
	dst->unused = 0;

	ret = -EINVAL;
	if (drm_mm_node_allocated(&ggtt->error_capture)) {
		void __iomem *s;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			mutex_lock(&ggtt->error_mutex);
			if (ggtt->vm.raw_insert_page)
				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
							 i915_gem_get_pat_index(gt->i915,
										I915_CACHE_NONE),
							 0);
			else
				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
						     i915_gem_get_pat_index(gt->i915,
									    I915_CACHE_NONE),
						     0);
			mb();

			s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);

			mb();
			ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
			mutex_unlock(&ggtt->error_mutex);
			if (ret)
				break;
		}
	} else if (vma_res->bi.lmem) {
		struct intel_memory_region *mem = vma_res->mr;
		dma_addr_t dma;

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			dma_addr_t offset = dma - mem->region.start;
			void __iomem *s;

			if (offset + PAGE_SIZE > mem->io_size) {
				ret = -EINVAL;
				break;
			}

			s = io_mapping_map_wc(&mem->iomap, offset, PAGE_SIZE);
			ret = compress_page(compress,
					    (void __force *)s, dst,
					    true);
			io_mapping_unmap(s);
			if (ret)
				break;
		}
	} else {
		struct page *page;

		for_each_sgt_page(page, iter, vma_res->bi.pages) {
			void *s;

			drm_clflush_pages(&page, 1);

			s = kmap_local_page(page);
			ret = compress_page(compress, s, dst, false);
			kunmap_local(s);

			drm_clflush_pages(&page, 1);

			if (ret)
				break;
		}
	}

	if (ret || compress_flush(compress, dst)) {
		struct page *page, *n;

		list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) {
			list_del_init(&page->lru);
			pool_free(&compress->pool, page_address(page));
		}

		kfree(dst);
		dst = NULL;
	}
	compress_finish(compress);

	return dst;
}
static void gt_record_fences(struct intel_gt_coredump *gt)
{
	struct i915_ggtt *ggtt = gt->_gt->ggtt;
	struct intel_uncore *uncore = gt->_gt->uncore;
	int i;

	if (GRAPHICS_VER(uncore->i915) >= 6) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_GEN6_LO(i));
	} else if (GRAPHICS_VER(uncore->i915) >= 4) {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read64(uncore,
						    FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < ggtt->num_fences; i++)
			gt->fence[i] =
				intel_uncore_read(uncore, FENCE_REG(i));
	}

	gt->nfence = i;
}
static void engine_record_registers(struct intel_engine_coredump *ee)
{
	const struct intel_engine_cs *engine = ee->engine;
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
			ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
							      XEHP_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 12)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN12_RING_FAULT_REG);
		else if (GRAPHICS_VER(i915) >= 8)
			ee->fault_reg = intel_uncore_read(engine->uncore,
							  GEN8_RING_FAULT_REG);
		else
			ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
	}

	if (GRAPHICS_VER(i915) >= 4) {
		ee->esr = ENGINE_READ(engine, RING_ESR);
		ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
		ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
		ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
		ee->instps = ENGINE_READ(engine, RING_INSTPS);
		ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
		ee->ccid = ENGINE_READ(engine, CCID);
		if (GRAPHICS_VER(i915) >= 8) {
			ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
			ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
		}
		ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
	} else {
		ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
		ee->ipeir = ENGINE_READ(engine, IPEIR);
		ee->ipehr = ENGINE_READ(engine, IPEHR);
	}

	if (GRAPHICS_VER(i915) >= 11) {
		ee->cmd_cctl = ENGINE_READ(engine, RING_CMD_CCTL);
		ee->cscmdop = ENGINE_READ(engine, RING_CSCMDOP);
		ee->ctx_sr_ctl = ENGINE_READ(engine, RING_CTX_SR_CTL);
		ee->dma_faddr_hi = ENGINE_READ(engine, RING_DMA_FADD_UDW);
		ee->dma_faddr_lo = ENGINE_READ(engine, RING_DMA_FADD);
		ee->nopid = ENGINE_READ(engine, RING_NOPID);
		ee->excc = ENGINE_READ(engine, RING_EXCC);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->instpm = ENGINE_READ(engine, RING_INSTPM);
	ee->acthd = intel_engine_get_active_head(engine);
	ee->start = ENGINE_READ(engine, RING_START);
	ee->head = ENGINE_READ(engine, RING_HEAD);
	ee->tail = ENGINE_READ(engine, RING_TAIL);
	ee->ctl = ENGINE_READ(engine, RING_CTL);
	if (GRAPHICS_VER(i915) > 2)
		ee->mode = ENGINE_READ(engine, RING_MI_MODE);

	if (!HWS_NEEDS_PHYSICAL(i915)) {
		i915_reg_t mmio;

		if (GRAPHICS_VER(i915) == 7) {
			switch (engine->id) {
			default:
				MISSING_CASE(engine->id);
				fallthrough;
			case RCS0:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS0:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS0:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS0:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (GRAPHICS_VER(engine->i915) == 6) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = intel_uncore_read(engine->uncore, mmio);
	}

	ee->reset_count = i915_reset_engine_count(&i915->gpu_error, engine);

	if (HAS_PPGTT(i915)) {
		int i;

		ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);

		if (GRAPHICS_VER(i915) == 6) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
		} else if (GRAPHICS_VER(i915) == 7) {
			ee->vm_info.pp_dir_base =
				ENGINE_READ(engine, RING_PP_DIR_BASE);
		} else if (GRAPHICS_VER(i915) >= 8) {
			u32 base = engine->mmio_base;

			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_UDW(base, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					intel_uncore_read(engine->uncore,
							  GEN8_RING_PDP_LDW(base, i));
			}
		}
	}
}
static void record_request(const struct i915_request *request,
			   struct i915_request_coredump *erq)
{
	erq->flags = request->fence.flags;
	erq->context = request->fence.context;
	erq->seqno = request->fence.seqno;
	erq->sched_attr = request->sched.attr;
	erq->head = request->head;
	erq->tail = request->tail;

	erq->pid = 0;
	rcu_read_lock();
	if (!intel_context_is_closed(request->context)) {
		const struct i915_gem_context *ctx;

		ctx = rcu_dereference(request->context->gem_context);
		if (ctx)
			erq->pid = pid_nr(ctx->pid);
	}
	rcu_read_unlock();
}
static void engine_record_execlists(struct intel_engine_coredump *ee)
{
	const struct intel_engine_execlists * const el = &ee->engine->execlists;
	struct i915_request * const *port = el->active;
	unsigned int n = 0;

	while (*port)
		record_request(*port++, &ee->execlist[n++]);

	ee->num_ports = n;
}
static bool record_context(struct i915_gem_context_coredump *e,
			   struct intel_context *ce)
{
	struct i915_gem_context *ctx;
	struct task_struct *task;
	bool simulated;

	rcu_read_lock();
	ctx = rcu_dereference(ce->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return true;

	rcu_read_lock();
	task = pid_task(ctx->pid, PIDTYPE_PID);
	if (task) {
		strcpy(e->comm, task->comm);
		e->pid = task->pid;
	}
	rcu_read_unlock();

	e->sched_attr = ctx->sched;
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);
	e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
				*ce->timeline->hwsp_seqno : ~0U;

	e->total_runtime = intel_context_get_total_runtime_ns(ce);
	e->avg_runtime = intel_context_get_avg_runtime_ns(ce);

	simulated = i915_gem_context_no_error_capture(ctx);

	i915_gem_context_put(ctx);
	return simulated;
}
struct intel_engine_capture_vma {
	struct intel_engine_capture_vma *next;
	struct i915_vma_resource *vma_res;
	char name[16];
	bool lockdep_cookie;
};
static struct intel_engine_capture_vma *
capture_vma_snapshot(struct intel_engine_capture_vma *next,
		     struct i915_vma_resource *vma_res,
		     gfp_t gfp, const char *name)
{
	struct intel_engine_capture_vma *c;

	if (!vma_res)
		return next;

	c = kmalloc(sizeof(*c), gfp);
	if (!c)
		return next;

	if (!i915_vma_resource_hold(vma_res, &c->lockdep_cookie)) {
		kfree(c);
		return next;
	}

	strcpy(c->name, name);
	c->vma_res = i915_vma_resource_get(vma_res);

	c->next = next;
	return c;
}
static struct intel_engine_capture_vma *
capture_vma(struct intel_engine_capture_vma *next,
	    struct i915_vma *vma,
	    const char *name,
	    gfp_t gfp)
{
	if (!vma)
		return next;

	/*
	 * If the vma isn't pinned, then the vma should be snapshotted
	 * to a struct i915_vma_snapshot at command submission time.
	 * Not here.
	 */
	if (GEM_WARN_ON(!i915_vma_is_pinned(vma)))
		return next;

	next = capture_vma_snapshot(next, vma->resource, gfp, name);

	return next;
}
static struct intel_engine_capture_vma *
capture_user(struct intel_engine_capture_vma *capture,
	     const struct i915_request *rq,
	     gfp_t gfp)
{
	struct i915_capture_list *c;

	for (c = rq->capture_list; c; c = c->next)
		capture = capture_vma_snapshot(capture, c->vma_res, gfp,
					       "user");

	return capture;
}
static void add_vma(struct intel_engine_coredump *ee,
		    struct i915_vma_coredump *vma)
{
	if (vma) {
		vma->next = ee->vma;
		ee->vma = vma;
	}
}
static struct i915_vma_coredump *
create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma,
		    const char *name, struct i915_vma_compress *compress)
{
	struct i915_vma_coredump *ret = NULL;
	struct i915_vma_resource *vma_res;
	bool lockdep_cookie;

	if (!vma)
		return NULL;

	vma_res = vma->resource;

	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
		ret = i915_vma_coredump_create(gt, vma_res, compress, name);
		i915_vma_resource_unhold(vma_res, lockdep_cookie);
	}

	return ret;
}
static void add_vma_coredump(struct intel_engine_coredump *ee,
			     const struct intel_gt *gt,
			     struct i915_vma *vma,
			     const char *name,
			     struct i915_vma_compress *compress)
{
	add_vma(ee, create_vma_coredump(gt, vma, name, compress));
}
struct intel_engine_coredump *
intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
	struct intel_engine_coredump *ee;

	ee = kzalloc(sizeof(*ee), gfp);
	if (!ee)
		return NULL;

	ee->engine = engine;

	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
		engine_record_registers(ee);
		engine_record_execlists(ee);
	}

	return ee;
}
static struct intel_engine_capture_vma *
engine_coredump_add_context(struct intel_engine_coredump *ee,
			    struct intel_context *ce,
			    gfp_t gfp)
{
	struct intel_engine_capture_vma *vma = NULL;

	ee->simulated |= record_context(&ee->context, ce);
	if (ee->simulated)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma(vma, ce->ring->vma, "ring", gfp);
	vma = capture_vma(vma, ce->state, "HW context", gfp);

	return vma;
}
struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
				  struct i915_request *rq,
				  gfp_t gfp)
{
	struct intel_engine_capture_vma *vma;

	vma = engine_coredump_add_context(ee, rq->context, gfp);
	if (!vma)
		return NULL;

	/*
	 * We need to copy these to an anonymous buffer
	 * as the simplest method to avoid being overwritten
	 * by userspace.
	 */
	vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
	vma = capture_user(vma, rq, gfp);

	ee->rq_head = rq->head;
	ee->rq_post = rq->postfix;
	ee->rq_tail = rq->tail;

	return vma;
}
void
intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
			      struct intel_engine_capture_vma *capture,
			      struct i915_vma_compress *compress)
{
	const struct intel_engine_cs *engine = ee->engine;

	while (capture) {
		struct intel_engine_capture_vma *this = capture;
		struct i915_vma_resource *vma_res = this->vma_res;

		add_vma(ee,
			i915_vma_coredump_create(engine->gt, vma_res,
						 compress, this->name));

		i915_vma_resource_unhold(vma_res, this->lockdep_cookie);
		i915_vma_resource_put(vma_res);

		capture = this->next;
		kfree(this);
	}

	add_vma_coredump(ee, engine->gt, engine->status_page.vma,
			 "HW Status", compress);

	add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma,
			 "WA context", compress);
}
static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
	       struct i915_vma_compress *compress,
	       u32 dump_flags)
{
	struct intel_engine_capture_vma *capture = NULL;
	struct intel_engine_coredump *ee;
	struct intel_context *ce = NULL;
	struct i915_request *rq = NULL;

	ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
	if (!ee)
		return NULL;

	intel_engine_get_hung_entity(engine, &ce, &rq);
	if (rq && !i915_request_started(rq))
		drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
			 engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);

	if (rq) {
		capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
		i915_request_put(rq);
	} else if (ce) {
		capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
	}

	if (capture) {
		intel_engine_coredump_add_vma(ee, capture, compress);

		if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
			intel_guc_capture_get_matching_node(engine->gt, ee, ce);
	} else {
		kfree(ee);
		ee = NULL;
	}

	return ee;
}
static void
gt_record_engines(struct intel_gt_coredump *gt,
		  intel_engine_mask_t engine_mask,
		  struct i915_vma_compress *compress,
		  u32 dump_flags)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->_gt, id) {
		struct intel_engine_coredump *ee;

		/* Refill our page pool before entering atomic section */
		pool_refill(&compress->pool, ALLOW_FAIL);

		ee = capture_engine(engine, compress, dump_flags);
		if (!ee)
			continue;

		ee->hung = engine->mask & engine_mask;

		gt->simulated |= ee->simulated;
		if (ee->simulated) {
			if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
				intel_guc_capture_free_node(ee);
			kfree(ee);
			continue;
		}

		ee->next = gt->engine;
		gt->engine = ee;
	}
}
static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
			      const struct intel_guc_ct_buffer *ctb,
			      const void *blob_ptr, struct intel_guc *guc)
{
	if (!ctb || !ctb->desc)
		return;

	saved->raw_status = ctb->desc->status;
	saved->raw_head = ctb->desc->head;
	saved->raw_tail = ctb->desc->tail;
	saved->head = ctb->head;
	saved->tail = ctb->tail;
	saved->size = ctb->size;
	saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
	saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
}
static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
	     struct i915_vma_compress *compress)
{
	const struct intel_uc *uc = &gt->_gt->uc;
	struct intel_uc_coredump *error_uc;

	error_uc = kzalloc(sizeof(*error_uc), ALLOW_FAIL);
	if (!error_uc)
		return NULL;

	memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
	memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));

	error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
	error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
	error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
	error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);

	/*
	 * Save the GuC log and include a timestamp reference for converting the
	 * log times to system times (in conjunction with the error->boottime and
	 * gt->clock_frequency fields saved elsewhere).
	 */
	error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
	error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
						    "GuC log buffer", compress);
	error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
						    "GuC CT buffer", compress);
	error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
	gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
	gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
			  uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);

	return error_uc;
}
/* Capture display registers. */
static void gt_record_display_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;

	if (GRAPHICS_VER(i915) >= 6)
		gt->derrmr = intel_uncore_read(uncore, DERRMR);

	if (GRAPHICS_VER(i915) >= 8)
		gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
	else if (IS_VALLEYVIEW(i915))
		gt->ier = intel_uncore_read(uncore, VLV_IER);
	else if (HAS_PCH_SPLIT(i915))
		gt->ier = intel_uncore_read(uncore, DEIER);
	else if (GRAPHICS_VER(i915) == 2)
		gt->ier = intel_uncore_read16(uncore, GEN2_IER);
	else
		gt->ier = intel_uncore_read(uncore, GEN2_IER);
}
/* Capture all other registers that GuC doesn't capture. */
static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	if (IS_VALLEYVIEW(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	} else if (GRAPHICS_VER(i915) >= 11) {
		gt->gtier[0] =
			intel_uncore_read(uncore,
					  GEN11_RENDER_COPY_INTR_ENABLE);
		gt->gtier[1] =
			intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
		gt->gtier[2] =
			intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
		gt->gtier[3] =
			intel_uncore_read(uncore,
					  GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		gt->gtier[4] =
			intel_uncore_read(uncore,
					  GEN11_CRYPTO_RSVD_INTR_ENABLE);
		gt->gtier[5] =
			intel_uncore_read(uncore,
					  GEN11_GUNIT_CSME_INTR_ENABLE);
		gt->ngtier = 6;
	} else if (GRAPHICS_VER(i915) >= 8) {
		for (i = 0; i < 4; i++)
			gt->gtier[i] =
				intel_uncore_read(uncore, GEN8_GT_IER(i));
		gt->ngtier = 4;
	} else if (HAS_PCH_SPLIT(i915)) {
		gt->gtier[0] = intel_uncore_read(uncore, GTIER);
		gt->ngtier = 1;
	}

	gt->eir = intel_uncore_read(uncore, EIR);
	gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}
/*
 * Capture all registers that relate to workload submission.
 * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us.
 */
static void gt_record_global_regs(struct intel_gt_coredump *gt)
{
	struct intel_uncore *uncore = gt->_gt->uncore;
	struct drm_i915_private *i915 = uncore->i915;
	int i;

	/*
	 * General organization:
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(i915))
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);

	if (GRAPHICS_VER(i915) == 7)
		gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
							XEHP_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
							XEHP_FAULT_TLB_DATA1);
	} else if (GRAPHICS_VER(i915) >= 12) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN12_FAULT_TLB_DATA1);
	} else if (GRAPHICS_VER(i915) >= 8) {
		gt->fault_data0 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA0);
		gt->fault_data1 = intel_uncore_read(uncore,
						    GEN8_FAULT_TLB_DATA1);
	}

	if (GRAPHICS_VER(i915) == 6) {
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
		gt->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
		gt->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (GRAPHICS_VER(i915) >= 7)
		gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);

	if (GRAPHICS_VER(i915) >= 6) {
		if (GRAPHICS_VER(i915) < 12) {
			gt->error = intel_uncore_read(uncore, ERROR_GEN6);
			gt->done_reg = intel_uncore_read(uncore, DONE_REG);
		}
	}

	/* 3: Feature specific registers */
	if (IS_GRAPHICS_VER(i915, 6, 7)) {
		gt->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
		gt->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
	}

	if (IS_GRAPHICS_VER(i915, 8, 11))
		gt->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);

	if (GRAPHICS_VER(i915) == 12)
		gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);

	if (GRAPHICS_VER(i915) >= 12) {
		for (i = 0; i < I915_MAX_SFC; i++) {
			/*
			 * SFC_DONE resides in the VD forcewake domain, so it
			 * only exists if the corresponding VCS engine is
			 * present.
			 */
			if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
			    !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
				continue;

			gt->sfc_done[i] =
				intel_uncore_read(uncore, GEN12_SFC_DONE(i));
		}

		gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
	}
}
static void gt_record_info(struct intel_gt_coredump *gt)
{
	memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
	gt->clock_frequency = gt->_gt->clock_frequency;
	gt->clock_period_ns = gt->_gt->clock_period_ns;
}
/*
 * Generate a semi-unique error code. The code is not meant to have meaning; its
 * only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static u32 generate_ecode(const struct intel_engine_coredump *ee)
{
	/*
	 * IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it has some very common
	 * synchronization commands which almost always appear in cases that are
	 * strictly a client bug. Use instdone to differentiate those somewhat.
	 */
	return ee ? ee->ipehr ^ ee->instdone.instdone : 0;
}
static const char *error_msg(struct i915_gpu_coredump *error)
{
	struct intel_engine_coredump *first = NULL;
	unsigned int hung_classes = 0;
	struct intel_gt_coredump *gt;
	int len;

	for (gt = error->gt; gt; gt = gt->next) {
		struct intel_engine_coredump *cs;

		for (cs = gt->engine; cs; cs = cs->next) {
			if (cs->hung) {
				hung_classes |= BIT(cs->engine->uabi_class);
				if (!first)
					first = cs;
			}
		}
	}

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%x:%08x",
			GRAPHICS_VER(error->i915), hung_classes,
			generate_ecode(first));
	if (first && first->context.pid) {
		/* Just show the first executing process, more is confusing */
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 first->context.comm, first->context.pid);
	}

	return error->error_msg;
}
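
/*
 * Illustrative result of error_msg() (all values invented for the example):
 *
 *	GPU HANG: ecode 12:1:85dffdff, in gnome-shell [1042]
 */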
static void capture_gen(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915 = error->i915;

	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
	error->suspended = i915->runtime_pm.suspended;

	error->iommu = i915_vtd_active(i915);
	error->reset_count = i915_reset_count(&i915->gpu_error);
	error->suspend_count = i915->suspend_count;

	i915_params_copy(&error->params, &i915->params);
	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	memcpy(&error->runtime_info,
	       RUNTIME_INFO(i915),
	       sizeof(error->runtime_info));
	memcpy(&error->display_device_info, DISPLAY_INFO(i915),
	       sizeof(error->display_device_info));
	memcpy(&error->display_runtime_info, DISPLAY_RUNTIME_INFO(i915),
	       sizeof(error->display_runtime_info));
	error->driver_caps = i915->caps;
}
struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
{
	struct i915_gpu_coredump *error;

	if (!i915->params.error_capture)
		return NULL;

	error = kzalloc(sizeof(*error), gfp);
	if (!error)
		return NULL;

	kref_init(&error->ref);
	error->i915 = i915;

	error->time = ktime_get_real();
	error->boottime = ktime_get_boottime();
	error->uptime = ktime_sub(ktime_get(), to_gt(i915)->last_init_time);
	error->capture = jiffies;

	capture_gen(error);

	return error;
}
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
struct intel_gt_coredump *
intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
{
	struct intel_gt_coredump *gc;

	gc = kzalloc(sizeof(*gc), gfp);
	if (!gc)
		return NULL;

	gc->_gt = gt;
	gc->awake = intel_gt_pm_is_awake(gt);

	gt_record_display_regs(gc);
	gt_record_global_nonguc_regs(gc);

	/*
	 * GuC dumps global, eng-class and eng-instance registers
	 * (that can change as part of engine state during execution)
	 * before an engine is reset due to a hung context.
	 * GuC captures and reports all three groups of registers
	 * together as a single set before the engine is reset.
	 * Thus, if GuC triggered the context reset we retrieve
	 * the register values as part of gt_record_engines.
	 */
	if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
		gt_record_global_regs(gc);

	gt_record_fences(gc);

	return gc;
}
struct i915_vma_compress *
i915_vma_capture_prepare(struct intel_gt_coredump *gt)
{
	struct i915_vma_compress *compress;

	compress = kmalloc(sizeof(*compress), ALLOW_FAIL);
	if (!compress)
		return NULL;

	if (!compress_init(compress)) {
		kfree(compress);
		return NULL;
	}

	return compress;
}
void i915_vma_capture_finish(struct intel_gt_coredump *gt,
			     struct i915_vma_compress *compress)
{
	if (!compress)
		return;

	compress_fini(compress);
	kfree(compress);
}
static struct i915_gpu_coredump *
__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_gpu_coredump *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = i915_gpu_coredump_alloc(i915, ALLOW_FAIL);
	if (!error)
		return ERR_PTR(-ENOMEM);

	error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
	if (error->gt) {
		struct i915_vma_compress *compress;

		compress = i915_vma_capture_prepare(error->gt);
		if (!compress) {
			kfree(error->gt);
			kfree(error);
			return ERR_PTR(-ENOMEM);
		}

		if (INTEL_INFO(i915)->has_gt_uc) {
			error->gt->uc = gt_record_uc(error->gt, compress);
			if (error->gt->uc) {
				if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
					error->gt->uc->guc.is_guc_capture = true;
				else
					GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
			}
		}

		gt_record_info(error->gt);
		gt_record_engines(error->gt, engine_mask, compress, dump_flags);

		i915_vma_capture_finish(error->gt, compress);

		error->simulated |= error->gt->simulated;
	}

	error->overlay = intel_overlay_capture_error_state(i915);

	return error;
}
struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
	static DEFINE_MUTEX(capture_mutex);
	int ret = mutex_lock_interruptible(&capture_mutex);
	struct i915_gpu_coredump *dump;

	if (ret)
		return ERR_PTR(ret);

	dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
	mutex_unlock(&capture_mutex);

	return dump;
}
void i915_error_state_store(struct i915_gpu_coredump *error)
{
	struct drm_i915_private *i915;
	static bool warned;

	if (IS_ERR_OR_NULL(error))
		return;

	i915 = error->i915;
	drm_info(&i915->drm, "%s\n", error_msg(error));

	if (error->simulated ||
	    cmpxchg(&i915->gpu_error.first_error, NULL, error))
		return;

	i915_gpu_coredump_get(error);

	if (!xchg(&warned, true) &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			i915->drm.primary->index);
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @gt: intel_gt which originated the hang
 * @engine_mask: hung engines
 * @dump_flags: dump flags
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask, u32 dump_flags)
{
	struct i915_gpu_coredump *error;

	error = i915_gpu_coredump(gt, engine_mask, dump_flags);
	if (IS_ERR(error)) {
		cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
		return;
	}

	i915_error_state_store(error);
	i915_gpu_coredump_put(error);
}
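
/*
 * Typical call from a hang/reset path (illustrative):
 *
 *	i915_capture_error_state(gt, engine->mask, CORE_DUMP_FLAG_NONE);
 */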
struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}
void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_coredump *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
		i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	if (!IS_ERR_OR_NULL(error))
		i915_gpu_coredump_put(error);
}
void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
	spin_lock_irq(&i915->gpu_error.lock);
	if (!i915->gpu_error.first_error)
		i915->gpu_error.first_error = ERR_PTR(err);
	spin_unlock_irq(&i915->gpu_error.lock);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_klog_error_capture(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask)
{
	static int g_count;
	struct drm_i915_private *i915 = gt->i915;
	struct i915_gpu_coredump *error;
	intel_wakeref_t wakeref;
	size_t buf_size = PAGE_SIZE * 128;
	size_t pos_err;
	char *buf, *ptr, *next;
	int l_count = g_count++;
	int line = 0;

	/* Can't allocate memory during a reset */
	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		drm_err(&gt->i915->drm, "[Capture/%d.%d] Inside GT reset, skipping error capture :(\n",
			l_count, line++);
		return;
	}

	error = READ_ONCE(i915->gpu_error.first_error);
	if (error) {
		drm_err(&i915->drm, "[Capture/%d.%d] Clearing existing error capture first...\n",
			l_count, line++);
		i915_reset_error_state(i915);
	}

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		error = i915_gpu_coredump(gt, engine_mask, CORE_DUMP_FLAG_NONE);

	if (IS_ERR(error)) {
		drm_err(&i915->drm, "[Capture/%d.%d] Failed to capture error capture: %ld!\n",
			l_count, line++, PTR_ERR(error));
		return;
	}

	buf = kvmalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		drm_err(&i915->drm, "[Capture/%d.%d] Failed to allocate buffer for error capture!\n",
			l_count, line++);
		i915_gpu_coredump_put(error);
		return;
	}

	drm_info(&i915->drm, "[Capture/%d.%d] Dumping i915 error capture for %ps...\n",
		 l_count, line++, __builtin_return_address(0));

	/* Largest string length safe to print via dmesg */
#	define MAX_CHUNK	800

	pos_err = 0;
	while (1) {
		ssize_t got = i915_gpu_coredump_copy_to_buffer(error, buf, pos_err, buf_size - 1);

		if (got <= 0)
			break;

		buf[got] = 0;
		pos_err += got;

		ptr = buf;
		while (got > 0) {
			size_t count;
			char tag[2];

			next = strnchr(ptr, got, '\n');
			if (next) {
				count = next - ptr;
				*next = 0;
				tag[0] = '>';
				tag[1] = '<';
			} else {
				count = got;
				tag[0] = '}';
				tag[1] = '{';
			}

			if (count > MAX_CHUNK) {
				size_t pos;
				char *ptr2 = ptr;

				for (pos = MAX_CHUNK; pos < count; pos += MAX_CHUNK) {
					char chr = ptr[pos];

					ptr[pos] = 0;
					drm_info(&i915->drm, "[Capture/%d.%d] }%s{\n",
						 l_count, line++, ptr2);
					ptr[pos] = chr;
					ptr2 = ptr + pos;

					/*
					 * If spewing large amounts of data via a serial console,
					 * this can be a very slow process. So be friendly and try
					 * not to cause 'softlockup on CPU' problems.
					 */
					cond_resched();
				}

				if (ptr2 < (ptr + count))
					drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
						 l_count, line++, tag[0], ptr2, tag[1]);
				else if (tag[0] == '>')
					drm_info(&i915->drm, "[Capture/%d.%d] ><\n",
						 l_count, line++);
			} else {
				drm_info(&i915->drm, "[Capture/%d.%d] %c%s%c\n",
					 l_count, line++, tag[0], ptr, tag[1]);
			}

			ptr = next;
			got -= count;
			if (next) {
				ptr++;
				got--;
			}

			/* As above. */
			cond_resched();
		}

		if (got)
			drm_info(&i915->drm, "[Capture/%d.%d] Got %zd bytes remaining!\n",
				 l_count, line++, got);
	}

	kvfree(buf);

	drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);