/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <linux/ascii85.h>
#include <linux/nmi.h>
#include <linux/scatterlist.h>
#include <linux/stop_machine.h>
#include <linux/utsname.h>
#include <linux/zlib.h>

#include <drm/drm_print.h>

#include "i915_gpu_error.h"
#include "i915_drv.h"
static inline const struct intel_engine_cs *
engine_lookup(const struct drm_i915_private *i915, unsigned int id)
{
	if (id >= I915_NUM_ENGINES)
		return NULL;

	return i915->engine[id];
}

static inline const char *
__engine_name(const struct intel_engine_cs *engine)
{
	return engine ? engine->name : "";
}

static const char *
engine_name(const struct drm_i915_private *i915, unsigned int id)
{
	return __engine_name(engine_lookup(i915, id));
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static void __sg_set_buf(struct scatterlist *sg,
			 void *addr, unsigned int len, loff_t it)
{
	sg->page_link = (unsigned long)virt_to_page(addr);
	sg->offset = offset_in_page(addr);
	sg->length = len;
	sg->dma_address = it;
}
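/*
 * The error state text is accumulated in ordinary kmalloc'd chunks which
 * are tracked by a hand-rolled scatterlist, so no single huge allocation
 * is required while the machine is wedged. Each filled chunk is recorded
 * in the current sg entry; when a page of entries is exhausted, a fresh
 * page is linked on via SG_CHAIN. The otherwise unused sg dma_address
 * field stores the logical file offset of each chunk, which is what allows
 * i915_gpu_state_copy_to_buffer() to seek by offset later.
 */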
static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
	if (!len)
		return false;

	if (e->bytes + len + 1 <= e->size)
		return true;

	if (e->bytes) {
		__sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
		e->iter += e->bytes;
		e->buf = NULL;
		e->bytes = 0;
	}

	if (e->cur == e->end) {
		struct scatterlist *sgl;

		sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
		if (!sgl) {
			e->err = -ENOMEM;
			return false;
		}

		if (e->cur) {
			e->cur->offset = 0;
			e->cur->length = 0;
			e->cur->page_link =
				(unsigned long)sgl | SG_CHAIN;
		} else {
			e->sgl = sgl;
		}

		e->cur = sgl;
		e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
	}

	e->size = ALIGN(len + 1, SZ_64K);
	e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!e->buf) {
		e->size = PAGE_ALIGN(len + 1);
		e->buf = kmalloc(e->size, GFP_KERNEL);
	}
	if (!e->buf) {
		e->err = -ENOMEM;
		return false;
	}

	return true;
}
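/*
 * Formatting is two-pass: a sizing vsnprintf(NULL, 0, ...) determines how
 * much space the message needs (growing the buffer via __i915_error_grow()
 * if required) before the real vscnprintf() writes into the reserved room.
 */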
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *fmt, va_list args)
{
	va_list ap;
	int len;

	if (e->err)
		return;

	va_copy(ap, args);
	len = vsnprintf(NULL, 0, fmt, ap);
	va_end(ap);
	if (len <= 0) {
		e->err = len;
		return;
	}

	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes >= e->size);
	len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
	if (len >= 0) {
		e->bytes += len;
		e->buf[e->bytes] = '\0';
	}
}
static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
	unsigned int len;

	if (e->err || !str)
		return;

	len = strlen(str);
	if (!__i915_error_grow(e, len))
		return;

	GEM_BUG_ON(e->bytes + len > e->size);
	memcpy(e->buf + e->bytes, str, len);
	e->bytes += len;
	e->buf[e->bytes] = '\0';
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
	i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}

static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
	struct drm_printer p = {
		.printfn = __i915_printfn_error,
		.arg = e,
	};
	return p;
}
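/*
 * i915_error_printer() bridges the error state buffer to a struct
 * drm_printer so that the driver's generic dump helpers can write straight
 * into the capture. A minimal sketch of the pattern used later in this
 * file:
 *
 *	struct drm_printer p = i915_error_printer(m);
 *
 *	intel_device_info_dump_flags(info, &p);
 *
 * Every drm_printf() on @p is routed through __i915_printfn_error() into
 * i915_error_vprintf().
 */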
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

struct compress {
	struct z_stream_s zstream;
	void *tmp;
};

static bool compress_init(struct compress *c)
{
	struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			GFP_ATOMIC | __GFP_NOWARN);
	if (!zstream->workspace)
		return false;

	if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
		kfree(zstream->workspace);
		return false;
	}

	c->tmp = NULL;
	if (i915_has_memcpy_from_wc())
		c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);

	return true;
}
static void *compress_next_page(struct drm_i915_error_object *dst)
{
	unsigned long page;

	if (dst->page_count >= dst->num_pages)
		return ERR_PTR(-ENOSPC);

	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page)
		return ERR_PTR(-ENOMEM);

	return dst->pages[dst->page_count++] = (void *)page;
}
static int compress_page(struct compress *c,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	zstream->next_in = src;
	if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
		zstream->next_in = c->tmp;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			zstream->next_out = compress_next_page(dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
			return -EIO;

		touch_nmi_watchdog();
	} while (zstream->avail_in);

	/* Fallback to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}
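/*
 * Compression runs in atomic context (GFP_ATOMIC allocations, under
 * stop_machine() at capture time), hence the page-at-a-time Z_NO_FLUSH
 * loop above and the touch_nmi_watchdog() call to keep large captures from
 * tripping the hard-lockup detector.
 */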
static int compress_flush(struct compress *c,
			  struct drm_i915_error_object *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	do {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK: /* more space requested */
			zstream->next_out = compress_next_page(dst);
			if (IS_ERR(zstream->next_out))
				return PTR_ERR(zstream->next_out);

			zstream->avail_out = PAGE_SIZE;
			break;

		case Z_STREAM_END:
			goto end;

		default: /* any error */
			return -EIO;
		}
	} while (1);

end:
	memset(zstream->next_out, 0, zstream->avail_out);
	dst->unused = zstream->avail_out;
	return 0;
}
static void compress_fini(struct compress *c,
			  struct drm_i915_error_object *dst)
{
	struct z_stream_s *zstream = &c->zstream;

	zlib_deflateEnd(zstream);
	kfree(zstream->workspace);

	if (c->tmp)
		free_page((unsigned long)c->tmp);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}
#else

struct compress {
};

static bool compress_init(struct compress *c)
{
	return true;
}

static int compress_page(struct compress *c,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	unsigned long page;
	void *ptr;

	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	ptr = (void *)page;
	if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
		memcpy(ptr, src, PAGE_SIZE);
	dst->pages[dst->page_count++] = ptr;

	return 0;
}

static int compress_flush(struct compress *c,
			  struct drm_i915_error_object *dst)
{
	return 0;
}

static void compress_fini(struct compress *c,
			  struct drm_i915_error_object *dst)
{
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif
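/*
 * Either way, the dump is prefixed with a one-character compression marker
 * emitted by err_compression_marker(): ':' when the following pages are
 * zlib-deflated, '~' when they are stored raw, so a decoder can choose the
 * right inflate path before ascii85-decoding the payload.
 */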
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x_%08x %8u %02x %02x %02x",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->wseqno);
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->engine != -1 ? " " : "");
		err_puts(m, engine_name(m->i915, err->engine));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 const struct drm_i915_error_engine *ee)
{
	int slice;
	int subslice;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (INTEL_GEN(m->i915) <= 6)
		return;

	for_each_instdone_slice_subslice(m->i915, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_instdone_slice_subslice(m->i915, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);
}
static const char *bannable(const struct drm_i915_error_context *ctx)
{
	return ctx->bannable ? "" : " (unbannable)";
}
static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				const struct drm_i915_error_request *erq,
				const unsigned long epoch)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->ban_score,
		   erq->context, erq->seqno, erq->sched_attr.priority,
		   jiffies_to_msecs(erq->jiffies - epoch),
		   erq->start, erq->head, erq->tail);
}
static void error_print_context(struct drm_i915_error_state_buf *m,
				const char *header,
				const struct drm_i915_error_context *ctx)
{
	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
		   ctx->sched_attr.priority, ctx->ban_score, bannable(ctx),
		   ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
			       const struct drm_i915_error_engine *ee,
			       const unsigned long epoch)
{
	int n;

	err_printf(m, "%s command stream:\n",
		   engine_name(m->i915, ee->engine_id));
	err_printf(m, "  IDLE?: %s\n", yesno(ee->idle));
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);

	error_print_instdone(m, ee);

	if (ee->batchbuffer) {
		u64 start = ee->batchbuffer->gtt_offset;
		u64 end = start + ee->batchbuffer->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x\n",
			   ee->semaphore_mboxes[0]);
		err_printf(m, "  SYNC_1: 0x%08x\n",
			   ee->semaphore_mboxes[1]);
		if (HAS_VEBOX(m->i915))
			err_printf(m, "  SYNC_2: 0x%08x\n",
				   ee->semaphore_mboxes[2]);
	}
	if (HAS_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
	err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
	err_printf(m, "  waiting: %s\n", yesno(ee->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
	err_printf(m, "  hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
	err_printf(m, "  hangcheck action: %s\n",
		   hangcheck_action_to_str(ee->hangcheck_action));
	err_printf(m, "  hangcheck action timestamp: %dms (%lu%s)\n",
		   jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
		   ee->hangcheck_timestamp,
		   ee->hangcheck_timestamp == epoch ? "; epoch" : "");
	err_printf(m, "  engine reset count: %u\n", ee->reset_count);

	for (n = 0; n < ee->num_ports; n++) {
		err_printf(m, "  ELSP[%d]:", n);
		error_print_request(m, " ", &ee->execlist[n], epoch);
	}

	error_print_context(m, "  Active context: ", &ee->context);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct intel_engine_cs *engine,
			    const char *name,
			    struct drm_i915_error_object *obj)
{
	char out[ASCII85_BUFSZ];
	int page;

	if (!obj)
		return;

	if (name) {
		err_printf(m, "%s --- %s = 0x%08x %08x\n",
			   engine ? engine->name : "global", name,
			   upper_32_bits(obj->gtt_offset),
			   lower_32_bits(obj->gtt_offset));
	}

	err_compression_marker(m);
	for (page = 0; page < obj->page_count; page++) {
		int i, len;

		len = PAGE_SIZE;
		if (page == obj->page_count - 1)
			len -= obj->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++)
			err_puts(m, ascii85_encode(obj->pages[page][i], out));
	}
	err_puts(m, "\n");
}
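/*
 * Object contents are emitted as ascii85: obj->pages[] holds u32 words, and
 * each word becomes one ascii85 symbol group, with the zlib slack recorded
 * in obj->unused trimmed from the final page so the stream decodes to the
 * exact payload length.
 */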
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   const struct intel_device_info *info,
				   const struct intel_driver_caps *caps)
{
	struct drm_printer p = i915_error_printer(m);

	intel_device_info_dump_flags(info, &p);
	intel_driver_caps_print(caps, &p);
	intel_device_info_dump_topology(&info->sseu, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *params)
{
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
			    struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
}
static void err_print_uc(struct drm_i915_error_state_buf *m,
			 const struct i915_error_uc *error_uc)
{
	struct drm_printer p = i915_error_printer(m);
	const struct i915_gpu_state *error =
		container_of(error_uc, typeof(*error), uc);

	if (!error->device_info.has_guc)
		return;

	intel_uc_fw_dump(&error_uc->guc_fw, &p);
	intel_uc_fw_dump(&error_uc->huc_fw, &p);
	print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
}
static void err_free_sgl(struct scatterlist *sgl)
{
	while (sgl) {
		struct scatterlist *sg;

		for (sg = sgl; !sg_is_chain(sg); sg++) {
			kfree(sg_virt(sg));
			if (sg_is_last(sg))
				break;
		}

		sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
		free_page((unsigned long)sgl);
		sgl = sg;
	}
}
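/*
 * err_free_sgl() walks the same layout that __i915_error_grow() builds:
 * each entry in a page of scatterlist entries owns a kmalloc'd text chunk,
 * the final entry may instead be an SG_CHAIN link to the next page, and
 * the page itself is freed once its entries have been released.
 */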
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
			       struct i915_gpu_state *error)
{
	struct drm_i915_error_object *obj;
	struct timespec64 ts;
	int i, j;

	if (*error->error_msg)
		err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Kernel: %s\n", init_utsname()->release);
	ts = ktime_to_timespec64(error->time);
	err_printf(m, "Time: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->boottime);
	err_printf(m, "Boottime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	ts = ktime_to_timespec64(error->uptime);
	err_printf(m, "Uptime: %lld s %ld us\n",
		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
	err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ);
	err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n",
		   error->capture,
		   jiffies_to_msecs(jiffies - error->capture),
		   jiffies_to_msecs(error->capture - error->epoch));

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_stalled &&
		    error->engine[i].context.pid) {
			err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
				   engine_name(m->i915, i),
				   error->engine[i].context.comm,
				   error->engine[i].context.pid,
				   error->engine[i].context.ban_score,
				   bannable(&error->engine[i].context));
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
	err_print_pciid(m, m->i915);

	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(m->i915)) {
		struct intel_csr *csr = &m->i915->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "GT awake: %s\n", yesno(error->awake));
	err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
	err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	for (i = 0; i < error->ngtier; i++)
		err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n",
		   m->i915->gpu_error.missed_irq_rings);

	for (i = 0; i < error->nfence; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_GEN(m->i915) >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(m->i915))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].engine_id != -1)
			error_print_engine(m, &error->engine[i], error->epoch);
	}

	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
		char buf[128];
		int len, first = 1;

		if (!error->active_vm[i])
			break;

		len = scnprintf(buf, sizeof(buf), "Active (");
		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
			if (error->engine[j].vm != error->active_vm[i])
				continue;

			len += scnprintf(buf + len, sizeof(buf), "%s%s",
					 first ? "" : ", ",
					 m->i915->engine[j]->name);
			first = 0;
		}
		scnprintf(buf + len, sizeof(buf), ")");
		print_error_buffers(m, buf,
				    error->active_bo[i],
				    error->active_bo_count[i]);
	}

	print_error_buffers(m, "Pinned (global)",
			    error->pinned_bo,
			    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		const struct drm_i915_error_engine *ee = &error->engine[i];

		obj = ee->batchbuffer;
		if (obj) {
			err_puts(m, m->i915->engine[i]->name);
			if (ee->context.pid)
				err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
					   ee->context.comm,
					   ee->context.pid,
					   ee->context.handle,
					   ee->context.hw_id,
					   ee->context.ban_score,
					   bannable(&ee->context));
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, m->i915->engine[i], NULL, obj);
		}

		for (j = 0; j < ee->user_bo_count; j++)
			print_error_obj(m, m->i915->engine[i],
					"user", ee->user_bo[j]);

		if (ee->num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   m->i915->engine[i]->name,
				   ee->num_requests);
			for (j = 0; j < ee->num_requests; j++)
				error_print_request(m, " ",
						    &ee->requests[j],
						    error->epoch);
		}

		if (IS_ERR(ee->waiters)) {
			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
				   m->i915->engine[i]->name);
		} else if (ee->num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   m->i915->engine[i]->name,
				   ee->num_waiters);
			for (j = 0; j < ee->num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   ee->waiters[j].seqno,
					   ee->waiters[j].comm,
					   ee->waiters[j].pid);
			}
		}

		print_error_obj(m, m->i915->engine[i],
				"ringbuffer", ee->ringbuffer);

		print_error_obj(m, m->i915->engine[i],
				"HW Status", ee->hws_page);

		print_error_obj(m, m->i915->engine[i],
				"HW context", ee->ctx);

		print_error_obj(m, m->i915->engine[i],
				"WA context", ee->wa_ctx);

		print_error_obj(m, m->i915->engine[i],
				"WA batchbuffer", ee->wa_batchbuffer);

		print_error_obj(m, m->i915->engine[i],
				"NULL context", ee->default_state);
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, error->display);

	err_print_capabilities(m, &error->device_info, &error->driver_caps);
	err_print_params(m, &error->params);
	err_print_uc(m, &error->uc);
}
static int err_print_to_sgl(struct i915_gpu_state *error)
{
	struct drm_i915_error_state_buf m;

	if (IS_ERR(error))
		return PTR_ERR(error);

	if (READ_ONCE(error->sgl))
		return 0;

	memset(&m, 0, sizeof(m));
	m.i915 = error->i915;

	__err_print_to_sgl(&m, error);

	if (m.buf) {
		__sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
		m.bytes = 0;
		m.buf = NULL;
	}
	if (m.cur) {
		GEM_BUG_ON(m.end < m.cur);
		sg_mark_end(m.cur - 1);
	}
	GEM_BUG_ON(m.sgl && !m.cur);

	if (m.err) {
		err_free_sgl(m.sgl);
		return m.err;
	}

	if (cmpxchg(&error->sgl, NULL, m.sgl))
		err_free_sgl(m.sgl);

	return 0;
}
ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
				      char *buf, loff_t off, size_t rem)
{
	struct scatterlist *sg;
	size_t count;
	loff_t pos;
	int err;

	if (!error || !rem)
		return 0;

	err = err_print_to_sgl(error);
	if (err)
		return err;

	sg = READ_ONCE(error->fit);
	if (!sg || off < sg->dma_address)
		sg = error->sgl;
	if (!sg)
		return 0;

	pos = sg->dma_address;
	count = 0;
	do {
		size_t len, start;

		if (sg_is_chain(sg)) {
			sg = sg_chain_ptr(sg);
			GEM_BUG_ON(sg_is_chain(sg));
		}

		len = sg->length;
		if (pos + len <= off) {
			pos += len;
			continue;
		}

		start = sg->offset;
		if (pos < off) {
			GEM_BUG_ON(off - pos > len);
			len -= off - pos;
			start += off - pos;
			pos = off;
		}

		len = min(len, rem);
		GEM_BUG_ON(!len || len > sg->length);

		memcpy(buf, page_address(sg_page(sg)) + start, len);

		count += len;
		pos += len;

		buf += len;
		rem -= len;
		if (!rem) {
			WRITE_ONCE(error->fit, sg);
			break;
		}
	} while (!sg_is_last(sg++));

	return count;
}
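/*
 * Reads of the error file are almost always sequential, so the scatterlist
 * entry that satisfied the previous read is cached in error->fit and used
 * as the starting point for the next call, avoiding an O(n) walk of the
 * chain for every chunk read.
 */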
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		free_page((unsigned long)obj->pages[page]);

	kfree(obj);
}
static __always_inline void free_param(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *"))
		kfree(*(void **)x);
}

static void cleanup_params(struct i915_gpu_state *error)
{
#define FREE(T, x, ...) free_param(#T, &error->params.x);
	I915_PARAMS_FOR_EACH(FREE);
#undef FREE
}
static void cleanup_uc_state(struct i915_gpu_state *error)
{
	struct i915_error_uc *error_uc = &error->uc;

	kfree(error_uc->guc_fw.path);
	kfree(error_uc->huc_fw.path);
	i915_error_object_free(error_uc->guc_log);
}
void __i915_gpu_state_free(struct kref *error_ref)
{
	struct i915_gpu_state *error =
		container_of(error_ref, typeof(*error), ref);
	long i, j;

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		for (j = 0; j < ee->user_bo_count; j++)
			i915_error_object_free(ee->user_bo[j]);
		kfree(ee->user_bo);

		i915_error_object_free(ee->batchbuffer);
		i915_error_object_free(ee->wa_batchbuffer);
		i915_error_object_free(ee->ringbuffer);
		i915_error_object_free(ee->hws_page);
		i915_error_object_free(ee->ctx);
		i915_error_object_free(ee->wa_ctx);

		kfree(ee->requests);
		if (!IS_ERR_OR_NULL(ee->waiters))
			kfree(ee->waiters);
	}

	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
		kfree(error->active_bo[i]);
	kfree(error->pinned_bo);

	kfree(error->overlay);
	kfree(error->display);

	cleanup_params(error);
	cleanup_uc_state(error);

	err_free_sgl(error->sgl);
	kfree(error);
}
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *i915,
			 struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct drm_i915_error_object *dst;
	struct compress compress;
	unsigned long num_pages;
	struct sgt_iter iter;
	dma_addr_t dma;
	int ret;

	if (!vma)
		return NULL;

	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
		      GFP_ATOMIC | __GFP_NOWARN);
	if (!dst)
		return NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;
	dst->num_pages = num_pages;
	dst->page_count = 0;
	dst->unused = 0;

	if (!compress_init(&compress)) {
		kfree(dst);
		return NULL;
	}

	ret = -EINVAL;
	for_each_sgt_dma(dma, iter, vma->pages) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
		ret = compress_page(&compress, (void __force *)s, dst);
		io_mapping_unmap_atomic(s);
		if (ret)
			break;
	}

	if (ret || compress_flush(&compress, dst)) {
		while (dst->page_count--)
			free_page((unsigned long)dst->pages[dst->page_count]);
		kfree(dst);
		dst = NULL;
	}

	compress_fini(&compress, dst);
	return dst;
}
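/*
 * Reading the object above relies on the single GGTT page reserved at
 * ggtt->error_capture: each dma address is bound into that slot in turn
 * and copied out through an atomic write-combining iomapping, so no new
 * mappings need to be created while the GPU is wedged.
 */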
/* The error capture is special as it tries to run underneath the normal
 * locking rules - so we use the raw version of the i915_gem_active lookup.
 */
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
	struct i915_request *request;

	request = __i915_gem_active_peek(active);
	return request ? request->global_seqno : 0;
}

static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
	struct i915_request *request;

	request = __i915_gem_active_peek(active);
	return request ? request->engine->id : -1;
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	err->size = obj->base.size;
	err->name = obj->base.name;

	err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
	err->engine = __active_get_engine_id(&obj->frontbuffer_write);

	err->gtt_offset = vma->node.start;
	err->read_domains = obj->read_domains;
	err->write_domain = obj->write_domain;
	err->fence_reg = vma->fence ? vma->fence->id : -1;
	err->tiling = i915_gem_object_get_tiling(obj);
	err->dirty = obj->mm.dirty;
	err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->cache_level = obj->cache_level;
}
static u32 capture_error_bo(struct drm_i915_error_buffer *err,
			    int count, struct list_head *head,
			    bool pinned_only)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		if (!vma->obj)
			continue;

		if (pinned_only && !i915_vma_is_pinned(vma))
			continue;

		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}
/* Generate a semi-unique error code. The code is not meant to have meaning; its
 * only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct i915_gpu_state *error,
					 int *engine_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it has some very common
	 * synchronization commands which almost always appear in cases that
	 * are strictly a client bug. Use instdone to differentiate some of
	 * those.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->engine[i].hangcheck_stalled) {
			if (engine_id)
				*engine_id = i;

			return error->engine[i].ipehr ^
			       error->engine[i].instdone.instdone;
		}
	}

	return error_code;
}
static void gem_record_fences(struct i915_gpu_state *error)
{
	struct drm_i915_private *dev_priv = error->i915;
	int i;

	if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 4) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	}
	error->nfence = i;
}
static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	if (HAS_VEBOX(dev_priv))
		ee->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
}
static void error_record_engine_waiters(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ee->num_waiters = 0;
	ee->waiters = NULL;

	if (RB_EMPTY_ROOT(&b->waiters))
		return;

	if (!spin_trylock_irq(&b->rb_lock)) {
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock_irq(&b->rb_lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	if (!spin_trylock_irq(&b->rb_lock)) {
		kfree(waiter);
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	ee->waiters = waiter;
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ee->num_waiters == count)
			break;
	}
	spin_unlock_irq(&b->rb_lock);
}
static void error_record_engine_registers(struct i915_gpu_state *error,
					  struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 6) {
		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
		} else {
			gen6_record_semaphore_state(engine, ee);
			ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
		}
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ee->faddr = I915_READ(DMA_FADD_I8XX);
		ee->ipeir = I915_READ(IPEIR);
		ee->ipehr = I915_READ(IPEHR);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->waiting = intel_engine_has_waiter(engine);
	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ee->acthd = intel_engine_get_active_head(engine);
	ee->seqno = intel_engine_get_seqno(engine);
	ee->last_seqno = intel_engine_last_submit(engine);
	ee->start = I915_READ_START(engine);
	ee->head = I915_READ_HEAD(engine);
	ee->tail = I915_READ_TAIL(engine);
	ee->ctl = I915_READ_CTL(engine);
	if (INTEL_GEN(dev_priv) > 2)
		ee->mode = I915_READ_MODE(engine);

	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = I915_READ(mmio);
	}

	ee->idle = intel_engine_is_idle(engine);
	ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
	ee->hangcheck_action = engine->hangcheck.action;
	ee->hangcheck_stalled = engine->hangcheck.stalled;
	ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
						  engine);

	if (HAS_PPGTT(dev_priv)) {
		int i;

		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}
static void record_request(struct i915_request *request,
			   struct drm_i915_error_request *erq)
{
	struct i915_gem_context *ctx = request->gem_context;

	erq->context = ctx->hw_id;
	erq->sched_attr = request->sched.attr;
	erq->ban_score = atomic_read(&ctx->ban_score);
	erq->seqno = request->global_seqno;
	erq->jiffies = request->emitted_jiffies;
	erq->start = i915_ggtt_offset(request->ring->vma);
	erq->head = request->head;
	erq->tail = request->tail;

	rcu_read_lock();
	erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
	rcu_read_unlock();
}
static void engine_record_requests(struct intel_engine_cs *engine,
				   struct i915_request *first,
				   struct drm_i915_error_engine *ee)
{
	struct i915_request *request;
	int count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->timeline.requests, link)
		count++;
	if (!count)
		return;

	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
	if (!ee->requests)
		return;

	ee->num_requests = count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->timeline.requests, link) {
		if (count >= ee->num_requests) {
			/*
			 * If the ring request list was changed in
			 * between the point where the error request
			 * list was created and dimensioned and this
			 * point then just exit early to avoid crashes.
			 *
			 * We don't need to communicate that the
			 * request list changed state during error
			 * state capture and that the error state is
			 * slightly incorrect as a consequence since we
			 * are typically only interested in the request
			 * list state at the point of error state
			 * capture, not in any changes happening during
			 * the capture.
			 */
			break;
		}

		record_request(request, &ee->requests[count++]);
	}
	ee->num_requests = count;
}
static void error_record_engine_execlists(struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	unsigned int n;

	for (n = 0; n < execlists_num_ports(execlists); n++) {
		struct i915_request *rq = port_request(&execlists->port[n]);

		if (!rq)
			break;

		record_request(rq, &ee->execlist[n]);
	}

	ee->num_ports = n;
}
static void record_context(struct drm_i915_error_context *e,
			   struct i915_gem_context *ctx)
{
	if (ctx->pid) {
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(ctx->pid, PIDTYPE_PID);
		if (task) {
			strcpy(e->comm, task->comm);
			e->pid = task->pid;
		}
		rcu_read_unlock();
	}

	e->handle = ctx->user_handle;
	e->hw_id = ctx->hw_id;
	e->sched_attr = ctx->sched;
	e->ban_score = atomic_read(&ctx->ban_score);
	e->bannable = i915_gem_context_is_bannable(ctx);
	e->guilty = atomic_read(&ctx->guilty_count);
	e->active = atomic_read(&ctx->active_count);
}
static void request_record_user_bo(struct i915_request *request,
				   struct drm_i915_error_engine *ee)
{
	struct i915_capture_list *c;
	struct drm_i915_error_object **bo;
	long count, max;

	max = 0;
	for (c = request->capture_list; c; c = c->next)
		max++;
	if (!max)
		return;

	bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
	if (!bo) {
		/* If we can't capture everything, try to capture something. */
		max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
		bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
	}
	if (!bo)
		return;

	count = 0;
	for (c = request->capture_list; c; c = c->next) {
		bo[count] = i915_error_object_create(request->i915, c->vma);
		if (!bo[count])
			break;
		if (++count == max)
			break;
	}

	ee->user_bo = bo;
	ee->user_bo_count = count;
}
static struct drm_i915_error_object *
capture_object(struct drm_i915_private *dev_priv,
	       struct drm_i915_gem_object *obj)
{
	if (obj && i915_gem_object_has_pages(obj)) {
		struct i915_vma fake = {
			.node = { .start = U64_MAX, .size = obj->base.size },
			.size = obj->base.size,
			.pages = obj->mm.pages,
			.obj = obj,
		};

		return i915_error_object_create(dev_priv, &fake);
	} else {
		return NULL;
	}
}
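/*
 * Note the on-stack fake vma above: engine->default_state is a bare GEM
 * object with no GGTT binding, so a stand-in vma is synthesized around its
 * backing pages purely to satisfy the i915_error_object_create() interface
 * (node.start = U64_MAX presumably marking it as never bound in the dump).
 */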
static void gem_record_rings(struct i915_gpu_state *error)
{
	struct drm_i915_private *i915 = error->i915;
	struct i915_ggtt *ggtt = &i915->ggtt;
	int i;

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = i915->engine[i];
		struct drm_i915_error_engine *ee = &error->engine[i];
		struct i915_request *request;

		ee->engine_id = -1;

		if (!engine)
			continue;

		ee->engine_id = i;

		error_record_engine_registers(error, engine, ee);
		error_record_engine_waiters(engine, ee);
		error_record_engine_execlists(engine, ee);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct i915_gem_context *ctx = request->gem_context;
			struct intel_ring *ring;

			ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;

			record_context(&ee->context, ctx);

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			ee->batchbuffer =
				i915_error_object_create(i915, request->batch);

			if (HAS_BROKEN_CS_TLB(i915))
				ee->wa_batchbuffer =
					i915_error_object_create(i915,
								 i915->gt.scratch);
			request_record_user_bo(request, ee);

			ee->ctx =
				i915_error_object_create(i915,
							 request->hw_context->state);

			error->simulated |=
				i915_gem_context_no_error_capture(ctx);

			ee->rq_head = request->head;
			ee->rq_post = request->postfix;
			ee->rq_tail = request->tail;

			ring = request->ring;
			ee->cpu_ring_head = ring->head;
			ee->cpu_ring_tail = ring->tail;
			ee->ringbuffer =
				i915_error_object_create(i915, ring->vma);

			engine_record_requests(engine, request, ee);
		}

		ee->hws_page =
			i915_error_object_create(i915,
						 engine->status_page.vma);

		ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma);

		ee->default_state = capture_object(i915, engine->default_state);
	}
}
static void gem_capture_vm(struct i915_gpu_state *error,
			   struct i915_address_space *vm,
			   int idx)
{
	struct drm_i915_error_buffer *active_bo;
	struct i915_vma *vma;
	int count;

	count = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count++;

	active_bo = NULL;
	if (count)
		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
	if (active_bo)
		count = capture_error_bo(active_bo, count, &vm->active_list, false);
	else
		count = 0;

	error->active_vm[idx] = vm;
	error->active_bo[idx] = active_bo;
	error->active_bo_count[idx] = count;
}
static void capture_active_buffers(struct i915_gpu_state *error)
{
	int cnt = 0, i, j;

	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));

	/* Scan each engine looking for unique active contexts/vm */
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];
		bool found;

		if (!ee->vm)
			continue;

		found = false;
		for (j = 0; j < i && !found; j++)
			found = error->engine[j].vm == ee->vm;
		if (!found)
			gem_capture_vm(error, ee->vm, cnt++);
	}
}
static void capture_pinned_buffers(struct i915_gpu_state *error)
{
	struct i915_address_space *vm = &error->i915->ggtt.vm;
	struct drm_i915_error_buffer *bo;
	struct i915_vma *vma;
	int count_inactive, count_active;

	count_inactive = 0;
	list_for_each_entry(vma, &vm->inactive_list, vm_link)
		count_inactive++;

	count_active = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count_active++;

	bo = NULL;
	if (count_inactive + count_active)
		bo = kcalloc(count_inactive + count_active,
			     sizeof(*bo), GFP_ATOMIC);
	if (!bo)
		return;

	count_inactive = capture_error_bo(bo, count_inactive,
					  &vm->active_list, true);
	count_active = capture_error_bo(bo + count_inactive, count_active,
					&vm->inactive_list, true);
	error->pinned_bo_count = count_inactive + count_active;
	error->pinned_bo = bo;
}
static void capture_uc_state(struct i915_gpu_state *error)
{
	struct drm_i915_private *i915 = error->i915;
	struct i915_error_uc *error_uc = &error->uc;

	/* Capturing uC state won't be useful if there is no GuC */
	if (!error->device_info.has_guc)
		return;

	error_uc->guc_fw = i915->guc.fw;
	error_uc->huc_fw = i915->huc.fw;

	/* Non-default firmware paths will be specified by the modparam.
	 * As modparams are generally accessible from userspace, make
	 * explicit copies of the firmware paths.
	 */
	error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC);
	error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC);
	error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma);
}
/* Capture all registers which don't fit into another category. */
static void capture_reg_state(struct i915_gpu_state *error)
{
	struct drm_i915_private *dev_priv = error->i915;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev_priv)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev_priv))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_GEN(dev_priv) >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev_priv)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_GEN(dev_priv) >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_GEN(dev_priv) >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_GEN(dev_priv) >= 5)
		error->ccid = I915_READ(CCID);

	/* 3: Feature specific registers */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (INTEL_GEN(dev_priv) >= 11) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
		error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
		error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
		error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
		error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
		error->ngtier = 6;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
		error->ngtier = 4;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
		error->ngtier = 1;
	} else if (IS_GEN2(dev_priv)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev_priv)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
}
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct i915_gpu_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int engine_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &engine_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), engine_id, ecode);

	if (engine_id != -1 && error->engine[engine_id].context.pid)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->engine[engine_id].context.comm,
				 error->engine[engine_id].context.pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}
static void capture_gen_state(struct i915_gpu_state *error)
{
	struct drm_i915_private *i915 = error->i915;

	error->awake = i915->gt.awake;
	error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count);
	error->suspended = i915->runtime_pm.suspended;

	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&i915->gpu_error);
	error->suspend_count = i915->suspend_count;

	memcpy(&error->device_info,
	       INTEL_INFO(i915),
	       sizeof(error->device_info));
	error->driver_caps = i915->caps;
}
static __always_inline void dup_param(const char *type, void *x)
{
	if (!__builtin_strcmp(type, "char *"))
		*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}

static void capture_params(struct i915_gpu_state *error)
{
	error->params = i915_modparams;
#define DUP(T, x, ...) dup_param(#T, &error->params.x);
	I915_PARAMS_FOR_EACH(DUP);
#undef DUP
}
static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
{
	unsigned long epoch = error->capture;
	int i;

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		const struct drm_i915_error_engine *ee = &error->engine[i];

		if (ee->hangcheck_stalled &&
		    time_before(ee->hangcheck_timestamp, epoch))
			epoch = ee->hangcheck_timestamp;
	}

	return epoch;
}
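/*
 * The epoch chosen above is the earliest stalled-engine hangcheck
 * timestamp (falling back to the capture time): it becomes the zero point
 * for the relative millisecond values printed throughout the dump, e.g. by
 * error_print_request() and error_print_engine().
 */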
static void capture_finish(struct i915_gpu_state *error)
{
	struct i915_ggtt *ggtt = &error->i915->ggtt;
	const u64 slot = ggtt->error_capture.start;

	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}
static int capture(void *data)
{
	struct i915_gpu_state *error = data;

	error->time = ktime_get_real();
	error->boottime = ktime_get_boottime();
	error->uptime = ktime_sub(ktime_get(),
				  error->i915->gt.last_init_time);
	error->capture = jiffies;

	capture_params(error);
	capture_gen_state(error);
	capture_uc_state(error);
	capture_reg_state(error);
	gem_record_fences(error);
	gem_record_rings(error);
	capture_active_buffers(error);
	capture_pinned_buffers(error);

	error->overlay = intel_overlay_capture_error_state(error->i915);
	error->display = intel_display_capture_error_state(error->i915);

	error->epoch = capture_find_epoch(error);

	capture_finish(error);
	return 0;
}
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))

struct i915_gpu_state *
i915_capture_gpu_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	/* Check if GPU capture has been disabled */
	error = READ_ONCE(i915->gpu_error.first_error);
	if (IS_ERR(error))
		return error;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		i915_disable_error_state(i915, -ENOMEM);
		return ERR_PTR(-ENOMEM);
	}

	kref_init(&error->ref);
	error->i915 = i915;

	stop_machine(capture, error, NULL);

	return error;
}
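/*
 * Running capture() under stop_machine() parks every other CPU with
 * interrupts disabled, so the driver and hardware state is sampled as one
 * self-consistent snapshot without taking any driver locks (which may well
 * be held by the hung task being diagnosed).
 */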
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @i915: i915 device
 * @engine_mask: the mask of engines triggering the hang
 * @error_msg: a message to insert into the error capture header
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *i915,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct i915_gpu_state *error;
	unsigned long flags;

	if (!i915_modparams.error_capture)
		return;

	if (READ_ONCE(i915->gpu_error.first_error))
		return;

	error = i915_capture_gpu_state(i915);
	if (IS_ERR(error))
		return;

	i915_error_capture_msg(i915, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	if (!error->simulated) {
		spin_lock_irqsave(&i915->gpu_error.lock, flags);
		if (!i915->gpu_error.first_error) {
			i915->gpu_error.first_error = error;
			error = NULL;
		}
		spin_unlock_irqrestore(&i915->gpu_error.lock, flags);
	}

	if (error) {
		__i915_gpu_state_free(&error->ref);
		return;
	}

	if (!warned &&
	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			 i915->drm.primary->index);
		warned = true;
	}
}
struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (!IS_ERR_OR_NULL(error))
		i915_gpu_state_get(error);
	spin_unlock_irq(&i915->gpu_error.lock);

	return error;
}
void i915_reset_error_state(struct drm_i915_private *i915)
{
	struct i915_gpu_state *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
		i915->gpu_error.first_error = NULL;
	spin_unlock_irq(&i915->gpu_error.lock);

	if (!IS_ERR_OR_NULL(error))
		i915_gpu_state_put(error);
}
void i915_disable_error_state(struct drm_i915_private *i915, int err)
{
	spin_lock_irq(&i915->gpu_error.lock);
	if (!i915->gpu_error.first_error)
		i915->gpu_error.first_error = ERR_PTR(err);
	spin_unlock_irq(&i915->gpu_error.lock);
}