/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 */
#include <generated/utsrelease.h>
#include <linux/stop_machine.h>
#include <linux/zlib.h>

#include "i915_drv.h"
static const char *engine_str(int engine)
{
	switch (engine) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	default: return "";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
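/*
 * Note how the three helpers above cooperate: the printers always format
 * into the buffer at e->bytes, __i915_error_seek() discards output that
 * lies entirely before the requested read offset (e->start), and
 * __i915_error_advance() trims the first chunk that straddles that offset
 * so the buffer begins exactly at e->start. A debugfs read at an arbitrary
 * offset can thus replay the whole error state while only buffering the
 * window that is actually visible.
 */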
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek until the first printf that hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek until the first printf that hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR

static bool compress_init(struct z_stream_s *zstream)
{
	memset(zstream, 0, sizeof(*zstream));

	zstream->workspace =
		kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
			GFP_ATOMIC | __GFP_NOWARN);
	if (!zstream->workspace)
		return false;

	if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
		kfree(zstream->workspace);
		return false;
	}

	return true;
}

static int compress_page(struct z_stream_s *zstream,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	zstream->next_in = src;
	zstream->avail_in = PAGE_SIZE;

	do {
		if (zstream->avail_out == 0) {
			unsigned long page;

			page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
			if (!page)
				return -ENOMEM;

			dst->pages[dst->page_count++] = (void *)page;

			zstream->next_out = (void *)page;
			zstream->avail_out = PAGE_SIZE;
		}

		if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
			return -EIO;
	} while (zstream->avail_in);

	/* Fall back to uncompressed if we increase size? */
	if (0 && zstream->total_out > zstream->total_in)
		return -E2BIG;

	return 0;
}

static void compress_fini(struct z_stream_s *zstream,
			  struct drm_i915_error_object *dst)
{
	if (dst) {
		zlib_deflate(zstream, Z_FINISH);
		dst->unused = zstream->avail_out;
	}

	zlib_deflateEnd(zstream);
	kfree(zstream->workspace);
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, ":");
}

#else

static bool compress_init(struct z_stream_s *zstream)
{
	return true;
}

static int compress_page(struct z_stream_s *zstream,
			 void *src,
			 struct drm_i915_error_object *dst)
{
	unsigned long page;

	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	dst->pages[dst->page_count++] =
		memcpy((void *)page, src, PAGE_SIZE);

	return 0;
}

static void compress_fini(struct z_stream_s *zstream,
			  struct drm_i915_error_object *dst)
{
}

static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
	err_puts(m, "~");
}

#endif
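/*
 * The single-character marker emitted before each object dump tells the
 * decoding tool how the pages that follow were encoded: ":" for
 * zlib-deflated data when CONFIG_DRM_I915_COMPRESS_ERROR is enabled, "~"
 * for plain uncompressed copies. Either way the payload itself is emitted
 * as ascii85 by print_error_obj() below.
 */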
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->engine != -1 ? " " : "");
		err_puts(m, engine_str(err->engine));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
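/*
 * Each buffer line printed above decodes as: upper/lower halves of the GTT
 * offset, size in bytes, read domains, write domain, the last-read seqno
 * for every engine, the last-write seqno, and finally a string of flags
 * (tiling, dirty, purgeable, userptr, owning engine and cache level).
 */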
static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 struct drm_i915_error_engine *ee)
{
	int slice;
	int subslice;

	err_printf(m, "  INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
		return;

	err_printf(m, "  SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (INTEL_GEN(m->i915) <= 6)
		return;

	for_each_instdone_slice_subslice(m->i915, slice, subslice)
		err_printf(m, "  SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.sampler[slice][subslice]);

	for_each_instdone_slice_subslice(m->i915, slice, subslice)
		err_printf(m, "  ROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice,
			   ee->instdone.row[slice][subslice]);
}
static void error_print_request(struct drm_i915_error_state_buf *m,
				const char *prefix,
				struct drm_i915_error_request *erq)
{
	if (!erq->seqno)
		return;

	err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
		   prefix, erq->pid,
		   erq->context, erq->seqno,
		   jiffies_to_msecs(jiffies - erq->jiffies),
		   erq->head, erq->tail);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
			       struct drm_i915_error_engine *ee)
{
	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
	err_printf(m, "  START: 0x%08x\n", ee->start);
	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, "  TAIL:  0x%08x [0x%08x, 0x%08x]\n",
		   ee->tail, ee->rq_post, ee->rq_tail);
	err_printf(m, "  CTL:   0x%08x\n", ee->ctl);
	err_printf(m, "  MODE:  0x%08x\n", ee->mode);
	err_printf(m, "  HWS:   0x%08x\n", ee->hws);
	err_printf(m, "  ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd >> 32), (u32)ee->acthd);
	err_printf(m, "  IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, "  IPEHR: 0x%08x\n", ee->ipehr);

	error_print_instdone(m, ee);

	if (ee->batchbuffer) {
		u64 start = ee->batchbuffer->gtt_offset;
		u64 end = start + ee->batchbuffer->gtt_size;

		err_printf(m, "  batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, "  BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr >> 32), (u32)ee->bbaddr);
		err_printf(m, "  BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, "  INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, "  INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, "  RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, "  FAULT_REG: 0x%08x\n", ee->fault_reg);
		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ee->semaphore_mboxes[0],
			   ee->semaphore_seqno[0]);
		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ee->semaphore_mboxes[1],
			   ee->semaphore_seqno[1]);
		if (HAS_VEBOX(m->i915)) {
			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ee->semaphore_mboxes[2],
				   ee->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(m->i915)) {
		err_printf(m, "  GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;

			for (i = 0; i < 4; i++)
				err_printf(m, "  PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
	err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
	err_printf(m, "  waiting: %s\n", yesno(ee->waiting));
	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
	err_printf(m, "  hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ee->hangcheck_action),
		   ee->hangcheck_score);
	error_print_request(m, "  ELSP[0]: ", &ee->execlist[0]);
	error_print_request(m, "  ELSP[1]: ", &ee->execlist[1]);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static int
ascii85_encode_len(int len)
{
	return DIV_ROUND_UP(len, 4);
}

static bool
ascii85_encode(u32 in, char *out)
{
	int i;

	if (in == 0)
		return false;

	out[5] = '\0';
	for (i = 5; i--; ) {
		out[i] = '!' + in % 85;
		in /= 85;
	}

	return true;
}
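/*
 * This is standard ascii85: each 32bit word expands to five characters in
 * the '!'..'u' range. The one extension used here is the "z" shorthand
 * emitted by print_error_obj() below for an all-zero word, which keeps
 * dumps of sparsely written pages compact.
 */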
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct intel_engine_cs *engine,
			    const char *name,
			    struct drm_i915_error_object *obj)
{
	char out[6];
	int page;

	if (!obj)
		return;

	if (name) {
		err_printf(m, "%s --- %s = 0x%08x %08x\n",
			   engine ? engine->name : "global", name,
			   upper_32_bits(obj->gtt_offset),
			   lower_32_bits(obj->gtt_offset));
	}

	err_compression_marker(m);
	for (page = 0; page < obj->page_count; page++) {
		int i, len;

		len = PAGE_SIZE;
		if (page == obj->page_count - 1)
			len -= obj->unused;
		len = ascii85_encode_len(len);

		for (i = 0; i < len; i++) {
			if (ascii85_encode(obj->pages[page][i], out))
				err_puts(m, out);
			else
				err_puts(m, "z");
		}
	}
	err_puts(m, "\n");
}
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   const struct intel_device_info *info)
{
#define PRINT_FLAG(x)  err_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int max_hangcheck_score;
	int i, j;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_print_capabilities(m, &error->device_info);
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->engine[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_score == max_hangcheck_score &&
		    error->engine[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   engine_str(i),
				   error->engine[i].comm,
				   error->engine[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].engine_id != -1)
			error_print_engine(m, &error->engine[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
		char buf[128];
		int len, first = 1;

		if (!error->active_vm[i])
			break;

		len = scnprintf(buf, sizeof(buf), "Active (");
		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
			if (error->engine[j].vm != error->active_vm[i])
				continue;

			len += scnprintf(buf + len, sizeof(buf), "%s%s",
					 first ? "" : ", ",
					 dev_priv->engine[j].name);
			first = 0;
		}
		scnprintf(buf + len, sizeof(buf), ")");
		print_error_buffers(m, buf,
				    error->active_bo[i],
				    error->active_bo_count[i]);
	}

	print_error_buffers(m, "Pinned (global)",
			    error->pinned_bo,
			    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		obj = ee->batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i].name);
			if (ee->pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   ee->comm,
					   ee->pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, &dev_priv->engine[i], NULL, obj);
		}

		if (ee->num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i].name,
				   ee->num_requests);
			for (j = 0; j < ee->num_requests; j++)
				error_print_request(m, " ", &ee->requests[j]);
		}

		if (IS_ERR(ee->waiters)) {
			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
				   dev_priv->engine[i].name);
		} else if (ee->num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i].name,
				   ee->num_waiters);
			for (j = 0; j < ee->num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   ee->waiters[j].seqno,
					   ee->waiters[j].comm,
					   ee->waiters[j].pid);
			}
		}

		print_error_obj(m, &dev_priv->engine[i],
				"ringbuffer", ee->ringbuffer);

		print_error_obj(m, &dev_priv->engine[i],
				"HW Status", ee->hws_page);

		print_error_obj(m, &dev_priv->engine[i],
				"HW context", ee->ctx);

		print_error_obj(m, &dev_priv->engine[i],
				"WA context", ee->wa_ctx);

		print_error_obj(m, &dev_priv->engine[i],
				"WA batchbuffer", ee->wa_batchbuffer);
	}

	print_error_obj(m, NULL, "Semaphores", error->semaphore);

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
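/*
 * The allocation above degrades gracefully: first try to grab a buffer
 * large enough for the whole read (count + 1, the extra byte leaving room
 * for vsnprintf's terminating NUL), then fall back to a single page, and
 * finally to a minimal 128-byte buffer before giving up with -ENOMEM.
 */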
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		free_page((unsigned long)obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		i915_error_object_free(ee->batchbuffer);
		i915_error_object_free(ee->wa_batchbuffer);
		i915_error_object_free(ee->ringbuffer);
		i915_error_object_free(ee->hws_page);
		i915_error_object_free(ee->ctx);
		i915_error_object_free(ee->wa_ctx);

		kfree(ee->requests);
		if (!IS_ERR_OR_NULL(ee->waiters))
			kfree(ee->waiters);
	}

	i915_error_object_free(error->semaphore);

	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
		kfree(error->active_bo[i]);
	kfree(error->pinned_bo);

	kfree(error->overlay);
	kfree(error->display);

	kfree(error);
}
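/*
 * Capturing an object's contents happens with the GPU wedged and various
 * locks potentially held, so i915_error_object_create() below avoids
 * touching existing mappings: each backing page is bound, one at a time,
 * into the GGTT slot reserved at ggtt->error_capture, read through a
 * short-lived atomic WC mapping, and (when enabled) deflated on the fly.
 */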
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *i915,
			 struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	const u64 slot = ggtt->error_capture.start;
	struct drm_i915_error_object *dst;
	struct z_stream_s zstream;
	unsigned long num_pages;
	struct sgt_iter iter;
	dma_addr_t dma;

	if (!vma)
		return NULL;

	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
		      GFP_ATOMIC | __GFP_NOWARN);
	if (!dst)
		return NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;
	dst->page_count = 0;
	dst->unused = 0;

	if (!compress_init(&zstream)) {
		kfree(dst);
		return NULL;
	}

	for_each_sgt_dma(dma, iter, vma->pages) {
		void __iomem *s;
		int ret;

		ggtt->base.insert_page(&ggtt->base, dma, slot,
				       I915_CACHE_NONE, 0);

		s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
		ret = compress_page(&zstream, (void __force *)s, dst);
		io_mapping_unmap_atomic(s);

		if (ret)
			goto unwind;
	}
	goto out;

unwind:
	while (dst->page_count--)
		free_page((unsigned long)dst->pages[dst->page_count]);
	kfree(dst);
	dst = NULL;

out:
	compress_fini(&zstream, dst);
	ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE, true);
	return dst;
}
/* The error capture is special as it tries to run underneath the normal
 * locking rules - so we use the raw version of the i915_gem_active lookup.
 */
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
	return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
}

static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
	struct intel_engine_cs *engine;

	engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
	return engine ? engine->id : -1;
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;

	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
	err->wseqno = __active_get_seqno(&obj->last_write);
	err->engine = __active_get_engine_id(&obj->last_write);

	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = vma->fence ? vma->fence->id : -1;
	err->tiling = i915_gem_object_get_tiling(obj);
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->cache_level = obj->cache_level;
}
static u32 capture_error_bo(struct drm_i915_error_buffer *err,
			    int count, struct list_head *head,
			    bool pinned_only)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		if (pinned_only && !i915_vma_is_pinned(vma))
			continue;

		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}
/* Generate a semi-unique error code. The code is not meant to have meaning;
 * its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *engine_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it contains some very
	 * common synchronization commands that almost always appear when the
	 * hang is strictly a client bug. Use instdone to differentiate those
	 * cases somewhat.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
			if (engine_id)
				*engine_id = i;

			return error->engine[i].ipehr ^
			       error->engine[i].instdone.instdone;
		}
	}

	return error_code;
}
static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	int i;

	if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}
static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!error->semaphore)
		return;

	for_each_engine_id(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset =
			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
		tmp = error->semaphore->pages[0];
		idx = intel_engine_sync_index(engine, to);

		ee->semaphore_mboxes[idx] = tmp[signal_offset];
		ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
	}
}
static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
	ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv)) {
		ee->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
		ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
	}
}
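/*
 * Waiter capture below runs from the error handler, where sleeping on
 * b->lock could deadlock; hence the spin_trylock() calls. The waiters are
 * counted under the lock, the lock is dropped for the allocation, and then
 * retaken for the copy. If either trylock fails, ee->waiters is set to
 * ERR_PTR(-EDEADLK) and the report simply notes that the waiters could not
 * be read.
 */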
static void error_record_engine_waiters(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ee->num_waiters = 0;
	ee->waiters = NULL;

	if (RB_EMPTY_ROOT(&b->waiters))
		return;

	if (!spin_trylock(&b->lock)) {
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock(&b->lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	if (!spin_trylock(&b->lock)) {
		kfree(waiter);
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	ee->waiters = waiter;
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ee->num_waiters == count)
			break;
	}
	spin_unlock(&b->lock);
}
static void error_record_engine_registers(struct drm_i915_error_state *error,
					  struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 6) {
		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_GEN(dev_priv) >= 8)
			gen8_record_semaphore_state(error, engine, ee);
		else
			gen6_record_semaphore_state(engine, ee);
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ee->faddr = I915_READ(DMA_FADD_I8XX);
		ee->ipeir = I915_READ(IPEIR);
		ee->ipehr = I915_READ(IPEHR);
	}

	intel_engine_get_instdone(engine, &ee->instdone);

	ee->waiting = intel_engine_has_waiter(engine);
	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ee->acthd = intel_engine_get_active_head(engine);
	ee->seqno = intel_engine_get_seqno(engine);
	ee->last_seqno = engine->last_submitted_seqno;
	ee->start = I915_READ_START(engine);
	ee->head = I915_READ_HEAD(engine);
	ee->tail = I915_READ_TAIL(engine);
	ee->ctl = I915_READ_CTL(engine);
	if (INTEL_GEN(dev_priv) > 2)
		ee->mode = I915_READ_MODE(engine);

	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = I915_READ(mmio);
	}

	ee->hangcheck_score = engine->hangcheck.score;
	ee->hangcheck_action = engine->hangcheck.action;

	if (USES_PPGTT(dev_priv)) {
		int i;

		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}
static void record_request(struct drm_i915_gem_request *request,
			   struct drm_i915_error_request *erq)
{
	erq->context = request->ctx->hw_id;
	erq->seqno = request->fence.seqno;
	erq->jiffies = request->emitted_jiffies;
	erq->head = request->head;
	erq->tail = request->tail;

	rcu_read_lock();
	erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
	rcu_read_unlock();
}
static void engine_record_requests(struct intel_engine_cs *engine,
				   struct drm_i915_gem_request *first,
				   struct drm_i915_error_engine *ee)
{
	struct drm_i915_gem_request *request;
	int count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->request_list, link)
		count++;
	if (!count)
		return;

	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
	if (!ee->requests)
		return;

	ee->num_requests = count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->request_list, link) {
		if (count >= ee->num_requests) {
			/*
			 * If the ring request list changed between the
			 * point where the error request list was created
			 * and dimensioned and this point, just exit early
			 * to avoid crashes.
			 *
			 * We don't need to communicate that the request
			 * list changed state during error state capture,
			 * and that the error state is slightly incorrect
			 * as a consequence, since we are typically only
			 * interested in the request list state at the
			 * point of error state capture, not in any changes
			 * happening during the capture.
			 */
			break;
		}

		record_request(request, &ee->requests[count++]);
	}
	ee->num_requests = count;
}
static void error_record_engine_execlists(struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	unsigned int n;

	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
		if (engine->execlist_port[n].request)
			record_request(engine->execlist_port[n].request,
				       &ee->execlist[n]);
}
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct drm_i915_error_state *error)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int i;

	error->semaphore =
		i915_error_object_create(dev_priv, dev_priv->semaphore);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];
		struct drm_i915_error_engine *ee = &error->engine[i];
		struct drm_i915_gem_request *request;

		ee->pid = -1;
		ee->engine_id = -1;

		if (!intel_engine_initialized(engine))
			continue;

		ee->engine_id = i;

		error_record_engine_registers(error, engine, ee);
		error_record_engine_waiters(engine, ee);
		error_record_engine_execlists(engine, ee);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct intel_ring *ring;
			struct pid *pid;

			ee->vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			ee->batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch);

			if (HAS_BROKEN_CS_TLB(dev_priv))
				ee->wa_batchbuffer =
					i915_error_object_create(dev_priv,
								 engine->scratch);

			ee->ctx =
				i915_error_object_create(dev_priv,
							 request->ctx->engine[i].state);

			pid = request->ctx->pid;
			if (pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(pid, PIDTYPE_PID);
				if (task) {
					strcpy(ee->comm, task->comm);
					ee->pid = task->pid;
				}
				rcu_read_unlock();
			}

			error->simulated |=
				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;

			ee->rq_head = request->head;
			ee->rq_post = request->postfix;
			ee->rq_tail = request->tail;

			ring = request->ring;
			ee->cpu_ring_head = ring->head;
			ee->cpu_ring_tail = ring->tail;
			ee->ringbuffer =
				i915_error_object_create(dev_priv, ring->vma);

			engine_record_requests(engine, request, ee);
		}

		ee->hws_page =
			i915_error_object_create(dev_priv,
						 engine->status_page.vma);

		ee->wa_ctx =
			i915_error_object_create(dev_priv, engine->wa_ctx.vma);
	}
}
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				int idx)
{
	struct drm_i915_error_buffer *active_bo;
	struct i915_vma *vma;
	int count;

	count = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count++;

	active_bo = NULL;
	if (count)
		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
	if (active_bo)
		count = capture_error_bo(active_bo, count, &vm->active_list, false);
	else
		count = 0;

	error->active_vm[idx] = vm;
	error->active_bo[idx] = active_bo;
	error->active_bo_count[idx] = count;
}
static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error)
{
	int cnt = 0, i, j;

	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));

	/* Scan each engine looking for unique active contexts/vm */
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];
		bool found;

		if (!ee->vm)
			continue;

		found = false;
		for (j = 0; j < i && !found; j++)
			found = error->engine[j].vm == ee->vm;
		if (!found)
			i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
	}
}
static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error)
{
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct drm_i915_error_buffer *bo;
	struct i915_vma *vma;
	int count_inactive, count_active;

	count_inactive = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count_inactive++;

	count_active = 0;
	list_for_each_entry(vma, &vm->inactive_list, vm_link)
		count_active++;

	bo = NULL;
	if (count_inactive + count_active)
		bo = kcalloc(count_inactive + count_active,
			     sizeof(*bo), GFP_ATOMIC);
	if (!bo)
		return;

	count_inactive = capture_error_bo(bo, count_inactive,
					  &vm->active_list, true);
	count_active = capture_error_bo(bo + count_inactive, count_active,
					&vm->inactive_list, true);
	error->pinned_bo_count = count_inactive + count_active;
	error->pinned_bo = bo;
}
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
}
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int engine_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &engine_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), engine_id, ecode);

	if (engine_id != -1 && error->engine[engine_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->engine[engine_id].comm,
				 error->engine[engine_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}
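/*
 * An example of the resulting header line (all values are illustrative
 * only):
 *
 *   GPU HANG: ecode 9:0:0x85dffffb, in Xorg [1234], reason: Hang on render ring, action: reset
 */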
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;

	memcpy(&error->device_info,
	       INTEL_INFO(dev_priv),
	       sizeof(error->device_info));
}
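/*
 * capture() below is invoked via stop_machine(), so every other CPU is
 * parked while the registers, requests and buffer lists are sampled; the
 * capture therefore sees a consistent snapshot without having to take the
 * driver's ordinary locks.
 */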
static int capture(void *data)
{
	struct drm_i915_error_state *error = data;

	i915_capture_gen_state(error->i915, error);
	i915_capture_reg_state(error->i915, error);
	i915_gem_record_fences(error->i915, error);
	i915_gem_record_rings(error->i915, error);
	i915_capture_active_buffers(error->i915, error);
	i915_capture_pinned_buffers(error->i915, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(error->i915);
	error->display = intel_display_capture_error_state(error->i915);

	return 0;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev_priv: i915 device
 * @engine_mask: mask of the engines implicated in the hang
 * @error_msg: a message describing the reason for the capture
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_error_state *error;
	unsigned long flags;

	if (!i915.error_capture)
		return;

	if (READ_ONCE(dev_priv->gpu_error.first_error))
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);
	error->i915 = dev_priv;

	stop_machine(capture, error, NULL);

	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	if (!error->simulated) {
		spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
		if (!dev_priv->gpu_error.first_error) {
			dev_priv->gpu_error.first_error = error;
			error = NULL;
		}
		spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	}

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			 dev_priv->drm.primary->index);
		warned = true;
	}
}
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}