drm/i915: Cleanup instdone collection
[linux-block.git] drivers/gpu/drm/i915/i915_gpu_error.c
/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"

static const char *engine_str(int engine)
{
	switch (engine) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}

static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}

static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that
	 * the start position matches the start of the buffer
	 */

	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
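
/*
 * Illustrative walk-through (not part of the driver): the seek/advance pair
 * above implements windowed reads of a stream that is regenerated from
 * scratch on every read. With e->start = 100 and an 80-byte first record:
 *
 *	e->pos = 0;			regeneration begins at offset 0
 *	__i915_error_seek(e, 80);	0 + 80 <= 100: record skipped, pos = 80
 *	__i915_error_seek(e, 40);	80 + 40 > 100: this record is kept
 *	__i915_error_advance(e, 40);	off = 20, the memmove() drops the
 *					20 bytes that precede e->start
 *
 * so only the tail of the record straddling e->start lands in e->buf.
 */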

static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}

static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
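
/*
 * Everything below funnels its output through these wrappers, so the same
 * printing code serves both a full dump and a partial read at an arbitrary
 * offset, e.g.:
 *
 *	err_printf(m, "EIR: 0x%08x\n", error->eir);
 *	err_puts(m, tiling_flag(err->tiling));
 */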

static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->engine != -1 ? " " : "");
		err_puts(m, engine_str(err->engine));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
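
/*
 * A line emitted by print_error_buffers() looks roughly like this
 * (illustrative values, five engines):
 *
 *	00000000_12345000     4096 01 01 [ 05 00 00 00 00 ] 04 X dirty render LLC (name: 7) (fence: 2)
 *
 * i.e. gtt_offset, size, read/write domains, the per-engine read seqnos,
 * the write seqno, and then the flag strings built by the helpers above.
 */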

static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}

static void error_print_instdone(struct drm_i915_error_state_buf *m,
				 struct drm_i915_error_engine *ee)
{
	err_printf(m, " INSTDONE: 0x%08x\n",
		   ee->instdone.instdone);

	if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
		return;

	err_printf(m, " SC_INSTDONE: 0x%08x\n",
		   ee->instdone.slice_common);

	if (INTEL_GEN(m->i915) <= 6)
		return;

	err_printf(m, " SAMPLER_INSTDONE: 0x%08x\n",
		   ee->instdone.sampler);
	err_printf(m, " ROW_INSTDONE: 0x%08x\n",
		   ee->instdone.row);
}
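
/*
 * Only the render command streamer has the extra INSTDONE registers, and
 * which of them exist depends on the generation, hence the tiered early
 * returns above: gen2/3 and non-render engines get the plain INSTDONE
 * line only, gen4-gen6 render adds SC_INSTDONE, and gen7+ render reports
 * all four fields (INSTDONE, SC_INSTDONE, SAMPLER_INSTDONE, ROW_INSTDONE).
 */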

static void error_print_engine(struct drm_i915_error_state_buf *m,
			       struct drm_i915_error_engine *ee)
{
	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
	err_printf(m, " START: 0x%08x\n", ee->start);
	err_printf(m, " HEAD: 0x%08x\n", ee->head);
	err_printf(m, " TAIL: 0x%08x\n", ee->tail);
	err_printf(m, " CTL: 0x%08x\n", ee->ctl);
	err_printf(m, " MODE: 0x%08x\n", ee->mode);
	err_printf(m, " HWS: 0x%08x\n", ee->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n",
		   (u32)(ee->acthd>>32), (u32)ee->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);

	error_print_instdone(m, ee);

	if (ee->batchbuffer) {
		u64 start = ee->batchbuffer->gtt_offset;
		u64 end = start + ee->batchbuffer->gtt_size;

		err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n",
			   upper_32_bits(start), lower_32_bits(start),
			   upper_32_bits(end), lower_32_bits(end));
	}
	if (INTEL_GEN(m->i915) >= 4) {
		err_printf(m, " BBADDR: 0x%08x_%08x\n",
			   (u32)(ee->bbaddr>>32), (u32)ee->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ee->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ee->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr),
		   lower_32_bits(ee->faddr));
	if (INTEL_GEN(m->i915) >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ee->semaphore_mboxes[0],
			   ee->semaphore_seqno[0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ee->semaphore_mboxes[1],
			   ee->semaphore_seqno[1]);
		if (HAS_VEBOX(m->i915)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ee->semaphore_mboxes[2],
				   ee->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(m->i915)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);

		if (INTEL_GEN(m->i915) >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ee->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ee->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", ee->seqno);
	err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
	err_printf(m, " waiting: %s\n", yesno(ee->waiting));
	err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
	err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
	err_printf(m, " hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ee->hangcheck_action),
		   ee->hangcheck_score);
}

void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}

static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}

static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   const struct intel_device_info *info)
{
#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON
}
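
/*
 * For reference, PRINT_FLAG(is_mobile) expands to
 *
 *	err_printf(m, "is_mobile" ": %s\n", yesno(info->is_mobile));
 *
 * so DEV_INFO_FOR_EACH_FLAG() (see i915_drv.h) emits one "flag: yes/no"
 * line per device-info flag; is_mobile is just one example member.
 */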

int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	err_print_capabilities(m, &error->device_info);
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->engine[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].hangcheck_score == max_hangcheck_score &&
		    error->engine[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   engine_str(i),
				   error->engine[i].comm,
				   error->engine[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   pdev->subsystem_vendor,
		   pdev->subsystem_device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		if (error->engine[i].engine_id != -1)
			error_print_engine(m, &error->engine[i]);
	}

	for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
		char buf[128];
		int len, first = 1;

		if (!error->active_vm[i])
			break;

		len = scnprintf(buf, sizeof(buf), "Active (");
		for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
			if (error->engine[j].vm != error->active_vm[i])
				continue;

			len += scnprintf(buf + len, sizeof(buf), "%s%s",
					 first ? "" : ", ",
					 dev_priv->engine[j].name);
			first = 0;
		}
		scnprintf(buf + len, sizeof(buf), ")");
		print_error_buffers(m, buf,
				    error->active_bo[i],
				    error->active_bo_count[i]);
	}

	print_error_buffers(m, "Pinned (global)",
			    error->pinned_bo,
			    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		obj = ee->batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i].name);
			if (ee->pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   ee->comm,
					   ee->pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = ee->wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if (ee->num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i].name,
				   ee->num_requests);
			for (j = 0; j < ee->num_requests; j++) {
				err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n",
					   ee->requests[j].pid,
					   ee->requests[j].seqno,
					   ee->requests[j].jiffies,
					   ee->requests[j].head,
					   ee->requests[j].tail);
			}
		}

		if (IS_ERR(ee->waiters)) {
			err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
				   dev_priv->engine[i].name);
		} else if (ee->num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i].name,
				   ee->num_waiters);
			for (j = 0; j < ee->num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   ee->waiters[j].seqno,
					   ee->waiters[j].comm,
					   ee->waiters[j].pid);
			}
		}

		if ((obj = ee->ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = ee->hws_page)) {
			u64 hws_offset = obj->gtt_offset;
			u32 *hws_page = &obj->pages[0][0];

			if (i915.enable_execlists) {
				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
			}
			err_printf(m, "%s --- HW Status = 0x%08llx\n",
				   dev_priv->engine[i].name, hws_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   hws_page[elt],
					   hws_page[elt+1],
					   hws_page[elt+2],
					   hws_page[elt+3]);
				offset += 16;
			}
		}

		obj = ee->wa_ctx;
		if (obj) {
			u64 wa_ctx_offset = obj->gtt_offset;
			u32 *wa_ctx_page = &obj->pages[0][0];
			struct intel_engine_cs *engine = &dev_priv->engine[RCS];
			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
					   engine->wa_ctx.per_ctx.size);

			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
				   dev_priv->engine[i].name, wa_ctx_offset);
			offset = 0;
			for (elt = 0; elt < wa_ctx_size; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   wa_ctx_page[elt + 0],
					   wa_ctx_page[elt + 1],
					   wa_ctx_page[elt + 2],
					   wa_ctx_page[elt + 3]);
				offset += 16;
			}
		}

		if ((obj = ee->ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore)) {
		err_printf(m, "Semaphore page = 0x%08x\n",
			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}

int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to the start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size,
			    GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
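
/*
 * A minimal sketch of how the two halves fit together, modelled on the
 * debugfs/sysfs readers (error handling trimmed;
 * i915_error_state_buf_release() is the matching kfree() helper from
 * i915_drv.h):
 *
 *	struct drm_i915_error_state_buf buf;
 *	int ret;
 *
 *	ret = i915_error_state_buf_init(&buf, dev_priv, count, pos);
 *	if (ret == 0) {
 *		ret = i915_error_state_to_str(&buf, error_priv);
 *		...copy buf.buf / buf.bytes out to the reader...
 *		i915_error_state_buf_release(&buf);
 *	}
 */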

static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];

		i915_error_object_free(ee->batchbuffer);
		i915_error_object_free(ee->wa_batchbuffer);
		i915_error_object_free(ee->ringbuffer);
		i915_error_object_free(ee->hws_page);
		i915_error_object_free(ee->ctx);
		i915_error_object_free(ee->wa_ctx);

		kfree(ee->requests);
		if (!IS_ERR_OR_NULL(ee->waiters))
			kfree(ee->waiters);
	}

	i915_error_object_free(error->semaphore);

	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
		kfree(error->active_bo[i]);
	kfree(error->pinned_bo);

	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct i915_vma *vma)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *src;
	struct drm_i915_error_object *dst;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (!vma)
		return NULL;

	src = vma->obj;
	if (!src->pages)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst->gtt_offset = vma->node.start;
	dst->gtt_size = vma->node.size;

	reloc_offset = dst->gtt_offset;
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    (vma->flags & I915_VMA_GLOBAL_BIND) &&
		    reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
			goto unwind;

		reloc_offset = vma->node.start;
		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
	    !HAS_LLC(dev_priv))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(&ggtt->mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
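
/*
 * In short, the copy above picks one of two page-by-page paths based on
 * use_ggtt: an uncached read through the GGTT aperture (mandatory for
 * stolen memory, which has no struct pages to kmap) or a clflushed
 * kmap_atomic() copy of the backing pages. Both paths run with interrupts
 * disabled and allocate with GFP_ATOMIC, since the capture may be invoked
 * from interrupt context.
 */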

/* The error capture is special as it tries to run underneath the normal
 * locking rules - so we use the raw version of the i915_gem_active lookup.
 */
static inline uint32_t
__active_get_seqno(struct i915_gem_active *active)
{
	return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
}

static inline int
__active_get_engine_id(struct i915_gem_active *active)
{
	struct intel_engine_cs *engine;

	engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
	return engine ? engine->id : -1;
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;

	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
	err->wseqno = __active_get_seqno(&obj->last_write);
	err->engine = __active_get_engine_id(&obj->last_write);

	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = vma->fence ? vma->fence->id : -1;
	err->tiling = i915_gem_object_get_tiling(obj);
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->cache_level = obj->cache_level;
}

static u32 capture_error_bo(struct drm_i915_error_buffer *err,
			    int count, struct list_head *head,
			    bool pinned_only)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		if (pinned_only && !i915_vma_is_pinned(vma))
			continue;

		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}

/* Generate a semi-unique error code. The code is not meant to have meaning.
 * Its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *engine_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it has some very
	 * common synchronization commands which almost always appear in the
	 * case of strictly a client bug. Use instdone to differentiate those
	 * somewhat.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
			if (engine_id)
				*engine_id = i;

			return error->engine[i].ipehr ^
			       error->engine[i].instdone.instdone;
		}
	}

	return error_code;
}
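
/*
 * Worked example with illustrative numbers: if the hung engine reports
 * IPEHR = 0x05000000 and INSTDONE = 0xfffffffe, the ecode becomes
 *
 *	0x05000000 ^ 0xfffffffe = 0xfafffffe
 *
 * so two hangs on the same command but with different execution-unit
 * state still yield different codes.
 */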

static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	int i;

	if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}

static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!error->semaphore)
		return;

	for_each_engine_id(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset =
			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
		tmp = error->semaphore->pages[0];
		idx = intel_engine_sync_index(engine, to);

		ee->semaphore_mboxes[idx] = tmp[signal_offset];
		ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
	}
}
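
/*
 * The signal_offset arithmetic above turns the semaphore's GGTT address
 * into an index into the captured page: masking with (PAGE_SIZE - 1) keeps
 * only the within-page byte offset and dividing by 4 converts it to a u32
 * slot, e.g. a byte offset of 0x48 within the page reads tmp[0x12].
 */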

static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
	ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv)) {
		ee->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
		ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
	}
}

static void error_record_engine_waiters(struct intel_engine_cs *engine,
					struct drm_i915_error_engine *ee)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ee->num_waiters = 0;
	ee->waiters = NULL;

	if (RB_EMPTY_ROOT(&b->waiters))
		return;

	if (!spin_trylock(&b->lock)) {
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	spin_unlock(&b->lock);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	if (!spin_trylock(&b->lock)) {
		kfree(waiter);
		ee->waiters = ERR_PTR(-EDEADLK);
		return;
	}

	ee->waiters = waiter;
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ee->num_waiters == count)
			break;
	}
	spin_unlock(&b->lock);
}
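
/*
 * Note the lock-shy pattern above: since error capture may run in atomic
 * context it only ever uses spin_trylock(), and records ERR_PTR(-EDEADLK)
 * rather than blocking; the printer then reports
 * "? waiters [unable to acquire spinlock]" instead of a waiter list.
 */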

static void error_record_engine_registers(struct drm_i915_error_state *error,
					  struct intel_engine_cs *engine,
					  struct drm_i915_error_engine *ee)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) >= 6) {
		ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_GEN(dev_priv) >= 8)
			gen8_record_semaphore_state(error, engine, ee);
		else
			gen6_record_semaphore_state(engine, ee);
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ee->faddr = I915_READ(DMA_FADD_I8XX);
		ee->ipeir = I915_READ(IPEIR);
		ee->ipehr = I915_READ(IPEHR);
	}

	i915_get_engine_instdone(dev_priv, engine->id, &ee->instdone);

	ee->waiting = intel_engine_has_waiter(engine);
	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ee->acthd = intel_engine_get_active_head(engine);
	ee->seqno = intel_engine_get_seqno(engine);
	ee->last_seqno = engine->last_submitted_seqno;
	ee->start = I915_READ_START(engine);
	ee->head = I915_READ_HEAD(engine);
	ee->tail = I915_READ_TAIL(engine);
	ee->ctl = I915_READ_CTL(engine);
	if (INTEL_GEN(dev_priv) > 2)
		ee->mode = I915_READ_MODE(engine);

	if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ee->hws = I915_READ(mmio);
	}

	ee->hangcheck_score = engine->hangcheck.score;
	ee->hangcheck_action = engine->hangcheck.action;

	if (USES_PPGTT(dev_priv)) {
		int i;

		ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ee->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ee->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ee->vm_info.pdp[i] <<= 32;
				ee->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}

static void engine_record_requests(struct intel_engine_cs *engine,
				   struct drm_i915_gem_request *first,
				   struct drm_i915_error_engine *ee)
{
	struct drm_i915_gem_request *request;
	int count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->request_list, link)
		count++;
	if (!count)
		return;

	ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
	if (!ee->requests)
		return;

	ee->num_requests = count;

	count = 0;
	request = first;
	list_for_each_entry_from(request, &engine->request_list, link) {
		struct drm_i915_error_request *erq;

		if (count >= ee->num_requests) {
			/*
			 * If the ring request list was changed in
			 * between the point where the error request
			 * list was created and dimensioned and this
			 * point then just exit early to avoid crashes.
			 *
			 * We don't need to communicate that the
			 * request list changed state during error
			 * state capture and that the error state is
			 * slightly incorrect as a consequence since we
			 * are typically only interested in the request
			 * list state at the point of error state
			 * capture, not in any changes happening during
			 * the capture.
			 */
			break;
		}

		erq = &ee->requests[count++];
		erq->seqno = request->fence.seqno;
		erq->jiffies = request->emitted_jiffies;
		erq->head = request->head;
		erq->tail = request->tail;

		rcu_read_lock();
		erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
		rcu_read_unlock();
	}
	ee->num_requests = count;
}

static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct drm_i915_error_state *error)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int i;

	error->semaphore =
		i915_error_object_create(dev_priv, dev_priv->semaphore);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];
		struct drm_i915_error_engine *ee = &error->engine[i];
		struct drm_i915_gem_request *request;

		ee->pid = -1;
		ee->engine_id = -1;

		if (!intel_engine_initialized(engine))
			continue;

		ee->engine_id = i;

		error_record_engine_registers(error, engine, ee);
		error_record_engine_waiters(engine, ee);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct intel_ring *ring;
			struct pid *pid;

			ee->vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			ee->batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch);

			if (HAS_BROKEN_CS_TLB(dev_priv))
				ee->wa_batchbuffer =
					i915_error_object_create(dev_priv,
								 engine->scratch);

			ee->ctx =
				i915_error_object_create(dev_priv,
							 request->ctx->engine[i].state);

			pid = request->ctx->pid;
			if (pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(pid, PIDTYPE_PID);
				if (task) {
					strcpy(ee->comm, task->comm);
					ee->pid = task->pid;
				}
				rcu_read_unlock();
			}

			error->simulated |=
				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;

			ring = request->ring;
			ee->cpu_ring_head = ring->head;
			ee->cpu_ring_tail = ring->tail;
			ee->ringbuffer =
				i915_error_object_create(dev_priv, ring->vma);

			engine_record_requests(engine, request, ee);
		}

		ee->hws_page =
			i915_error_object_create(dev_priv,
						 engine->status_page.vma);

		ee->wa_ctx =
			i915_error_object_create(dev_priv, engine->wa_ctx.vma);
	}
}

static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				int idx)
{
	struct drm_i915_error_buffer *active_bo;
	struct i915_vma *vma;
	int count;

	count = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count++;

	active_bo = NULL;
	if (count)
		active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
	if (active_bo)
		count = capture_error_bo(active_bo, count, &vm->active_list, false);
	else
		count = 0;

	error->active_vm[idx] = vm;
	error->active_bo[idx] = active_bo;
	error->active_bo_count[idx] = count;
}

static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error)
{
	int cnt = 0, i, j;

	BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
	BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));

	/* Scan each engine looking for unique active contexts/vm */
	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
		struct drm_i915_error_engine *ee = &error->engine[i];
		bool found;

		if (!ee->vm)
			continue;

		found = false;
		for (j = 0; j < i && !found; j++)
			found = error->engine[j].vm == ee->vm;
		if (!found)
			i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++);
	}
}

static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error)
{
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct drm_i915_error_buffer *bo;
	struct i915_vma *vma;
	int count_inactive, count_active;

	count_inactive = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		count_inactive++;

	count_active = 0;
	list_for_each_entry(vma, &vm->inactive_list, vm_link)
		count_active++;

	bo = NULL;
	if (count_inactive + count_active)
		bo = kcalloc(count_inactive + count_active,
			     sizeof(*bo), GFP_ATOMIC);
	if (!bo)
		return;

	count_inactive = capture_error_bo(bo, count_inactive,
					  &vm->active_list, true);
	count_active = capture_error_bo(bo + count_inactive, count_active,
					&vm->inactive_list, true);
	error->pinned_bo_count = count_inactive + count_active;
	error->pinned_bo = bo;
}

/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
}

static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int engine_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &engine_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), engine_id, ecode);

	if (engine_id != -1 && error->engine[engine_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->engine[engine_id].comm,
				 error->engine[engine_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}

static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;

	memcpy(&error->device_info,
	       INTEL_INFO(dev_priv),
	       sizeof(error->device_info));
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev_priv: i915 device private
 * @engine_mask: mask of the engines to be reset in response (0 to continue
 *		 without a reset)
 * @error_msg: short description of the reason for the capture
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_error_state *error;
	unsigned long flags;

	if (READ_ONCE(dev_priv->gpu_error.first_error))
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_record_fences(dev_priv, error);
	i915_gem_record_rings(dev_priv, error);
	i915_capture_active_buffers(dev_priv, error);
	i915_capture_pinned_buffers(dev_priv, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev_priv);
	error->display = intel_display_capture_error_state(dev_priv);

	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	if (!error->simulated) {
		spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
		if (!dev_priv->gpu_error.first_error) {
			dev_priv->gpu_error.first_error = error;
			error = NULL;
		}
		spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	}

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			 dev_priv->drm.primary->index);
		warned = true;
	}
}

void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}

void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

/* NB: please notice the memset */
void i915_get_engine_instdone(struct drm_i915_private *dev_priv,
			      enum intel_engine_id engine_id,
			      struct intel_instdone *instdone)
{
	u32 mmio_base = dev_priv->engine[engine_id].mmio_base;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine_id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine_id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}
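
/*
 * Usage sketch (hypothetical caller): hangcheck-style code can compare two
 * snapshots to decide whether an engine is still making progress:
 *
 *	struct intel_instdone done, prev;
 *
 *	i915_get_engine_instdone(dev_priv, RCS, &prev);
 *	...wait...
 *	i915_get_engine_instdone(dev_priv, RCS, &done);
 *	if (memcmp(&done, &prev, sizeof(done)) == 0)
 *		engine_may_be_hung();	(hypothetical helper)
 *
 * The leading memset() is what makes that memcmp() well-defined on
 * generations that leave some of the fields unused.
 */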