drivers/gpu/drm/i915/gt/intel_reset.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2008-2018 Intel Corporation
5  */
6
7 #include <linux/sched/mm.h>
8 #include <linux/stop_machine.h>
9
10 #include "display/intel_overlay.h"
11
12 #include "gem/i915_gem_context.h"
13
14 #include "i915_drv.h"
15 #include "i915_gpu_error.h"
16 #include "i915_irq.h"
17 #include "intel_engine_pm.h"
18 #include "intel_gt_pm.h"
19 #include "intel_reset.h"
20
21 #include "intel_guc.h"
22
23 #define RESET_MAX_RETRIES 3
24
25 /* XXX How to handle concurrent GGTT updates using tiling registers? */
26 #define RESET_UNDER_STOP_MACHINE 0
27
28 static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
29 {
30         intel_uncore_rmw(uncore, reg, 0, set);
31 }
32
33 static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
34 {
35         intel_uncore_rmw(uncore, reg, clr, 0);
36 }
37
38 static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
39 {
40         intel_uncore_rmw_fw(uncore, reg, 0, set);
41 }
42
43 static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
44 {
45         intel_uncore_rmw_fw(uncore, reg, clr, 0);
46 }
47
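/*
 * Once the hung request has been identified, all later requests from the
 * same context that are already submitted on this engine are cancelled
 * with -EIO, so the guilty context's work is not replayed after the reset.
 */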
48 static void engine_skip_context(struct i915_request *rq)
49 {
50         struct intel_engine_cs *engine = rq->engine;
51         struct i915_gem_context *hung_ctx = rq->gem_context;
52
53         lockdep_assert_held(&engine->active.lock);
54
55         if (!i915_request_is_active(rq))
56                 return;
57
58         list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
59                 if (rq->gem_context == hung_ctx)
60                         i915_request_skip(rq, -EIO);
61 }
62
63 static void client_mark_guilty(struct drm_i915_file_private *file_priv,
64                                const struct i915_gem_context *ctx)
65 {
66         unsigned int score;
67         unsigned long prev_hang;
68
69         if (i915_gem_context_is_banned(ctx))
70                 score = I915_CLIENT_SCORE_CONTEXT_BAN;
71         else
72                 score = 0;
73
74         prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
75         if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
76                 score += I915_CLIENT_SCORE_HANG_FAST;
77
78         if (score) {
79                 atomic_add(score, &file_priv->ban_score);
80
81                 DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
82                                  ctx->name, score,
83                                  atomic_read(&file_priv->ban_score));
84         }
85 }
86
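/*
 * Banning heuristic: every hang bumps the context's guilty count. A bannable
 * context is banned if it is marked non-recoverable, or if it hangs again
 * within CONTEXT_FAST_HANG_JIFFIES of its previous hang; the owning client
 * is then also charged a ban score (see client_mark_guilty).
 */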
87 static bool context_mark_guilty(struct i915_gem_context *ctx)
88 {
89         unsigned long prev_hang;
90         bool banned;
91         int i;
92
93         atomic_inc(&ctx->guilty_count);
94
95         /* Cool contexts are too cool to be banned! (Used for reset testing.) */
96         if (!i915_gem_context_is_bannable(ctx))
97                 return false;
98
99         /* Record the timestamp for the last N hangs */
100         prev_hang = ctx->hang_timestamp[0];
101         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
102                 ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
103         ctx->hang_timestamp[i] = jiffies;
104
105         /* If we have hung N+1 times in rapid succession, we ban the context! */
106         banned = !i915_gem_context_is_recoverable(ctx);
107         if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
108                 banned = true;
109         if (banned) {
110                 DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
111                                  ctx->name, atomic_read(&ctx->guilty_count));
112                 i915_gem_context_set_banned(ctx);
113         }
114
115         if (!IS_ERR_OR_NULL(ctx->file_priv))
116                 client_mark_guilty(ctx->file_priv, ctx);
117
118         return banned;
119 }
120
121 static void context_mark_innocent(struct i915_gem_context *ctx)
122 {
123         atomic_inc(&ctx->active_count);
124 }
125
126 void i915_reset_request(struct i915_request *rq, bool guilty)
127 {
128         GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
129                   rq->engine->name,
130                   rq->fence.context,
131                   rq->fence.seqno,
132                   yesno(guilty));
133
134         lockdep_assert_held(&rq->engine->active.lock);
135         GEM_BUG_ON(i915_request_completed(rq));
136
137         if (guilty) {
138                 i915_request_skip(rq, -EIO);
139                 if (context_mark_guilty(rq->gem_context))
140                         engine_skip_context(rq);
141         } else {
142                 dma_fence_set_error(&rq->fence, -EAGAIN);
143                 context_mark_innocent(rq->gem_context);
144         }
145 }
146
147 static void gen3_stop_engine(struct intel_engine_cs *engine)
148 {
149         struct intel_uncore *uncore = engine->uncore;
150         const u32 base = engine->mmio_base;
151
152         GEM_TRACE("%s\n", engine->name);
153
154         if (intel_engine_stop_cs(engine))
155                 GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
156
157         intel_uncore_write_fw(uncore,
158                               RING_HEAD(base),
159                               intel_uncore_read_fw(uncore, RING_TAIL(base)));
160         intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
161
162         intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
163         intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
164         intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
165
166         /* The ring must be empty before it is disabled */
167         intel_uncore_write_fw(uncore, RING_CTL(base), 0);
168
169         /* Check acts as a post */
170         if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
171                 GEM_TRACE("%s: ring head [%x] not parked\n",
172                           engine->name,
173                           intel_uncore_read_fw(uncore, RING_HEAD(base)));
174 }
175
176 static void i915_stop_engines(struct drm_i915_private *i915,
177                               intel_engine_mask_t engine_mask)
178 {
179         struct intel_engine_cs *engine;
180         intel_engine_mask_t tmp;
181
182         if (INTEL_GEN(i915) < 3)
183                 return;
184
185         for_each_engine_masked(engine, i915, engine_mask, tmp)
186                 gen3_stop_engine(engine);
187 }
188
189 static bool i915_in_reset(struct pci_dev *pdev)
190 {
191         u8 gdrst;
192
193         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
194         return gdrst & GRDOM_RESET_STATUS;
195 }
196
197 static int i915_do_reset(struct drm_i915_private *i915,
198                          intel_engine_mask_t engine_mask,
199                          unsigned int retry)
200 {
201         struct pci_dev *pdev = i915->drm.pdev;
202         int err;
203
204         /* Assert reset for at least 20 usec, and wait for acknowledgement. */
205         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
206         udelay(50);
207         err = wait_for_atomic(i915_in_reset(pdev), 50);
208
209         /* Clear the reset request. */
210         pci_write_config_byte(pdev, I915_GDRST, 0);
211         udelay(50);
212         if (!err)
213                 err = wait_for_atomic(!i915_in_reset(pdev), 50);
214
215         return err;
216 }
217
218 static bool g4x_reset_complete(struct pci_dev *pdev)
219 {
220         u8 gdrst;
221
222         pci_read_config_byte(pdev, I915_GDRST, &gdrst);
223         return (gdrst & GRDOM_RESET_ENABLE) == 0;
224 }
225
226 static int g33_do_reset(struct drm_i915_private *i915,
227                         intel_engine_mask_t engine_mask,
228                         unsigned int retry)
229 {
230         struct pci_dev *pdev = i915->drm.pdev;
231
232         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
233         return wait_for_atomic(g4x_reset_complete(pdev), 50);
234 }
235
236 static int g4x_do_reset(struct drm_i915_private *i915,
237                         intel_engine_mask_t engine_mask,
238                         unsigned int retry)
239 {
240         struct pci_dev *pdev = i915->drm.pdev;
241         struct intel_uncore *uncore = &i915->uncore;
242         int ret;
243
244         /* WaVcpClkGateDisableForMediaReset:ctg,elk */
245         rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
246         intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
247
248         pci_write_config_byte(pdev, I915_GDRST,
249                               GRDOM_MEDIA | GRDOM_RESET_ENABLE);
250         ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
251         if (ret) {
252                 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
253                 goto out;
254         }
255
256         pci_write_config_byte(pdev, I915_GDRST,
257                               GRDOM_RENDER | GRDOM_RESET_ENABLE);
258         ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
259         if (ret) {
260                 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
261                 goto out;
262         }
263
264 out:
265         pci_write_config_byte(pdev, I915_GDRST, 0);
266
267         rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
268         intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
269
270         return ret;
271 }
272
273 static int ironlake_do_reset(struct drm_i915_private *i915,
274                              intel_engine_mask_t engine_mask,
275                              unsigned int retry)
276 {
277         struct intel_uncore *uncore = &i915->uncore;
278         int ret;
279
280         intel_uncore_write_fw(uncore, ILK_GDSR,
281                               ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
282         ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
283                                            ILK_GRDOM_RESET_ENABLE, 0,
284                                            5000, 0,
285                                            NULL);
286         if (ret) {
287                 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
288                 goto out;
289         }
290
291         intel_uncore_write_fw(uncore, ILK_GDSR,
292                               ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
293         ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
294                                            ILK_GRDOM_RESET_ENABLE, 0,
295                                            5000, 0,
296                                            NULL);
297         if (ret) {
298                 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
299                 goto out;
300         }
301
302 out:
303         intel_uncore_write_fw(uncore, ILK_GDSR, 0);
304         intel_uncore_posting_read_fw(uncore, ILK_GDSR);
305         return ret;
306 }
307
308 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
309 static int gen6_hw_domain_reset(struct drm_i915_private *i915,
310                                 u32 hw_domain_mask)
311 {
312         struct intel_uncore *uncore = &i915->uncore;
313         int err;
314
315         /*
316          * GEN6_GDRST is not in the gt power well, no need to check
317          * for fifo space for the write or forcewake the chip for
318          * the read
319          */
320         intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
321
322         /* Wait for the device to ack the reset requests */
323         err = __intel_wait_for_register_fw(uncore,
324                                            GEN6_GDRST, hw_domain_mask, 0,
325                                            500, 0,
326                                            NULL);
327         if (err)
328                 DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
329                                  hw_domain_mask);
330
331         return err;
332 }
333
334 static int gen6_reset_engines(struct drm_i915_private *i915,
335                               intel_engine_mask_t engine_mask,
336                               unsigned int retry)
337 {
338         struct intel_engine_cs *engine;
339         const u32 hw_engine_mask[] = {
340                 [RCS0]  = GEN6_GRDOM_RENDER,
341                 [BCS0]  = GEN6_GRDOM_BLT,
342                 [VCS0]  = GEN6_GRDOM_MEDIA,
343                 [VCS1]  = GEN8_GRDOM_MEDIA2,
344                 [VECS0] = GEN6_GRDOM_VECS,
345         };
346         u32 hw_mask;
347
348         if (engine_mask == ALL_ENGINES) {
349                 hw_mask = GEN6_GRDOM_FULL;
350         } else {
351                 intel_engine_mask_t tmp;
352
353                 hw_mask = 0;
354                 for_each_engine_masked(engine, i915, engine_mask, tmp) {
355                         GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
356                         hw_mask |= hw_engine_mask[engine->id];
357                 }
358         }
359
360         return gen6_hw_domain_reset(i915, hw_mask);
361 }
362
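/*
 * gen11_lock_sfc() force-locks the Scaler & Format Converter shared by a
 * vdbox/vebox pair before an engine reset. It returns the extra GRDOM bit
 * to fold into the reset domain mask if the SFC is currently in use by this
 * engine, or 0 if there is nothing to reset.
 */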
363 static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
364 {
365         struct intel_uncore *uncore = engine->uncore;
366         u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
367         i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
368         u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
369         i915_reg_t sfc_usage;
370         u32 sfc_usage_bit;
371         u32 sfc_reset_bit;
372
373         switch (engine->class) {
374         case VIDEO_DECODE_CLASS:
375                 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
376                         return 0;
377
378                 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
379                 sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
380
381                 sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
382                 sfc_forced_lock_ack_bit  = GEN11_VCS_SFC_LOCK_ACK_BIT;
383
384                 sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
385                 sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
386                 sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
387                 break;
388
389         case VIDEO_ENHANCEMENT_CLASS:
390                 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
391                 sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
392
393                 sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
394                 sfc_forced_lock_ack_bit  = GEN11_VECS_SFC_LOCK_ACK_BIT;
395
396                 sfc_usage = GEN11_VECS_SFC_USAGE(engine);
397                 sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
398                 sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
399                 break;
400
401         default:
402                 return 0;
403         }
404
405         /*
406          * Tell the engine that a software reset is going to happen. The engine
407          * will then try to force lock the SFC (if currently locked, it will
408          * remain so until we tell the engine it is safe to unlock; if currently
409          * unlocked, it will ignore this and all new lock requests). If SFC
410          * ends up being locked to the engine we want to reset, we have to reset
411          * it as well (we will unlock it once the reset sequence is completed).
412          */
413         rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
414
415         if (__intel_wait_for_register_fw(uncore,
416                                          sfc_forced_lock_ack,
417                                          sfc_forced_lock_ack_bit,
418                                          sfc_forced_lock_ack_bit,
419                                          1000, 0, NULL)) {
420                 DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
421                 return 0;
422         }
423
424         if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
425                 return sfc_reset_bit;
426
427         return 0;
428 }
429
430 static void gen11_unlock_sfc(struct intel_engine_cs *engine)
431 {
432         struct intel_uncore *uncore = engine->uncore;
433         u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
434         i915_reg_t sfc_forced_lock;
435         u32 sfc_forced_lock_bit;
436
437         switch (engine->class) {
438         case VIDEO_DECODE_CLASS:
439                 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
440                         return;
441
442                 sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
443                 sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
444                 break;
445
446         case VIDEO_ENHANCEMENT_CLASS:
447                 sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
448                 sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
449                 break;
450
451         default:
452                 return;
453         }
454
455         rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
456 }
457
458 static int gen11_reset_engines(struct drm_i915_private *i915,
459                                intel_engine_mask_t engine_mask,
460                                unsigned int retry)
461 {
462         const u32 hw_engine_mask[] = {
463                 [RCS0]  = GEN11_GRDOM_RENDER,
464                 [BCS0]  = GEN11_GRDOM_BLT,
465                 [VCS0]  = GEN11_GRDOM_MEDIA,
466                 [VCS1]  = GEN11_GRDOM_MEDIA2,
467                 [VCS2]  = GEN11_GRDOM_MEDIA3,
468                 [VCS3]  = GEN11_GRDOM_MEDIA4,
469                 [VECS0] = GEN11_GRDOM_VECS,
470                 [VECS1] = GEN11_GRDOM_VECS2,
471         };
472         struct intel_engine_cs *engine;
473         intel_engine_mask_t tmp;
474         u32 hw_mask;
475         int ret;
476
477         if (engine_mask == ALL_ENGINES) {
478                 hw_mask = GEN11_GRDOM_FULL;
479         } else {
480                 hw_mask = 0;
481                 for_each_engine_masked(engine, i915, engine_mask, tmp) {
482                         GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
483                         hw_mask |= hw_engine_mask[engine->id];
484                         hw_mask |= gen11_lock_sfc(engine);
485                 }
486         }
487
488         ret = gen6_hw_domain_reset(i915, hw_mask);
489
490         if (engine_mask != ALL_ENGINES)
491                 for_each_engine_masked(engine, i915, engine_mask, tmp)
492                         gen11_unlock_sfc(engine);
493
494         return ret;
495 }
496
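/*
 * gen8+ engines use a ready-for-reset handshake: request a reset via
 * RING_RESET_CTL and wait for the engine to acknowledge, except for
 * catastrophic errors where the handshake is bypassed (HAS#396813) and we
 * wait for the hardware to clear the error instead.
 */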
497 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
498 {
499         struct intel_uncore *uncore = engine->uncore;
500         const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
501         u32 request, mask, ack;
502         int ret;
503
504         ack = intel_uncore_read_fw(uncore, reg);
505         if (ack & RESET_CTL_CAT_ERROR) {
506                 /*
507                  * For catastrophic errors, ready-for-reset sequence
508                  * needs to be bypassed: HAS#396813
509                  */
510                 request = RESET_CTL_CAT_ERROR;
511                 mask = RESET_CTL_CAT_ERROR;
512
513                 /* Catastrophic errors need to be cleared by HW */
514                 ack = 0;
515         } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
516                 request = RESET_CTL_REQUEST_RESET;
517                 mask = RESET_CTL_READY_TO_RESET;
518                 ack = RESET_CTL_READY_TO_RESET;
519         } else {
520                 return 0;
521         }
522
523         intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
524         ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
525                                            700, 0, NULL);
526         if (ret)
527                 DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
528                           engine->name, request,
529                           intel_uncore_read_fw(uncore, reg));
530
531         return ret;
532 }
533
534 static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
535 {
536         intel_uncore_write_fw(engine->uncore,
537                               RING_RESET_CTL(engine->mmio_base),
538                               _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
539 }
540
541 static int gen8_reset_engines(struct drm_i915_private *i915,
542                               intel_engine_mask_t engine_mask,
543                               unsigned int retry)
544 {
545         struct intel_engine_cs *engine;
546         const bool reset_non_ready = retry >= 1;
547         intel_engine_mask_t tmp;
548         int ret;
549
550         for_each_engine_masked(engine, i915, engine_mask, tmp) {
551                 ret = gen8_engine_reset_prepare(engine);
552                 if (ret && !reset_non_ready)
553                         goto skip_reset;
554
555                 /*
556                  * If this is not the first failed attempt to prepare,
557                  * we decide to proceed anyway.
558                  *
559                  * By doing so we risk context corruption and with
560                  * some gens (kbl), possible system hang if reset
561                  * happens during active bb execution.
562                  *
563                  * We rather take context corruption instead of
564                  * failed reset with a wedged driver/gpu. And
565                  * active bb execution case should be covered by
566                  * i915_stop_engines we have before the reset.
567                  */
568         }
569
570         if (INTEL_GEN(i915) >= 11)
571                 ret = gen11_reset_engines(i915, engine_mask, retry);
572         else
573                 ret = gen6_reset_engines(i915, engine_mask, retry);
574
575 skip_reset:
576         for_each_engine_masked(engine, i915, engine_mask, tmp)
577                 gen8_engine_reset_cancel(engine);
578
579         return ret;
580 }
581
582 typedef int (*reset_func)(struct drm_i915_private *,
583                           intel_engine_mask_t engine_mask,
584                           unsigned int retry);
585
586 static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
587 {
588         if (INTEL_GEN(i915) >= 8)
589                 return gen8_reset_engines;
590         else if (INTEL_GEN(i915) >= 6)
591                 return gen6_reset_engines;
592         else if (INTEL_GEN(i915) >= 5)
593                 return ironlake_do_reset;
594         else if (IS_G4X(i915))
595                 return g4x_do_reset;
596         else if (IS_G33(i915) || IS_PINEVIEW(i915))
597                 return g33_do_reset;
598         else if (INTEL_GEN(i915) >= 3)
599                 return i915_do_reset;
600         else
601                 return NULL;
602 }
603
604 int intel_gpu_reset(struct drm_i915_private *i915,
605                     intel_engine_mask_t engine_mask)
606 {
607         const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
608         reset_func reset;
609         int ret = -ETIMEDOUT;
610         int retry;
611
612         reset = intel_get_gpu_reset(i915);
613         if (!reset)
614                 return -ENODEV;
615
616         /*
617          * If the power well sleeps during the reset, the reset
618          * request may be dropped and never completes (causing -EIO).
619          */
620         intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
621         for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
622                 /*
623                  * We stop engines, otherwise we might get failed reset and a
624                  * dead gpu (on elk). Also as modern gpu as kbl can suffer
625                  * from system hang if batchbuffer is progressing when
626                  * the reset is issued, regardless of READY_TO_RESET ack.
627                  * Thus assume it is best to stop engines on all gens
628                  * where we have a gpu reset.
629                  *
630                  * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
631                  *
632                  * WaMediaResetMainRingCleanup:ctg,elk (presumably)
633                  *
634                  * FIXME: Wa for more modern gens needs to be validated
635                  */
636                 if (retry)
637                         i915_stop_engines(i915, engine_mask);
638
639                 GEM_TRACE("engine_mask=%x\n", engine_mask);
640                 preempt_disable();
641                 ret = reset(i915, engine_mask, retry);
642                 preempt_enable();
643         }
644         intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
645
646         return ret;
647 }
648
649 bool intel_has_gpu_reset(struct drm_i915_private *i915)
650 {
651         if (!i915_modparams.reset)
652                 return false;
653
654         return intel_get_gpu_reset(i915);
655 }
656
657 bool intel_has_reset_engine(struct drm_i915_private *i915)
658 {
659         return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
660 }
661
662 int intel_reset_guc(struct drm_i915_private *i915)
663 {
664         u32 guc_domain =
665                 INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
666         int ret;
667
668         GEM_BUG_ON(!HAS_GUC(i915));
669
670         intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
671         ret = gen6_hw_domain_reset(i915, guc_domain);
672         intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
673
674         return ret;
675 }
676
677 /*
678  * Ensure the irq handler finishes, and is not run again, so that the
679  * engine state is stable while we prepare it for reset.
680  */
681 static void reset_prepare_engine(struct intel_engine_cs *engine)
682 {
683         /*
684          * During the reset sequence, we must prevent the engine from
685          * entering RC6. As the context state is undefined until we restart
686          * the engine, if it does enter RC6 during the reset, the state
687          * written to the powercontext is undefined and so we may lose
688          * GPU state upon resume, i.e. fail to restart after a reset.
689          */
690         intel_engine_pm_get(engine);
691         intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
692         engine->reset.prepare(engine);
693 }
694
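/*
 * Drop the userspace mmaps that are backed by fence registers so that the
 * next access takes a fresh pagefault and revalidates the fences and GGTT
 * bindings that the reset may have clobbered.
 */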
695 static void revoke_mmaps(struct drm_i915_private *i915)
696 {
697         int i;
698
699         for (i = 0; i < i915->ggtt.num_fences; i++) {
700                 struct drm_vma_offset_node *node;
701                 struct i915_vma *vma;
702                 u64 vma_offset;
703
704                 vma = READ_ONCE(i915->ggtt.fence_regs[i].vma);
705                 if (!vma)
706                         continue;
707
708                 if (!i915_vma_has_userfault(vma))
709                         continue;
710
711                 GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]);
712                 node = &vma->obj->base.vma_node;
713                 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
714                 unmap_mapping_range(i915->drm.anon_inode->i_mapping,
715                                     drm_vma_node_offset_addr(node) + vma_offset,
716                                     vma->size,
717                                     1);
718         }
719 }
720
721 static void reset_prepare(struct drm_i915_private *i915)
722 {
723         struct intel_engine_cs *engine;
724         enum intel_engine_id id;
725
726         intel_gt_pm_get(i915);
727         for_each_engine(engine, i915, id)
728                 reset_prepare_engine(engine);
729
730         intel_uc_reset_prepare(i915);
731 }
732
733 static void gt_revoke(struct drm_i915_private *i915)
734 {
735         revoke_mmaps(i915);
736 }
737
738 static int gt_reset(struct drm_i915_private *i915,
739                     intel_engine_mask_t stalled_mask)
740 {
741         struct intel_engine_cs *engine;
742         enum intel_engine_id id;
743         int err;
744
745         /*
746          * Everything depends on having the GTT running, so we need to start
747          * there.
748          */
749         err = i915_ggtt_enable_hw(i915);
750         if (err)
751                 return err;
752
753         for_each_engine(engine, i915, id)
754                 intel_engine_reset(engine, stalled_mask & engine->mask);
755
756         i915_gem_restore_fences(i915);
757
758         return err;
759 }
760
761 static void reset_finish_engine(struct intel_engine_cs *engine)
762 {
763         engine->reset.finish(engine);
764         intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
765         intel_engine_pm_put(engine);
766 }
767
768 static void reset_finish(struct drm_i915_private *i915)
769 {
770         struct intel_engine_cs *engine;
771         enum intel_engine_id id;
772
773         for_each_engine(engine, i915, id) {
774                 reset_finish_engine(engine);
775                 intel_engine_signal_breadcrumbs(engine);
776         }
777         intel_gt_pm_put(i915);
778 }
779
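/*
 * When the device is wedged, nop_submit_request replaces the normal
 * submission backend: every request is immediately completed with -EIO so
 * that waiters are released instead of hanging on a dead GPU.
 */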
780 static void nop_submit_request(struct i915_request *request)
781 {
782         struct intel_engine_cs *engine = request->engine;
783         unsigned long flags;
784
785         GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
786                   engine->name, request->fence.context, request->fence.seqno);
787         dma_fence_set_error(&request->fence, -EIO);
788
789         spin_lock_irqsave(&engine->active.lock, flags);
790         __i915_request_submit(request);
791         i915_request_mark_complete(request);
792         spin_unlock_irqrestore(&engine->active.lock, flags);
793
794         intel_engine_queue_breadcrumbs(engine);
795 }
796
797 static void __i915_gem_set_wedged(struct drm_i915_private *i915)
798 {
799         struct i915_gpu_error *error = &i915->gpu_error;
800         struct intel_engine_cs *engine;
801         enum intel_engine_id id;
802
803         if (test_bit(I915_WEDGED, &error->flags))
804                 return;
805
806         if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
807                 struct drm_printer p = drm_debug_printer(__func__);
808
809                 for_each_engine(engine, i915, id)
810                         intel_engine_dump(engine, &p, "%s\n", engine->name);
811         }
812
813         GEM_TRACE("start\n");
814
815         /*
816          * First, stop submission to hw, but do not yet complete requests by
817          * rolling the global seqno forward (since this would complete requests
818          * for which we haven't set the fence error to EIO yet).
819          */
820         reset_prepare(i915);
821
822         /* Even if the GPU reset fails, it should still stop the engines */
823         if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
824                 intel_gpu_reset(i915, ALL_ENGINES);
825
826         for_each_engine(engine, i915, id) {
827                 engine->submit_request = nop_submit_request;
828                 engine->schedule = NULL;
829         }
830         i915->caps.scheduler = 0;
831
832         /*
833          * Make sure no request can slip through without getting completed by
834          * either nop_submit_request (for anything submitted from now on) or
835          * the engine->cancel_requests() below for requests already in flight.
836          */
837         synchronize_rcu_expedited();
838         set_bit(I915_WEDGED, &error->flags);
839
840         /* Mark all executing requests as skipped */
841         for_each_engine(engine, i915, id)
842                 engine->cancel_requests(engine);
843
844         reset_finish(i915);
845
846         GEM_TRACE("end\n");
847 }
848
849 void i915_gem_set_wedged(struct drm_i915_private *i915)
850 {
851         struct i915_gpu_error *error = &i915->gpu_error;
852         intel_wakeref_t wakeref;
853
854         mutex_lock(&error->wedge_mutex);
855         with_intel_runtime_pm(&i915->runtime_pm, wakeref)
856                 __i915_gem_set_wedged(i915);
857         mutex_unlock(&error->wedge_mutex);
858 }
859
860 static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
861 {
862         struct i915_gpu_error *error = &i915->gpu_error;
863         struct i915_timeline *tl;
864
865         if (!test_bit(I915_WEDGED, &error->flags))
866                 return true;
867
868         if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
869                 return false;
870
871         GEM_TRACE("start\n");
872
873         /*
874          * Before unwedging, make sure that all pending operations
875          * are flushed and errored out - we may have requests waiting upon
876          * third party fences. We marked all inflight requests as EIO, and
877          * every execbuf since returned EIO, for consistency we want all
878          * the currently pending requests to also be marked as EIO, which
879          * is done inside our nop_submit_request - and so we must wait.
880          *
881          * No more can be submitted until we reset the wedged bit.
882          */
883         mutex_lock(&i915->gt.timelines.mutex);
884         list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
885                 struct i915_request *rq;
886
887                 rq = i915_active_request_get_unlocked(&tl->last_request);
888                 if (!rq)
889                         continue;
890
891                 /*
892                  * All internal dependencies (i915_requests) will have
893                  * been flushed by the set-wedge, but we may be stuck waiting
894                  * for external fences. These should all be capped to 10s
895                  * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
896                  * in the worst case.
897                  */
898                 dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
899                 i915_request_put(rq);
900         }
901         mutex_unlock(&i915->gt.timelines.mutex);
902
903         intel_gt_sanitize(i915, false);
904
905         /*
906          * Undo nop_submit_request. We prevent all new i915 requests from
907          * being queued (by disallowing execbuf whilst wedged) so having
908          * waited for all active requests above, we know the system is idle
909          * and do not have to worry about a thread being inside
910          * engine->submit_request() as we swap over. So unlike installing
911          * the nop_submit_request on reset, we can do this from normal
912          * context and do not require stop_machine().
913          */
914         intel_engines_reset_default_submission(i915);
915
916         GEM_TRACE("end\n");
917
918         smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
919         clear_bit(I915_WEDGED, &i915->gpu_error.flags);
920
921         return true;
922 }
923
924 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
925 {
926         struct i915_gpu_error *error = &i915->gpu_error;
927         bool result;
928
929         mutex_lock(&error->wedge_mutex);
930         result = __i915_gem_unset_wedged(i915);
931         mutex_unlock(&error->wedge_mutex);
932
933         return result;
934 }
935
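/*
 * do_reset() revokes userspace mmaps and then attempts the global reset,
 * retrying up to RESET_MAX_RETRIES times with increasing back-off before
 * reporting failure and letting the caller wedge the device.
 */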
936 static int do_reset(struct drm_i915_private *i915,
937                     intel_engine_mask_t stalled_mask)
938 {
939         int err, i;
940
941         gt_revoke(i915);
942
943         err = intel_gpu_reset(i915, ALL_ENGINES);
944         for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
945                 msleep(10 * (i + 1));
946                 err = intel_gpu_reset(i915, ALL_ENGINES);
947         }
948         if (err)
949                 return err;
950
951         return gt_reset(i915, stalled_mask);
952 }
953
954 /**
955  * i915_reset - reset chip after a hang
956  * @i915: #drm_i915_private to reset
957  * @stalled_mask: mask of the stalled engines with the guilty requests
958  * @reason: user error message for why we are resetting
959  *
960  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
961  * on failure.
962  *
963  * Procedure is fairly simple:
964  *   - reset the chip using the reset reg
965  *   - re-init context state
966  *   - re-init hardware status page
967  *   - re-init ring buffer
968  *   - re-init interrupt state
969  *   - re-init display
970  */
971 void i915_reset(struct drm_i915_private *i915,
972                 intel_engine_mask_t stalled_mask,
973                 const char *reason)
974 {
975         struct i915_gpu_error *error = &i915->gpu_error;
976         int ret;
977
978         GEM_TRACE("flags=%lx\n", error->flags);
979
980         might_sleep();
981         GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
982         mutex_lock(&error->wedge_mutex);
983
984         /* Clear any previous failed attempts at recovery. Time to try again. */
985         if (!__i915_gem_unset_wedged(i915))
986                 goto unlock;
987
988         if (reason)
989                 dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
990         error->reset_count++;
991
992         reset_prepare(i915);
993
994         if (!intel_has_gpu_reset(i915)) {
995                 if (i915_modparams.reset)
996                         dev_err(i915->drm.dev, "GPU reset not supported\n");
997                 else
998                         DRM_DEBUG_DRIVER("GPU reset disabled\n");
999                 goto error;
1000         }
1001
1002         if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
1003                 intel_runtime_pm_disable_interrupts(i915);
1004
1005         if (do_reset(i915, stalled_mask)) {
1006                 dev_err(i915->drm.dev, "Failed to reset chip\n");
1007                 goto taint;
1008         }
1009
1010         if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
1011                 intel_runtime_pm_enable_interrupts(i915);
1012
1013         intel_overlay_reset(i915);
1014
1015         /*
1016          * Next we need to restore the context, but we don't use those
1017          * yet either...
1018          *
1019          * Ring buffer needs to be re-initialized in the KMS case, or if X
1020          * was running at the time of the reset (i.e. we weren't VT
1021          * switched away).
1022          */
1023         ret = i915_gem_init_hw(i915);
1024         if (ret) {
1025                 DRM_ERROR("Failed to initialise HW following reset (%d)\n",
1026                           ret);
1027                 goto error;
1028         }
1029
1030         i915_queue_hangcheck(i915);
1031
1032 finish:
1033         reset_finish(i915);
1034 unlock:
1035         mutex_unlock(&error->wedge_mutex);
1036         return;
1037
1038 taint:
1039         /*
1040          * History tells us that if we cannot reset the GPU now, we
1041          * never will. This then impacts everything that is run
1042          * subsequently. On failing the reset, we mark the driver
1043          * as wedged, preventing further execution on the GPU.
1044          * We also want to go one step further and add a taint to the
1045          * kernel so that any subsequent faults can be traced back to
1046          * this failure. This is important for CI, where if the
1047          * GPU/driver fails we would like to reboot and restart testing
1048          * rather than continue on into oblivion. For everyone else,
1049          * the system should still plod along, but they have been warned!
1050          */
1051         add_taint_for_CI(TAINT_WARN);
1052 error:
1053         __i915_gem_set_wedged(i915);
1054         goto finish;
1055 }
1056
1057 static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
1058                                         struct intel_engine_cs *engine)
1059 {
1060         return intel_gpu_reset(i915, engine->mask);
1061 }
1062
1063 /**
1064  * i915_reset_engine - reset GPU engine to recover from a hang
1065  * @engine: engine to reset
1066  * @msg: reason for GPU reset; or NULL for no dev_notice()
1067  *
1068  * Reset a specific GPU engine. Useful if a hang is detected.
1069  * Returns zero on successful reset or otherwise an error code.
1070  *
1071  * Procedure is:
1072  *  - identifies the request that caused the hang and it is dropped
1073  *  - reset engine (which will force the engine to idle)
1074  *  - re-init/configure engine
1075  */
1076 int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
1077 {
1078         struct i915_gpu_error *error = &engine->i915->gpu_error;
1079         int ret;
1080
1081         GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
1082         GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
1083
1084         if (!intel_wakeref_active(&engine->wakeref))
1085                 return 0;
1086
1087         reset_prepare_engine(engine);
1088
1089         if (msg)
1090                 dev_notice(engine->i915->drm.dev,
1091                            "Resetting %s for %s\n", engine->name, msg);
1092         error->reset_engine_count[engine->id]++;
1093
1094         if (!engine->i915->guc.execbuf_client)
1095                 ret = intel_gt_reset_engine(engine->i915, engine);
1096         else
1097                 ret = intel_guc_reset_engine(&engine->i915->guc, engine);
1098         if (ret) {
1099                 /* If we fail here, we expect to fallback to a global reset */
1100                 DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
1101                                  engine->i915->guc.execbuf_client ? "GuC " : "",
1102                                  engine->name, ret);
1103                 goto out;
1104         }
1105
1106         /*
1107          * The request that caused the hang is stuck on elsp, we know the
1108          * active request and can drop it, adjust head to skip the offending
1109          * request to resume executing remaining requests in the queue.
1110          */
1111         intel_engine_reset(engine, true);
1112
1113         /*
1114          * The engine and its registers (and workarounds in case of render)
1115          * have been reset to their default values. Follow the init_ring
1116          * process to program RING_MODE, HWSP and re-enable submission.
1117          */
1118         ret = engine->resume(engine);
1119         if (ret)
1120                 goto out;
1121
1122 out:
1123         intel_engine_cancel_stop_cs(engine);
1124         reset_finish_engine(engine);
1125         return ret;
1126 }
1127
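/*
 * i915_reset_device() wraps the reset with uevents (so userspace/CI can
 * observe both the error and the reset) and a watchdog: if the reset does
 * not complete within 5s, the device is wedged instead of stalling forever.
 */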
1128 static void i915_reset_device(struct drm_i915_private *i915,
1129                               u32 engine_mask,
1130                               const char *reason)
1131 {
1132         struct i915_gpu_error *error = &i915->gpu_error;
1133         struct kobject *kobj = &i915->drm.primary->kdev->kobj;
1134         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1135         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1136         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1137         struct i915_wedge_me w;
1138
1139         kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1140
1141         DRM_DEBUG_DRIVER("resetting chip\n");
1142         kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1143
1144         /* Use a watchdog to ensure that our reset completes */
1145         i915_wedge_on_timeout(&w, i915, 5 * HZ) {
1146                 intel_prepare_reset(i915);
1147
1148                 /* Flush everyone using a resource about to be clobbered */
1149                 synchronize_srcu_expedited(&error->reset_backoff_srcu);
1150
1151                 i915_reset(i915, engine_mask, reason);
1152
1153                 intel_finish_reset(i915);
1154         }
1155
1156         if (!test_bit(I915_WEDGED, &error->flags))
1157                 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1158 }
1159
1160 static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
1161 {
1162         intel_uncore_rmw(uncore, reg, 0, 0);
1163 }
1164
1165 static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
1166 {
1167         GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
1168         GEN6_RING_FAULT_REG_POSTING_READ(engine);
1169 }
1170
1171 static void clear_error_registers(struct drm_i915_private *i915,
1172                                   intel_engine_mask_t engine_mask)
1173 {
1174         struct intel_uncore *uncore = &i915->uncore;
1175         u32 eir;
1176
1177         if (!IS_GEN(i915, 2))
1178                 clear_register(uncore, PGTBL_ER);
1179
1180         if (INTEL_GEN(i915) < 4)
1181                 clear_register(uncore, IPEIR(RENDER_RING_BASE));
1182         else
1183                 clear_register(uncore, IPEIR_I965);
1184
1185         clear_register(uncore, EIR);
1186         eir = intel_uncore_read(uncore, EIR);
1187         if (eir) {
1188                 /*
1189                  * some errors might have become stuck,
1190                  * mask them.
1191                  */
1192                 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
1193                 rmw_set(uncore, EMR, eir);
1194                 intel_uncore_write(uncore, GEN2_IIR,
1195                                    I915_MASTER_ERROR_INTERRUPT);
1196         }
1197
1198         if (INTEL_GEN(i915) >= 8) {
1199                 rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
1200                 intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
1201         } else if (INTEL_GEN(i915) >= 6) {
1202                 struct intel_engine_cs *engine;
1203                 enum intel_engine_id id;
1204
1205                 for_each_engine_masked(engine, i915, engine_mask, id)
1206                         gen8_clear_engine_error_register(engine);
1207         }
1208 }
1209
1210 static void gen6_check_faults(struct drm_i915_private *dev_priv)
1211 {
1212         struct intel_engine_cs *engine;
1213         enum intel_engine_id id;
1214         u32 fault;
1215
1216         for_each_engine(engine, dev_priv, id) {
1217                 fault = GEN6_RING_FAULT_REG_READ(engine);
1218                 if (fault & RING_FAULT_VALID) {
1219                         DRM_DEBUG_DRIVER("Unexpected fault\n"
1220                                          "\tAddr: 0x%08lx\n"
1221                                          "\tAddress space: %s\n"
1222                                          "\tSource ID: %d\n"
1223                                          "\tType: %d\n",
1224                                          fault & PAGE_MASK,
1225                                          fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
1226                                          RING_FAULT_SRCID(fault),
1227                                          RING_FAULT_FAULT_TYPE(fault));
1228                 }
1229         }
1230 }
1231
1232 static void gen8_check_faults(struct drm_i915_private *dev_priv)
1233 {
1234         u32 fault = I915_READ(GEN8_RING_FAULT_REG);
1235
1236         if (fault & RING_FAULT_VALID) {
1237                 u32 fault_data0, fault_data1;
1238                 u64 fault_addr;
1239
1240                 fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
1241                 fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
1242                 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
1243                              ((u64)fault_data0 << 12);
1244
1245                 DRM_DEBUG_DRIVER("Unexpected fault\n"
1246                                  "\tAddr: 0x%08x_%08x\n"
1247                                  "\tAddress space: %s\n"
1248                                  "\tEngine ID: %d\n"
1249                                  "\tSource ID: %d\n"
1250                                  "\tType: %d\n",
1251                                  upper_32_bits(fault_addr),
1252                                  lower_32_bits(fault_addr),
1253                                  fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
1254                                  GEN8_RING_FAULT_ENGINE_ID(fault),
1255                                  RING_FAULT_SRCID(fault),
1256                                  RING_FAULT_FAULT_TYPE(fault));
1257         }
1258 }
1259
1260 void i915_check_and_clear_faults(struct drm_i915_private *i915)
1261 {
1262         /* From GEN8 onwards we only have one 'All Engine Fault Register' */
1263         if (INTEL_GEN(i915) >= 8)
1264                 gen8_check_faults(i915);
1265         else if (INTEL_GEN(i915) >= 6)
1266                 gen6_check_faults(i915);
1267         else
1268                 return;
1269
1270         clear_error_registers(i915, ALL_ENGINES);
1271 }
1272
1273 /**
1274  * i915_handle_error - handle a gpu error
1275  * @i915: i915 device private
1276  * @engine_mask: mask representing engines that are hung
1277  * @flags: control flags
1278  * @fmt: Error message format string
1279  *
1280  * Do some basic checking of register state at error time and
1281  * dump it to the syslog.  Also call i915_capture_error_state() to make
1282  * sure we get a record and make it available in debugfs.  Fire a uevent
1283  * so userspace knows something bad happened (should trigger collection
1284  * of a ring dump etc.).
1285  */
1286 void i915_handle_error(struct drm_i915_private *i915,
1287                        intel_engine_mask_t engine_mask,
1288                        unsigned long flags,
1289                        const char *fmt, ...)
1290 {
1291         struct i915_gpu_error *error = &i915->gpu_error;
1292         struct intel_engine_cs *engine;
1293         intel_wakeref_t wakeref;
1294         intel_engine_mask_t tmp;
1295         char error_msg[80];
1296         char *msg = NULL;
1297
1298         if (fmt) {
1299                 va_list args;
1300
1301                 va_start(args, fmt);
1302                 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1303                 va_end(args);
1304
1305                 msg = error_msg;
1306         }
1307
1308         /*
1309          * In most cases it's guaranteed that we get here with an RPM
1310          * reference held, for example because there is a pending GPU
1311          * request that won't finish until the reset is done. This
1312          * isn't the case at least when we get here by doing a
1313          * simulated reset via debugfs, so get an RPM reference.
1314          */
1315         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1316
1317         engine_mask &= INTEL_INFO(i915)->engine_mask;
1318
1319         if (flags & I915_ERROR_CAPTURE) {
1320                 i915_capture_error_state(i915, engine_mask, msg);
1321                 clear_error_registers(i915, engine_mask);
1322         }
1323
1324         /*
1325          * Try engine reset when available. We fall back to full reset if
1326          * single reset fails.
1327          */
1328         if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
1329                 for_each_engine_masked(engine, i915, engine_mask, tmp) {
1330                         BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
1331                         if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1332                                              &error->flags))
1333                                 continue;
1334
1335                         if (i915_reset_engine(engine, msg) == 0)
1336                                 engine_mask &= ~engine->mask;
1337
1338                         clear_bit(I915_RESET_ENGINE + engine->id,
1339                                   &error->flags);
1340                         wake_up_bit(&error->flags,
1341                                     I915_RESET_ENGINE + engine->id);
1342                 }
1343         }
1344
1345         if (!engine_mask)
1346                 goto out;
1347
1348         /* Full reset needs the mutex, stop any other user trying to do so. */
1349         if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
1350                 wait_event(error->reset_queue,
1351                            !test_bit(I915_RESET_BACKOFF, &error->flags));
1352                 goto out; /* piggy-back on the other reset */
1353         }
1354
1355         /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
1356         synchronize_rcu_expedited();
1357
1358         /* Prevent any other reset-engine attempt. */
1359         for_each_engine(engine, i915, tmp) {
1360                 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1361                                         &error->flags))
1362                         wait_on_bit(&error->flags,
1363                                     I915_RESET_ENGINE + engine->id,
1364                                     TASK_UNINTERRUPTIBLE);
1365         }
1366
1367         i915_reset_device(i915, engine_mask, msg);
1368
1369         for_each_engine(engine, i915, tmp) {
1370                 clear_bit(I915_RESET_ENGINE + engine->id,
1371                           &error->flags);
1372         }
1373
1374         clear_bit(I915_RESET_BACKOFF, &error->flags);
1375         wake_up_all(&error->reset_queue);
1376
1377 out:
1378         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1379 }
1380
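/*
 * i915_reset_trylock() waits for any global reset backoff to clear and then
 * enters an SRCU read-side section; i915_reset_device() flushes such readers
 * with synchronize_srcu_expedited() on the same srcu_struct before
 * clobbering the hardware.
 */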
1381 int i915_reset_trylock(struct drm_i915_private *i915)
1382 {
1383         struct i915_gpu_error *error = &i915->gpu_error;
1384         int srcu;
1385
1386         might_lock(&error->reset_backoff_srcu);
1387         might_sleep();
1388
1389         rcu_read_lock();
1390         while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
1391                 rcu_read_unlock();
1392
1393                 if (wait_event_interruptible(error->reset_queue,
1394                                              !test_bit(I915_RESET_BACKOFF,
1395                                                        &error->flags)))
1396                         return -EINTR;
1397
1398                 rcu_read_lock();
1399         }
1400         srcu = srcu_read_lock(&error->reset_backoff_srcu);
1401         rcu_read_unlock();
1402
1403         return srcu;
1404 }
1405
1406 void i915_reset_unlock(struct drm_i915_private *i915, int tag)
1407 __releases(&i915->gpu_error.reset_backoff_srcu)
1408 {
1409         struct i915_gpu_error *error = &i915->gpu_error;
1410
1411         srcu_read_unlock(&error->reset_backoff_srcu, tag);
1412 }
1413
1414 int i915_terminally_wedged(struct drm_i915_private *i915)
1415 {
1416         struct i915_gpu_error *error = &i915->gpu_error;
1417
1418         might_sleep();
1419
1420         if (!__i915_wedged(error))
1421                 return 0;
1422
1423         /* Reset still in progress? Maybe we will recover? */
1424         if (!test_bit(I915_RESET_BACKOFF, &error->flags))
1425                 return -EIO;
1426
1427         /* XXX intel_reset_finish() still takes struct_mutex!!! */
1428         if (mutex_is_locked(&i915->drm.struct_mutex))
1429                 return -EAGAIN;
1430
1431         if (wait_event_interruptible(error->reset_queue,
1432                                      !test_bit(I915_RESET_BACKOFF,
1433                                                &error->flags)))
1434                 return -EINTR;
1435
1436         return __i915_wedged(error) ? -EIO : 0;
1437 }
1438
1439 static void i915_wedge_me(struct work_struct *work)
1440 {
1441         struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
1442
1443         dev_err(w->i915->drm.dev,
1444                 "%s timed out, cancelling all in-flight rendering.\n",
1445                 w->name);
1446         i915_gem_set_wedged(w->i915);
1447 }
1448
1449 void __i915_init_wedge(struct i915_wedge_me *w,
1450                        struct drm_i915_private *i915,
1451                        long timeout,
1452                        const char *name)
1453 {
1454         w->i915 = i915;
1455         w->name = name;
1456
1457         INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
1458         schedule_delayed_work(&w->work, timeout);
1459 }
1460
1461 void __i915_fini_wedge(struct i915_wedge_me *w)
1462 {
1463         cancel_delayed_work_sync(&w->work);
1464         destroy_delayed_work_on_stack(&w->work);
1465         w->i915 = NULL;
1466 }
1467
1468 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1469 #include "selftest_reset.c"
1470 #endif