/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>

#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"

#include "intel_guc.h"

#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

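/*
 * Convenience wrappers around intel_uncore_rmw(). Note that the _fw
 * variants use the raw register accessors and therefore assume the caller
 * already holds the required forcewake domains.
 */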
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}

static void engine_skip_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *hung_ctx = rq->gem_context;

	lockdep_assert_held(&engine->active.lock);

	if (!i915_request_is_active(rq))
		return;

	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
		if (rq->gem_context == hung_ctx)
			i915_request_skip(rq, -EIO);
}

static void client_mark_guilty(struct drm_i915_file_private *file_priv,
			       const struct i915_gem_context *ctx)
{
	unsigned int score;
	unsigned long prev_hang;

	if (i915_gem_context_is_banned(ctx))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	else
		score = 0;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
				 ctx->name, score,
				 atomic_read(&file_priv->ban_score));
	}
}

static bool context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned long prev_hang;
	bool banned;
	int i;

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx))
		return false;

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned) {
		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
				 ctx->name, atomic_read(&ctx->guilty_count));
		i915_gem_context_set_banned(ctx);
	}

	if (!IS_ERR_OR_NULL(ctx->file_priv))
		client_mark_guilty(ctx->file_priv, ctx);

	return banned;
}

static void context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}

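/*
 * A guilty request (one that caused the hang) is cancelled with -EIO, and
 * repeat offences ban the whole context; an innocent request is flagged
 * with -EAGAIN so that it can be rearmed and replayed after the reset.
 */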
void i915_reset_request(struct i915_request *rq, bool guilty)
{
	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
		  rq->engine->name,
		  rq->fence.context,
		  rq->fence.seqno,
		  yesno(guilty));

	lockdep_assert_held(&rq->engine->active.lock);
	GEM_BUG_ON(i915_request_completed(rq));

	if (guilty) {
		i915_request_skip(rq, -EIO);
		if (context_mark_guilty(rq->gem_context))
			engine_skip_context(rq);
	} else {
		dma_fence_set_error(&rq->fence, -EAGAIN);
		context_mark_innocent(rq->gem_context);
	}
}

static void gen3_stop_engine(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;

	GEM_TRACE("%s\n", engine->name);

	if (intel_engine_stop_cs(engine))
		GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);

	intel_uncore_write_fw(uncore,
			      RING_HEAD(base),
			      intel_uncore_read_fw(uncore, RING_TAIL(base)));
	intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */

	intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
	intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
	intel_uncore_posting_read_fw(uncore, RING_TAIL(base));

	/* The ring must be empty before it is disabled */
	intel_uncore_write_fw(uncore, RING_CTL(base), 0);

	/* Check acts as a post */
	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
		GEM_TRACE("%s: ring head [%x] not parked\n",
			  engine->name,
			  intel_uncore_read_fw(uncore, RING_HEAD(base)));
}

static void i915_stop_engines(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;

	if (INTEL_GEN(i915) < 3)
		return;

	for_each_engine_masked(engine, i915, engine_mask, tmp)
		gen3_stop_engine(engine);
}

static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct drm_i915_private *i915,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = i915->drm.pdev;
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *i915,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = i915->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct drm_i915_private *i915,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct intel_uncore *uncore = &i915->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ironlake_do_reset(struct drm_i915_private *i915,
			     intel_engine_mask_t engine_mask,
			     unsigned int retry)
{
	struct intel_uncore *uncore = &i915->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *i915,
				u32 hw_domain_mask)
{
	struct intel_uncore *uncore = &i915->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}

static int gen6_reset_engines(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[] = {
		[RCS0]  = GEN6_GRDOM_RENDER,
		[BCS0]  = GEN6_GRDOM_BLT,
		[VCS0]  = GEN6_GRDOM_MEDIA,
		[VCS1]  = GEN8_GRDOM_MEDIA2,
		[VECS0] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
		}
	}

	return gen6_hw_domain_reset(i915, hw_mask);
}

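/*
 * On gen11 the SFC (Scaler and Format Converter) units are shared between
 * the video decode and video enhancement boxes. Before resetting an engine
 * that may be using an SFC, we ask the hardware to force-lock it; if the
 * SFC turns out to be locked to the engine being reset, its reset domain
 * is folded into the engine reset and it is unlocked again afterwards.
 */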
static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	u32 sfc_usage_bit;
	u32 sfc_reset_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
		break;

	default:
		return 0;
	}

	/*
	 * Tell the engine that a software reset is going to happen. The engine
	 * will then try to force lock the SFC (if currently locked, it will
	 * remain so until we tell the engine it is safe to unlock; if currently
	 * unlocked, it will ignore this and all new lock requests). If SFC
	 * ends up being locked to the engine we want to reset, we have to reset
	 * it as well (we will unlock it once the reset sequence is completed).
	 */
	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	if (__intel_wait_for_register_fw(uncore,
					 sfc_forced_lock_ack,
					 sfc_forced_lock_ack_bit,
					 sfc_forced_lock_ack_bit,
					 1000, 0, NULL)) {
		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
		return 0;
	}

	if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
		return sfc_reset_bit;

	return 0;
}

static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		break;

	default:
		return;
	}

	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}

static int gen11_reset_engines(struct drm_i915_private *i915,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	const u32 hw_engine_mask[] = {
		[RCS0]  = GEN11_GRDOM_RENDER,
		[BCS0]  = GEN11_GRDOM_BLT,
		[VCS0]  = GEN11_GRDOM_MEDIA,
		[VCS1]  = GEN11_GRDOM_MEDIA2,
		[VCS2]  = GEN11_GRDOM_MEDIA3,
		[VCS3]  = GEN11_GRDOM_MEDIA4,
		[VECS0] = GEN11_GRDOM_VECS,
		[VECS1] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
	} else {
		hw_mask = 0;
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			hw_mask |= gen11_lock_sfc(engine);
		}
	}

	ret = gen6_hw_domain_reset(i915, hw_mask);

	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, i915, engine_mask, tmp)
			gen11_unlock_sfc(engine);

	return ret;
}

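/*
 * Gen8 added a per-engine ready-for-reset handshake: a reset is requested
 * via RING_RESET_CTL and the engine must ack READY_TO_RESET before the
 * reset domain is actually poked. Catastrophic errors bypass the handshake
 * (see the HAS#396813 reference below).
 */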
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			  engine->name, request,
			  intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, i915, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We would rather take context corruption than a
		 * failed reset with a wedged driver/gpu. The active
		 * bb execution case should be covered by the
		 * i915_stop_engines call before the reset.
		 */
	}

	if (INTEL_GEN(i915) >= 11)
		ret = gen11_reset_engines(i915, engine_mask, retry);
	else
		ret = gen6_reset_engines(i915, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, i915, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}

typedef int (*reset_func)(struct drm_i915_private *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 8)
		return gen8_reset_engines;
	else if (INTEL_GEN(i915) >= 6)
		return gen6_reset_engines;
	else if (INTEL_GEN(i915) >= 5)
		return ironlake_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (INTEL_GEN(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

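/*
 * The hardware entry point: a full (ALL_ENGINES) reset is retried up to
 * RESET_MAX_RETRIES times while the hardware fails to ack (-ETIMEDOUT),
 * stopping the engines before each retry; a per-engine reset is attempted
 * only once. Returns -ENODEV if no reset method exists for the platform.
 */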
int intel_gpu_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(i915);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		/*
		 * We stop engines, otherwise we might get failed reset and a
		 * dead gpu (on elk). Also, even a gpu as modern as kbl can
		 * suffer from a system hang if a batchbuffer is progressing
		 * when the reset is issued, regardless of READY_TO_RESET ack.
		 * Thus assume it is best to stop engines on all gens
		 * where we have a gpu reset.
		 *
		 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
		 *
		 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
		 *
		 * FIXME: Wa for more modern gens needs to be validated
		 */
		if (retry)
			i915_stop_engines(i915, engine_mask);

		GEM_TRACE("engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(i915, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *i915)
{
	if (!i915_modparams.reset)
		return false;

	return intel_get_gpu_reset(i915);
}

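/*
 * The i915.reset modparam gates how much of this machinery is used: 0
 * disables reset entirely, 1 allows only a full GPU reset, and >= 2 also
 * allows per-engine resets (as the check below suggests).
 */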
bool intel_has_reset_engine(struct drm_i915_private *i915)
{
	return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
}

int intel_reset_guc(struct drm_i915_private *i915)
{
	u32 guc_domain =
		INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GUC(i915));

	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(i915, guc_domain);
	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);

	return ret;
}

/*
 * Ensure the irq handler finishes, and is not run again.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_engine_pm_get(engine);
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	engine->reset.prepare(engine);
}

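/*
 * The fence registers are lost across reset, so any userspace mapping that
 * depends on them must be zapped: the ranges are unmapped here so that the
 * next access faults in and reacquires a fence after the reset.
 */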
static void revoke_mmaps(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(i915->ggtt.fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]);
		node = &vma->obj->base.vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
		unmap_mapping_range(i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

static void reset_prepare(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	intel_gt_pm_get(i915);
	for_each_engine(engine, i915, id)
		reset_prepare_engine(engine);

	intel_uc_reset_prepare(i915);
}

static void gt_revoke(struct drm_i915_private *i915)
{
	revoke_mmaps(i915);
}

static int gt_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(i915);
	if (err)
		return err;

	for_each_engine(engine, i915, id)
		intel_engine_reset(engine, stalled_mask & engine->mask);

	i915_gem_restore_fences(i915);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);
	intel_engine_pm_put(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}

static void reset_finish(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		reset_finish_engine(engine);
		intel_engine_signal_breadcrumbs(engine);
	}
	intel_gt_pm_put(i915);
}

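/*
 * Once the device is wedged, every engine's submit_request is replaced
 * with this nop: the request is marked complete with -EIO instead of ever
 * being passed to the hardware.
 */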
static void nop_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
		  engine->name, request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&engine->active.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->active.lock, flags);

	intel_engine_queue_breadcrumbs(engine);
}

static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &error->flags))
		return;

	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, i915, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

	GEM_TRACE("start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	reset_prepare(i915);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		intel_gpu_reset(i915, ALL_ENGINES);

	for_each_engine(engine, i915, id) {
		engine->submit_request = nop_submit_request;
		engine->schedule = NULL;
	}
	i915->caps.scheduler = 0;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &error->flags);

	/* Mark all executing requests as skipped */
	for_each_engine(engine, i915, id)
		engine->cancel_requests(engine);

	reset_finish(i915);

	GEM_TRACE("end\n");
}

void i915_gem_set_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	intel_wakeref_t wakeref;

	mutex_lock(&error->wedge_mutex);
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		__i915_gem_set_wedged(i915);
	mutex_unlock(&error->wedge_mutex);
}

static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct i915_timeline *tl;

	if (!test_bit(I915_WEDGED, &error->flags))
		return true;

	if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
		return false;

	GEM_TRACE("start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	mutex_lock(&i915->gt.timelines.mutex);
	list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);
	}
	mutex_unlock(&i915->gt.timelines.mutex);

	intel_gt_sanitize(i915, false);

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(i915);

	GEM_TRACE("end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &i915->gpu_error.flags);

	return true;
}

bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	bool result;

	mutex_lock(&error->wedge_mutex);
	result = __i915_gem_unset_wedged(i915);
	mutex_unlock(&error->wedge_mutex);

	return result;
}

static int do_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t stalled_mask)
{
	int err, i;

	gt_revoke(i915);

	err = intel_gpu_reset(i915, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = intel_gpu_reset(i915, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(i915, stalled_mask);
}

/**
 * i915_reset - reset chip after a hang
 * @i915: #drm_i915_private to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void i915_reset(struct drm_i915_private *i915,
		intel_engine_mask_t stalled_mask,
		const char *reason)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	int ret;

	GEM_TRACE("flags=%lx\n", error->flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
	mutex_lock(&error->wedge_mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__i915_gem_unset_wedged(i915))
		goto unlock;

	if (reason)
		dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
	error->reset_count++;

	reset_prepare(i915);

	if (!intel_has_gpu_reset(i915)) {
		if (i915_modparams.reset)
			dev_err(i915->drm.dev, "GPU reset not supported\n");
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(i915);

	if (do_reset(i915, stalled_mask)) {
		dev_err(i915->drm.dev, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(i915);

	intel_overlay_reset(i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(i915);
	if (ret) {
		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
			  ret);
		goto taint;
	}

	i915_queue_hangcheck(i915);

finish:
	reset_finish(i915);
unlock:
	mutex_unlock(&error->wedge_mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(TAINT_WARN);
error:
	__i915_gem_set_wedged(i915);
	goto finish;
}

static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
					struct intel_engine_cs *engine)
{
	return intel_gpu_reset(i915, engine->mask);
}

/**
 * i915_reset_engine - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no dev_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identify the request that caused the hang and drop it
 *  - reset the engine (which will force the engine to idle)
 *  - re-init/configure the engine
 */
int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
{
	struct i915_gpu_error *error = &engine->i915->gpu_error;
	int ret;

	GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));

	if (!intel_wakeref_active(&engine->wakeref))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s for %s\n", engine->name, msg);
	error->reset_engine_count[engine->id]++;

	if (!engine->i915->guc.execbuf_client)
		ret = intel_gt_reset_engine(engine->i915, engine);
	else
		ret = intel_guc_reset_engine(&engine->i915->guc, engine);
	if (ret) {
		/* If we fail here, we expect to fallback to a global reset */
		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
				 engine->i915->guc.execbuf_client ? "GuC " : "",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->resume(engine);
	if (ret)
		goto out;

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	return ret;
}

static void i915_reset_device(struct drm_i915_private *i915,
			      intel_engine_mask_t engine_mask,
			      const char *reason)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct kobject *kobj = &i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct i915_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	i915_wedge_on_timeout(&w, i915, 5 * HZ) {
		intel_prepare_reset(i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&error->reset_backoff_srcu);

		i915_reset(i915, engine_mask, reason);

		intel_finish_reset(i915);
	}

	if (!test_bit(I915_WEDGED, &error->flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

static void clear_error_registers(struct drm_i915_private *i915,
				  intel_engine_mask_t engine_mask)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 eir;

	if (!IS_GEN(i915, 2))
		clear_register(uncore, PGTBL_ER);

	if (INTEL_GEN(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, i915, engine_mask, id)
			gen8_clear_engine_error_register(engine);
	}
}

static void gen6_check_faults(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, dev_priv, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault & PAGE_MASK,
					 fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault),
					 RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

static void gen8_check_faults(struct drm_i915_private *dev_priv)
{
	u32 fault = I915_READ(GEN8_RING_FAULT_REG);

	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		DRM_DEBUG_DRIVER("Unexpected fault\n"
				 "\tAddr: 0x%08x_%08x\n"
				 "\tAddress space: %s\n"
				 "\tEngine ID: %d\n"
				 "\tSource ID: %d\n"
				 "\tType: %d\n",
				 upper_32_bits(fault_addr),
				 lower_32_bits(fault_addr),
				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
				 GEN8_RING_FAULT_ENGINE_ID(fault),
				 RING_FAULT_SRCID(fault),
				 RING_FAULT_FAULT_TYPE(fault));
	}
}

void i915_check_and_clear_faults(struct drm_i915_private *i915)
{
	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (INTEL_GEN(i915) >= 8)
		gen8_check_faults(i915);
	else if (INTEL_GEN(i915) >= 6)
		gen6_check_faults(i915);
	else
		return;

	clear_error_registers(i915, ALL_ENGINES);
}

/**
 * i915_handle_error - handle a gpu error
 * @i915: i915 device private
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_i915_private *i915,
		       intel_engine_mask_t engine_mask,
		       unsigned long flags,
		       const char *fmt, ...)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	engine_mask &= INTEL_INFO(i915)->engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(i915, engine_mask, msg);
		clear_error_registers(i915, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &error->flags))
				continue;

			if (i915_reset_engine(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_bit(I915_RESET_ENGINE + engine->id,
				  &error->flags);
			wake_up_bit(&error->flags,
				    I915_RESET_ENGINE + engine->id);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
		wait_event(error->reset_queue,
			   !test_bit(I915_RESET_BACKOFF, &error->flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, i915, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&error->flags))
			wait_on_bit(&error->flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	i915_reset_device(i915, engine_mask, msg);

	for_each_engine(engine, i915, tmp) {
		clear_bit(I915_RESET_ENGINE + engine->id,
			  &error->flags);
	}

	clear_bit(I915_RESET_BACKOFF, &error->flags);
	wake_up_all(&error->reset_queue);

out:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

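/*
 * Illustrative caller of the above (a sketch only; real callers, such as
 * the hang detector, live outside this file):
 *
 *	i915_handle_error(i915, engine->mask, I915_ERROR_CAPTURE,
 *			  "%s hung", engine->name);
 */
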
int i915_reset_trylock(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	int srcu;

	might_lock(&error->reset_backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(error->reset_queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &error->flags)))
			return -EINTR;

		rcu_read_lock();
	}
	srcu = srcu_read_lock(&error->reset_backoff_srcu);
	rcu_read_unlock();

	return srcu;
}

void i915_reset_unlock(struct drm_i915_private *i915, int tag)
__releases(&i915->gpu_error.reset_backoff_srcu)
{
	struct i915_gpu_error *error = &i915->gpu_error;

	srcu_read_unlock(&error->reset_backoff_srcu, tag);
}

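/*
 * Hypothetical pairing of the two functions above (a sketch only):
 *
 *	int tag = i915_reset_trylock(i915);
 *	if (tag < 0)
 *		return tag;
 *	... touch state that a concurrent reset would clobber ...
 *	i915_reset_unlock(i915, tag);
 */
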
int i915_terminally_wedged(struct drm_i915_private *i915)
{
	struct i915_gpu_error *error = &i915->gpu_error;

	might_sleep();

	if (!__i915_wedged(error))
		return 0;

	/* Reset still in progress? Maybe we will recover? */
	if (!test_bit(I915_RESET_BACKOFF, &error->flags))
		return -EIO;

	/* XXX intel_reset_finish() still takes struct_mutex!!! */
	if (mutex_is_locked(&i915->drm.struct_mutex))
		return -EAGAIN;

	if (wait_event_interruptible(error->reset_queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &error->flags)))
		return -EINTR;

	return __i915_wedged(error) ? -EIO : 0;
}

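/*
 * i915_wedge_on_timeout() arms the delayed work below as a watchdog: if
 * the protected block does not reach __i915_fini_wedge() within the given
 * timeout, the device is declared terminally wedged.
 */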
static void i915_wedge_me(struct work_struct *work)
{
	struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	i915_gem_set_wedged(w->i915);
}

void __i915_init_wedge(struct i915_wedge_me *w,
		       struct drm_i915_private *i915,
		       long timeout,
		       const char *name)
{
	w->i915 = i915;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __i915_fini_wedge(struct i915_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->i915 = NULL;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#endif