/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>

#include "display/intel_display_types.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"

#include "uc/intel_guc.h"

#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}
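
/*
 * When a request from a hung context is found active on the engine, every
 * subsequent request from that context still on the engine's queue is
 * cancelled with -EIO, so a banned context cannot keep the GPU busy.
 */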
static void engine_skip_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *hung_ctx = rq->gem_context;

	if (!i915_request_is_active(rq))
		return;

	lockdep_assert_held(&engine->active.lock);
	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
		if (rq->gem_context == hung_ctx)
			i915_request_skip(rq, -EIO);
}
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
			       const struct i915_gem_context *ctx)
{
	unsigned int score;
	unsigned long prev_hang;

	if (i915_gem_context_is_banned(ctx))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	else
		score = 0;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
				 ctx->name, score,
				 atomic_read(&file_priv->ban_score));
	}
}
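
/*
 * A hanging context is banned once it hangs repeatedly in quick succession
 * (or immediately, if it was marked non-recoverable); its owning client
 * accumulates ban score via client_mark_guilty() above.
 */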
static bool context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned long prev_hang;
	bool banned;
	int i;

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx))
		return false;

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned) {
		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
				 ctx->name, atomic_read(&ctx->guilty_count));
		i915_gem_context_set_banned(ctx);
	}

	if (!IS_ERR_OR_NULL(ctx->file_priv))
		client_mark_guilty(ctx->file_priv, ctx);

	return banned;
}
static void context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}
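
/*
 * Fix up the request that caused (or was caught up in) the hang: a guilty
 * request is cancelled with -EIO, while an innocent one is flagged with
 * -EAGAIN so that it may be replayed once the engine has been reset.
 */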
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
		  rq->engine->name,
		  rq->fence.context,
		  rq->fence.seqno,
		  yesno(guilty));

	GEM_BUG_ON(i915_request_completed(rq));

	if (guilty) {
		i915_request_skip(rq, -EIO);
		if (context_mark_guilty(rq->gem_context))
			engine_skip_context(rq);
	} else {
		dma_fence_set_error(&rq->fence, -EAGAIN);
		context_mark_innocent(rq->gem_context);
	}
}
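
/*
 * What follows are the platform-specific reset backends, oldest first:
 * gen2/3 and g33/g4x reset via the GDRST word in PCI config space, ilk via
 * the GDSR MMIO register, and gen6+ via the GEN6_GDRST domain-reset
 * register that all later generations build upon.
 */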
static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}
static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}
static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}
static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}
static int ironlake_do_reset(struct intel_gt *gt,
			     intel_engine_mask_t engine_mask,
			     unsigned int retry)
{
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}
static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	static const u32 hw_engine_mask[] = {
		[RCS0]  = GEN6_GRDOM_RENDER,
		[BCS0]  = GEN6_GRDOM_BLT,
		[VCS0]  = GEN6_GRDOM_MEDIA,
		[VCS1]  = GEN8_GRDOM_MEDIA2,
		[VECS0] = GEN6_GRDOM_VECS,
	};
	struct intel_engine_cs *engine;
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
		}
	}

	return gen6_hw_domain_reset(gt, hw_mask);
}
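
/*
 * On gen11 the SFC (scaler and format converter) units are shared between
 * the video decode and video enhancement engines. Before resetting an
 * engine that may be using an SFC we must force-lock the unit, and if the
 * lock reveals the SFC is held by the engine being reset, include the SFC
 * in the reset domain mask as well.
 */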
static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	u32 sfc_usage_bit;
	u32 sfc_reset_bit;
	int ret;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
		break;

	default:
		return 0;
	}

	/*
	 * If the engine is using a SFC, tell the engine that a software reset
	 * is going to happen. The engine will then try to force lock the SFC.
	 * If SFC ends up being locked to the engine we want to reset, we have
	 * to reset it as well (we will unlock it once the reset sequence is
	 * completed).
	 */
	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
		return 0;

	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	ret = __intel_wait_for_register_fw(uncore,
					   sfc_forced_lock_ack,
					   sfc_forced_lock_ack_bit,
					   sfc_forced_lock_ack_bit,
					   1000, 0, NULL);

	/* Was the SFC released while we were trying to lock it? */
	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
		return 0;

	if (ret) {
		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
		return ret;
	}

	*hw_mask |= sfc_reset_bit;
	return 0;
}
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		break;

	default:
		return;
	}

	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}
static int gen11_reset_engines(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	static const u32 hw_engine_mask[] = {
		[RCS0]  = GEN11_GRDOM_RENDER,
		[BCS0]  = GEN11_GRDOM_BLT,
		[VCS0]  = GEN11_GRDOM_MEDIA,
		[VCS1]  = GEN11_GRDOM_MEDIA2,
		[VCS2]  = GEN11_GRDOM_MEDIA3,
		[VCS3]  = GEN11_GRDOM_MEDIA4,
		[VECS0] = GEN11_GRDOM_VECS,
		[VECS1] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
	} else {
		hw_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			ret = gen11_lock_sfc(engine, &hw_mask);
			if (ret)
				goto sfc_unlock;
		}
	}

	ret = gen6_hw_domain_reset(gt, hw_mask);

sfc_unlock:
	/*
	 * We unlock the SFC based on the lock status and not the result of
	 * gen11_lock_sfc to make sure that we clean up properly if something
	 * went wrong during the lock (e.g. lock acquired after timeout
	 * expiration).
	 */
	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, gt, engine_mask, tmp)
			gen11_unlock_sfc(engine);

	return ret;
}
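
/*
 * Gen8+ engines provide a ready-for-reset handshake through RING_RESET_CTL:
 * request a reset, then wait for the engine to report that it has quiesced
 * before pulling the trigger. Catastrophic errors skip the handshake, as
 * per HAS#396813, and must instead be cleared by the hardware.
 */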
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			  engine->name, request,
			  intel_uncore_read_fw(uncore, reg));

	return ret;
}
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, gt, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We would rather take context corruption than a
		 * failed reset with a wedged driver/gpu. And the
		 * active bb execution case should be covered by
		 * stop_engines() we have before the reset.
		 */
	}

	if (INTEL_GEN(gt->i915) >= 11)
		ret = gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}
static int mock_reset(struct intel_gt *gt,
		      intel_engine_mask_t mask,
		      unsigned int retry)
{
	return 0;
}

typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (is_mock_gt(gt))
		return mock_reset;
	else if (INTEL_GEN(i915) >= 8)
		return gen8_reset_engines;
	else if (INTEL_GEN(i915) >= 6)
		return gen6_reset_engines;
	else if (INTEL_GEN(i915) >= 5)
		return ironlake_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (INTEL_GEN(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}
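
/*
 * A full-GPU reset is attempted up to RESET_MAX_RETRIES times, whereas a
 * per-engine reset gets a single attempt. Forcewake is held across the
 * attempts so that a sleeping power well cannot silently drop the reset
 * request.
 */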
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GEM_TRACE("engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}
bool intel_has_gpu_reset(const struct intel_gt *gt)
{
	if (!i915_modparams.reset)
		return false;

	return intel_get_gpu_reset(gt);
}

bool intel_has_reset_engine(const struct intel_gt *gt)
{
	if (i915_modparams.reset < 2)
		return false;

	return INTEL_INFO(gt->i915)->has_reset_engine;
}
int intel_reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}
/*
 * Ensure the irq handler finishes, and is not run again.
 * Also return the active request so that we only search for it once.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	engine->reset.prepare(engine);
}
static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
		node = &vma->obj->base.vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}
static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	intel_uc_reset_prepare(&gt->uc);

	return awake;
}
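
/*
 * The reset clobbers the fence registers, so all userspace GGTT mmaps that
 * depend on them are revoked first; the affected clients simply refault
 * the pages once the fences have been restored by gt_reset().
 */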
static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}
static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	for_each_engine(engine, gt, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);

	i915_gem_restore_fences(gt->ggtt);

	return err;
}
static void reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_breadcrumbs_irq(engine);
}

static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}
}
static void nop_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
		  engine->name, request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&engine->active.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->active.lock, flags);

	intel_engine_queue_breadcrumbs(engine);
}
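
/*
 * Wedging the GPU: submission is stopped, every in-flight request is
 * completed with -EIO via nop_submit_request, and no new work is accepted
 * until (and unless) __intel_gt_unset_wedged() manages to recover.
 */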
static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, gt, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

	GEM_TRACE("start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	for_each_engine(engine, gt, id)
		engine->cancel_requests(engine);

	reset_finish(gt, awake);

	GEM_TRACE("end\n");
}
void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	mutex_lock(&gt->reset.mutex);
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_set_wedged(gt);
	mutex_unlock(&gt->reset.mutex);
}
static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	unsigned long flags;
	bool ok;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	/* Never fully initialised, recovery impossible */
	if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
		return false;

	GEM_TRACE("start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock_irqsave(&timelines->lock, flags);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct dma_fence *fence;

		fence = i915_active_fence_get(&tl->last_request);
		if (!fence)
			continue;

		spin_unlock_irqrestore(&timelines->lock, flags);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
		dma_fence_put(fence);

		/* Restart iteration after dropping the lock */
		spin_lock_irqsave(&timelines->lock, flags);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock_irqrestore(&timelines->lock, flags);

	/* We must reset pending GPU events before restoring our submission */
	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
	if (!ok) {
		/*
		 * Warn CI about the unrecoverable wedged condition.
		 * Time for a reboot.
		 */
		add_taint_for_CI(TAINT_WARN);
		return false;
	}

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GEM_TRACE("end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}
bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}
static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	gt_revoke(gt);

	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}
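
/* Restart each engine following the reset, bailing out on the first failure. */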
static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt, id) {
		ret = engine->resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GEM_TRACE("flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		dev_notice(gt->i915->drm.dev,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt)) {
		if (i915_modparams.reset)
			dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = intel_gt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
			  ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}
static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}
/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no dev_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identifies the request that caused the hang and it is dropped
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	if (!engine->gt->uc.guc.execbuf_client)
		ret = intel_gt_reset_engine(engine);
	else
		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
	if (ret) {
		/* If we fail here, we expect to fallback to a global reset */
		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
				 engine->gt->uc.guc.execbuf_client ? "GuC " : "",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put_async(engine);
	return ret;
}
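
/*
 * The global reset path additionally notifies userspace through uevents
 * (I915_ERROR_UEVENT before, I915_RESET_UEVENT during, and a final event
 * once done), and is guarded by a watchdog that wedges the GPU if the
 * reset itself fails to complete within 5 seconds.
 */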
static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_prepare_reset(gt->i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&gt->reset.backoff_srcu);

		intel_gt_reset(gt, engine_mask, reason);

		intel_finish_reset(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt->i915, engine_mask, msg);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (intel_engine_reset(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, gt, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&gt->reset.flags))
			wait_on_bit(&gt->reset.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	intel_gt_reset_global(gt, engine_mask, msg);

	for_each_engine(engine, gt, tmp)
		clear_bit_unlock(I915_RESET_ENGINE + engine->id,
				 &gt->reset.flags);
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
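
/*
 * Readers about to touch state that a concurrent reset would clobber take
 * the backoff SRCU read lock; intel_gt_reset_global() flushes them out
 * with synchronize_srcu_expedited() before clobbering that state.
 */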
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
	might_lock(&gt->reset.backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return 0;
}
void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}
int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	/* Reset still in progress? Maybe we will recover? */
	if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
		return -EIO;

	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}
void intel_gt_set_wedged_on_init(struct intel_gt *gt)
{
	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
		     I915_WEDGED_ON_INIT);
	intel_gt_set_wedged(gt);
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
}
void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);
}

void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}
static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->gt->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	intel_gt_set_wedged(w->gt);
}
void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#include "selftest_hangcheck.c"
#endif