 * SPDX-License-Identifier: MIT
 * Copyright © 2008-2018 Intel Corporation
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include "display/intel_display_types.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
#include "i915_gpu_error.h"
#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_reset.h"
#include "uc/intel_guc.h"
#include "uc/intel_guc_submission.h"
#define RESET_MAX_RETRIES 3
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
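/*
 * The two helpers below are thin read-modify-write wrappers for callers
 * that already hold the required forcewake (hence the _fw suffix); for
 * example, g4x_do_reset() uses them to toggle VCP unit clock gating
 * around the media reset (WaVcpClkGateDisableForMediaReset).
 */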
static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
	intel_uncore_rmw_fw(uncore, reg, 0, set);
static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
static void skip_context(struct i915_request *rq)
	struct intel_context *hung_ctx = rq->context;
	list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) {
		if (!i915_request_is_active(rq))
		if (rq->context == hung_ctx) {
			i915_request_set_error_once(rq, -EIO);
			__i915_request_skip(rq);
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
	struct drm_i915_file_private *file_priv = ctx->file_priv;
	unsigned long prev_hang;
	if (IS_ERR_OR_NULL(file_priv))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;
		atomic_add(score, &file_priv->ban_score);
		drm_dbg(&ctx->i915->drm,
			"client %s: gained %u ban score, now %u\n",
			atomic_read(&file_priv->ban_score));
static bool mark_guilty(struct i915_request *rq)
	struct i915_gem_context *ctx;
	unsigned long prev_hang;
	if (intel_context_is_closed(rq->context)) {
		intel_context_set_banned(rq->context);
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		return intel_context_is_banned(rq->context);
	atomic_inc(&ctx->guilty_count);
	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx)) {
	drm_notice(&ctx->i915->drm,
		   "%s context reset due to GPU hang\n",
	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;
	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
			ctx->name, atomic_read(&ctx->guilty_count));
		intel_context_set_banned(rq->context);
	client_mark_guilty(ctx, banned);
	i915_gem_context_put(ctx);
static void mark_innocent(struct i915_request *rq)
	struct i915_gem_context *ctx;
	ctx = rcu_dereference(rq->context->gem_context);
		atomic_inc(&ctx->active_count);
void __i915_request_reset(struct i915_request *rq, bool guilty)
	RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
	GEM_BUG_ON(__i915_request_is_complete(rq));
	rcu_read_lock(); /* protect the GEM context */
		i915_request_set_error_once(rq, -EIO);
		__i915_request_skip(rq);
		i915_request_set_error_once(rq, -EAGAIN);
static bool i915_in_reset(struct pci_dev *pdev)
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	err = wait_for_atomic(i915_in_reset(pdev), 50);
	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	err = wait_for_atomic(!i915_in_reset(pdev), 50);
static bool g4x_reset_complete(struct pci_dev *pdev)
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	struct intel_uncore *uncore = gt->uncore;
	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
		GT_TRACE(gt, "Wait for media reset failed\n");
	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
		GT_TRACE(gt, "Wait for render reset failed\n");
	pci_write_config_byte(pdev, I915_GDRST, 0);
	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
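/*
 * From ILK onwards the reset request moves from the GDRST byte in PCI
 * config space (used by the i915/g33/g4x paths above) to the ILK_GDSR
 * MMIO register, while keeping the same render/media domain split.
 */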
static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
	struct intel_uncore *uncore = gt->uncore;
	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
		GT_TRACE(gt, "Wait for render reset failed\n");
	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
		GT_TRACE(gt, "Wait for media reset failed\n");
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
	struct intel_uncore *uncore = gt->uncore;
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
			 "Wait for 0x%08x engines reset failed\n",
static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
	static const u32 hw_engine_mask[] = {
		[RCS0] = GEN6_GRDOM_RENDER,
		[BCS0] = GEN6_GRDOM_BLT,
		[VCS0] = GEN6_GRDOM_MEDIA,
		[VCS1] = GEN8_GRDOM_MEDIA2,
		[VECS0] = GEN6_GRDOM_VECS,
	struct intel_engine_cs *engine;
	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
		intel_engine_mask_t tmp;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
	return gen6_hw_domain_reset(gt, hw_mask);
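/*
 * On gen11 the SFC (Scaler & Format Converter) units are shared between
 * the video decode and video enhancement engines. Before resetting an
 * engine that may own an SFC we therefore force-lock the SFC and, if the
 * engine does own it, fold the SFC into the reset domain mask; see the
 * comment inside gen11_lock_sfc() below.
 */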
static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
	 * If the engine is using a SFC, tell the engine that a software reset
	 * is going to happen. The engine will then try to force lock the SFC.
	 * If SFC ends up being locked to the engine we want to reset, we have
	 * to reset it as well (we will unlock it once the reset sequence is
	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
	ret = __intel_wait_for_register_fw(uncore,
					   sfc_forced_lock_ack_bit,
					   sfc_forced_lock_ack_bit,
	/* Was the SFC released while we were trying to lock it? */
	if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
		ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
	*hw_mask |= sfc_reset_bit;
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;
	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
static int gen11_reset_engines(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask,
	static const u32 hw_engine_mask[] = {
		[RCS0] = GEN11_GRDOM_RENDER,
		[BCS0] = GEN11_GRDOM_BLT,
		[VCS0] = GEN11_GRDOM_MEDIA,
		[VCS1] = GEN11_GRDOM_MEDIA2,
		[VCS2] = GEN11_GRDOM_MEDIA3,
		[VCS3] = GEN11_GRDOM_MEDIA4,
		[VECS0] = GEN11_GRDOM_VECS,
		[VECS1] = GEN11_GRDOM_VECS2,
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			ret = gen11_lock_sfc(engine, &hw_mask);
	ret = gen6_hw_domain_reset(gt, hw_mask);
	 * We unlock the SFC based on the lock status and not the result of
	 * gen11_lock_sfc to make sure that we clean up properly if something
	 * went wrong during the lock (e.g. the lock was acquired after timeout
	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, gt, engine_mask, tmp)
			gen11_unlock_sfc(engine);
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;
		/* Catastrophic errors need to be cleared by HW */
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
		drm_err(&engine->i915->drm,
			"%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			engine->name, request,
			intel_uncore_read_fw(uncore, reg));
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
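/*
 * gen8+ wraps the legacy domain reset in a per-engine handshake: each
 * engine is first asked to quiesce via RING_RESET_CTL in
 * gen8_engine_reset_prepare(), the domain reset itself is then performed
 * by gen6/gen11_reset_engines(), and gen8_engine_reset_cancel() withdraws
 * the request afterwards.
 */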
static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	for_each_engine_masked(engine, gt, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 * We would rather risk context corruption than a failed
		 * reset that leaves the driver/GPU wedged. The active bb
		 * execution case should be covered by the stop_engines()
		 * we do before the reset.
	if (INTEL_GEN(gt->i915) >= 11)
		ret = gen11_reset_engines(gt, engine_mask, retry);
		ret = gen6_reset_engines(gt, engine_mask, retry);
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);
static int mock_reset(struct intel_gt *gt,
		      intel_engine_mask_t mask,
typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
	struct drm_i915_private *i915 = gt->i915;
	else if (INTEL_GEN(i915) >= 8)
		return gen8_reset_engines;
	else if (INTEL_GEN(i915) >= 6)
		return gen6_reset_engines;
	else if (INTEL_GEN(i915) >= 5)
	else if (IS_G4X(i915))
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
	else if (INTEL_GEN(i915) >= 3)
		return i915_do_reset;
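/*
 * The platform checks above run newest-first; intel_has_gpu_reset()
 * below simply treats the returned function pointer (or the lack of one)
 * as the answer to "do we have a reset method at all".
 */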
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	int ret = -ETIMEDOUT;
	reset = intel_get_gpu_reset(gt);
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never complete (causing -EIO).
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
		ret = reset(gt, engine_mask, retry);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
bool intel_has_gpu_reset(const struct intel_gt *gt)
	if (!gt->i915->params.reset)
	return intel_get_gpu_reset(gt);
bool intel_has_reset_engine(const struct intel_gt *gt)
	if (gt->i915->params.reset < 2)
	return INTEL_INFO(gt->i915)->has_reset_engine;
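/*
 * The GuC has its own reset domain (GEN9/GEN11_GRDOM_GUC), so it can be
 * reset through gen6_hw_domain_reset() without touching the engines.
 */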
int intel_reset_guc(struct intel_gt *gt)
		INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	GEM_BUG_ON(!HAS_GT_UC(gt->i915));
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
 * Ensure the irq handler finishes and is not run again.
 * Also return the active request so that we only search for it once.
static void reset_prepare_engine(struct intel_engine_cs *engine)
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	if (engine->reset.prepare)
		engine->reset.prepare(engine);
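/*
 * Drop the userspace mmaps that go through the fence registers: the reset
 * may clobber the fences (they are restored in gt_reset() via
 * intel_ggtt_restore_fences()), so force those users to refault.
 */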
static void revoke_mmaps(struct intel_gt *gt)
	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!i915_vma_has_userfault(vma))
		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
		node = &vma->mmo->vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;
	for_each_engine(engine, gt, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	intel_uc_reset_prepare(&gt->uc);
static void gt_revoke(struct intel_gt *gt)
static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	 * Everything depends on having the GTT running, so we need to start
	err = i915_ggtt_enable_hw(gt->i915);
	for_each_engine(engine, gt, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);
	intel_ggtt_restore_fences(gt->ggtt);
static void reset_finish_engine(struct intel_engine_cs *engine)
	if (engine->reset.finish)
		engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
	intel_engine_signal_breadcrumbs(engine);
static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	for_each_engine(engine, gt, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
static void nop_submit_request(struct i915_request *request)
	struct intel_engine_cs *engine = request->engine;
	RQ_TRACE(request, "-EIO\n");
	i915_request_set_error_once(request, -EIO);
	spin_lock_irqsave(&engine->active.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->active.lock, flags);
	intel_engine_signal_breadcrumbs(engine);
static void __intel_gt_set_wedged(struct intel_gt *gt)
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;
	if (test_bit(I915_WEDGED, &gt->reset.flags))
	GT_TRACE(gt, "start\n");
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	awake = reset_prepare(gt);
	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);
	for_each_engine(engine, gt, id)
		engine->submit_request = nop_submit_request;
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);
	/* Mark all executing requests as skipped */
	for_each_engine(engine, gt, id)
		if (engine->reset.cancel)
			engine->reset.cancel(engine);
	reset_finish(gt, awake);
	GT_TRACE(gt, "end\n");
void intel_gt_set_wedged(struct intel_gt *gt)
	intel_wakeref_t wakeref;
	if (test_bit(I915_WEDGED, &gt->reset.flags))
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	mutex_lock(&gt->reset.mutex);
	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);
		struct intel_engine_cs *engine;
		enum intel_engine_id id;
		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
		for_each_engine(engine, gt, id) {
			if (intel_engine_is_idle(engine))
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	__intel_gt_set_wedged(gt);
	mutex_unlock(&gt->reset.mutex);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
static bool __intel_gt_unset_wedged(struct intel_gt *gt)
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	if (!test_bit(I915_WEDGED, &gt->reset.flags))
	/* Never fully initialised, recovery impossible */
	if (intel_gt_has_unrecoverable_error(gt))
	GT_TRACE(gt, "start\n");
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 * No more can be submitted until we reset the wedged bit.
	spin_lock(&timelines->lock);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct dma_fence *fence;
		fence = i915_active_fence_get(&tl->last_request);
		spin_unlock(&timelines->lock);
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
		dma_fence_put(fence);
		/* Restart iteration after dropping the lock */
		spin_lock(&timelines->lock);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	spin_unlock(&timelines->lock);
	/* We must reset pending GPU events before restoring our submission */
	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
		 * Warn CI about the unrecoverable wedged condition.
		add_taint_for_CI(gt->i915, TAINT_WARN);
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	intel_engines_reset_default_submission(gt);
	GT_TRACE(gt, "end\n");
	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);
bool intel_gt_unset_wedged(struct intel_gt *gt)
	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);
static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	return gt_reset(gt, stalled_mask);
static int resume(struct intel_gt *gt)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	for_each_engine(engine, gt, id) {
		ret = intel_engine_resume(engine);
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * Procedure is fairly simple:
 *  - reset the chip using the reset reg
 *  - re-init context state
 *  - re-init hardware status page
 *  - re-init ring buffer
 *  - re-init interrupt state
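 *
 * Callers normally reach this via intel_gt_handle_error(), which takes
 * the I915_RESET_BACKOFF serialisation and then calls
 * intel_gt_reset_global(), which in turn invokes this function.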
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
	intel_engine_mask_t awake;
	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
	mutex_lock(&gt->reset.mutex);
	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		drm_notice(&gt->i915->drm,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);
	awake = reset_prepare(gt);
	if (!intel_has_gpu_reset(gt)) {
		if (gt->i915->params.reset)
			drm_err(&gt->i915->drm, "GPU reset not supported\n");
			drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);
	if (do_reset(gt, stalled_mask)) {
		drm_err(&gt->i915->drm, "Failed to reset chip\n");
	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);
	intel_overlay_reset(gt->i915);
	 * Next we need to restore the context, but we don't use those
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	ret = intel_gt_init_hw(gt);
		drm_err(&gt->i915->drm,
			"Failed to initialise HW following reset (%d)\n",
	reset_finish(gt, awake);
	mutex_unlock(&gt->reset.mutex);
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	add_taint_for_CI(gt->i915, TAINT_WARN);
	__intel_gt_set_wedged(gt);
static int intel_gt_reset_engine(struct intel_engine_cs *engine)
	return __intel_gt_reset(engine->gt, engine->mask);
int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
	struct intel_gt *gt = engine->gt;
	bool uses_guc = intel_engine_in_guc_submission_mode(engine);
	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
	if (!intel_engine_pm_get_if_awake(engine))
	reset_prepare_engine(engine);
		drm_notice(&engine->i915->drm,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
		ret = intel_gt_reset_engine(engine);
		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
		/* If we fail here, we expect to fallback to a global reset */
		ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
	 * The request that caused the hang is stuck on elsp. We know the
	 * active request and can drop it; adjust the ring head to skip the
	 * offending request and resume executing the remaining requests in
	 * the queue.
	__intel_engine_reset(engine, true);
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	ret = intel_engine_resume(engine);
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put_async(engine);
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no drm_notice()
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *  - identifies the request that caused the hang and it is dropped
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
	err = __intel_engine_reset_bh(engine, msg);
static void intel_gt_reset_global(struct intel_gt *gt,
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;
	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
	GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_display_prepare_reset(gt->i915);
		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&gt->reset.backoff_srcu);
		intel_gt_reset(gt, engine_mask, reason);
		intel_display_finish_reset(gt->i915);
	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
 * intel_gt_handle_error - handle a gpu error
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
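 *
 * A rough usage sketch (the reason string here is made up purely for
 * illustration):
 *
 *	intel_gt_handle_error(gt, engine->mask, I915_ERROR_CAPTURE,
 *			      "%s hung", engine->name);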
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	engine_mask &= gt->info.engine_mask;
	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt, engine_mask);
		intel_gt_clear_error_registers(gt, engine_mask);
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
			if (__intel_engine_reset_bh(engine, msg) == 0)
				engine_mask &= ~engine->mask;
			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();
	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, gt, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
			wait_on_bit(&gt->reset.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	intel_gt_reset_global(gt, engine_mask, msg);
	for_each_engine(engine, gt, tmp)
		clear_bit_unlock(I915_RESET_ENGINE + engine->id,
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
	might_lock(&gt->reset.backoff_srcu);
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
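/*
 * A rough sketch of the expected trylock/unlock pairing, for a caller
 * that must keep a full GT reset at bay while it touches the hardware
 * (the surrounding code is illustrative only):
 *
 *	int tag, err;
 *
 *	err = intel_gt_reset_trylock(gt, &tag);
 *	if (err)
 *		return err;
 *	... touch the hardware ...
 *	intel_gt_reset_unlock(gt, tag);
 */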
int intel_gt_terminally_wedged(struct intel_gt *gt)
	if (!intel_gt_is_wedged(gt))
	if (intel_gt_has_unrecoverable_error(gt))
	/* Reset still in progress? Maybe we will recover? */
	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
	return intel_gt_is_wedged(gt) ? -EIO : 0;
void intel_gt_set_wedged_on_init(struct intel_gt *gt)
	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
		     I915_WEDGED_ON_INIT);
	intel_gt_set_wedged(gt);
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
	/* Wedged on init is non-recoverable */
	add_taint_for_CI(gt->i915, TAINT_WARN);
void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
	intel_gt_set_wedged(gt);
	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
	intel_gt_retire_requests(gt); /* cleanup any wedged requests */
void intel_gt_init_reset(struct intel_gt *gt)
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);
	 * While undesirable to wait inside the shrinker, complain anyway.
	 * If we have to wait during shrinking, we guarantee forward progress
	 * by forcing the reset. Therefore during the reset we must not
	 * re-enter the shrinker. By declaring that we take the reset mutex
	 * within the shrinker, we forbid ourselves from performing any
	 * fs-reclaim or taking related locks during reset.
	i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
	/* no GPU until we are ready! */
	__set_bit(I915_WEDGED, &gt->reset.flags);
void intel_gt_fini_reset(struct intel_gt *gt)
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
static void intel_wedge_me(struct work_struct *work)
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
	drm_err(&w->gt->i915->drm,
		"%s timed out, cancelling all in-flight rendering.\n",
	intel_gt_set_wedged(w->gt);
void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
void __intel_fini_wedge(struct intel_wedge_me *w)
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
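/*
 * __intel_init_wedge()/__intel_fini_wedge() back the intel_wedge_on_timeout()
 * block used in intel_gt_reset_global() above: if the body does not complete
 * within the timeout, intel_wedge_me() fires and wedges the GT.
 */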
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#include "selftest_hangcheck.c"