/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>

#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"

#include "uc/intel_guc.h"
#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}
static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}
static void engine_skip_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *hung_ctx = rq->gem_context;

	lockdep_assert_held(&engine->active.lock);

	if (!i915_request_is_active(rq))
		return;

	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
		if (rq->gem_context == hung_ctx)
			i915_request_skip(rq, -EIO);
}
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
			       const struct i915_gem_context *ctx)
{
	unsigned int score;
	unsigned long prev_hang;

	if (i915_gem_context_is_banned(ctx))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	else
		score = 0;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
				 ctx->name, score,
				 atomic_read(&file_priv->ban_score));
	}
}
static bool context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned long prev_hang;
	bool banned;
	int i;

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx))
		return false;

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned) {
		DRM_DEBUG_DRIVER("context %s: guilty %d times, banned\n",
				 ctx->name, atomic_read(&ctx->guilty_count));
		i915_gem_context_set_banned(ctx);
	}

	if (!IS_ERR_OR_NULL(ctx->file_priv))
		client_mark_guilty(ctx->file_priv, ctx);

	return banned;
}
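
/*
 * A worked example of the scheme above: each hang shifts the timestamp
 * history down and records jiffies; if the oldest recorded hang is still
 * within CONTEXT_FAST_HANG_JIFFIES, the context is hanging in rapid
 * succession and is banned, as is any context marked non-recoverable.
 * Every guilty verdict also feeds the owner's ban score through
 * client_mark_guilty().
 */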
static void context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
		  rq->engine->name,
		  rq->fence.context,
		  rq->fence.seqno,
		  yesno(guilty));

	lockdep_assert_held(&rq->engine->active.lock);
	GEM_BUG_ON(i915_request_completed(rq));

	if (guilty) {
		i915_request_skip(rq, -EIO);
		if (context_mark_guilty(rq->gem_context))
			engine_skip_context(rq);
	} else {
		dma_fence_set_error(&rq->fence, -EAGAIN);
		context_mark_innocent(rq->gem_context);
	}
}
static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}
static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}
static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}
static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = gt->i915->drm.pdev;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}
static int ironlake_do_reset(struct intel_gt *gt,
			     intel_engine_mask_t engine_mask,
			     unsigned int retry)
{
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		DRM_DEBUG_DRIVER("Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
				 hw_domain_mask);

	return err;
}
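
/*
 * For example, gen6_hw_domain_reset(gt, GEN6_GRDOM_RENDER | GEN6_GRDOM_BLT)
 * would reset the render and blitter domains in a single GDRST write;
 * callers build the mask from the per-engine tables below.
 */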
static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[] = {
		[RCS0]  = GEN6_GRDOM_RENDER,
		[BCS0]  = GEN6_GRDOM_BLT,
		[VCS0]  = GEN6_GRDOM_MEDIA,
		[VCS1]  = GEN8_GRDOM_MEDIA2,
		[VECS0] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
		}
	}

	return gen6_hw_domain_reset(gt, hw_mask);
}
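
/*
 * gen11_lock_sfc() below returns the extra GEN11_*_SFC_RESET_BIT to fold
 * into the GDRST domain mask when the Scaler & Format Converter shared
 * between media engines is in use by the engine being reset, or 0 when no
 * SFC reset is required; the forced lock is released again afterwards by
 * gen11_unlock_sfc().
 */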
static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
	u32 sfc_usage_bit;
	u32 sfc_reset_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
		sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
		sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_usage = GEN11_VECS_SFC_USAGE(engine);
		sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
		break;

	default:
		return 0;
	}

	/*
	 * Tell the engine that a software reset is going to happen. The engine
	 * will then try to force lock the SFC (if currently locked, it will
	 * remain so until we tell the engine it is safe to unlock; if currently
	 * unlocked, it will ignore this and all new lock requests). If SFC
	 * ends up being locked to the engine we want to reset, we have to reset
	 * it as well (we will unlock it once the reset sequence is completed).
	 */
	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	if (__intel_wait_for_register_fw(uncore,
					 sfc_forced_lock_ack,
					 sfc_forced_lock_ack_bit,
					 sfc_forced_lock_ack_bit,
					 1000, 0, NULL)) {
		DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
		return 0;
	}

	if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
		return sfc_reset_bit;

	return 0;
}
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return;

		sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
		sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
		break;

	default:
		return;
	}

	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}
static int gen11_reset_engines(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	const u32 hw_engine_mask[] = {
		[RCS0]  = GEN11_GRDOM_RENDER,
		[BCS0]  = GEN11_GRDOM_BLT,
		[VCS0]  = GEN11_GRDOM_MEDIA,
		[VCS1]  = GEN11_GRDOM_MEDIA2,
		[VCS2]  = GEN11_GRDOM_MEDIA3,
		[VCS3]  = GEN11_GRDOM_MEDIA4,
		[VECS0] = GEN11_GRDOM_VECS,
		[VECS1] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN11_GRDOM_FULL;
	} else {
		hw_mask = 0;
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			hw_mask |= gen11_lock_sfc(engine);
		}
	}

	ret = gen6_hw_domain_reset(gt, hw_mask);

	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
			gen11_unlock_sfc(engine);

	return ret;
}
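
/*
 * Gen8+ ready-for-reset handshake: before pulling GDRST we ask each engine
 * to quiesce by setting RESET_CTL_REQUEST_RESET and waiting for it to
 * acknowledge with RESET_CTL_READY_TO_RESET (bypassed for catastrophic
 * errors, which the hardware clears by itself). The request is dropped
 * again by gen8_engine_reset_cancel() once the reset has been performed.
 */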
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			  engine->name, request,
			  intel_uncore_read_fw(uncore, reg));

	return ret;
}
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We rather take context corruption instead of
		 * failed reset with a wedged driver/gpu. And
		 * active bb execution case should be covered by
		 * stop_engines() we have before the reset.
		 */
	}

	if (INTEL_GEN(gt->i915) >= 11)
		ret = gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}
typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 8)
		return gen8_reset_engines;
	else if (INTEL_GEN(i915) >= 6)
		return gen6_reset_engines;
	else if (INTEL_GEN(i915) >= 5)
		return ironlake_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (INTEL_GEN(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt->i915);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GEM_TRACE("engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}
bool intel_has_gpu_reset(struct drm_i915_private *i915)
{
	if (!i915_modparams.reset)
		return false;

	return intel_get_gpu_reset(i915);
}
bool intel_has_reset_engine(struct drm_i915_private *i915)
{
	return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
}
int intel_reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}
/* Ensure the irq handler finishes, and is not run again. */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	engine->reset.prepare(engine);
}
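
/*
 * Zap every userspace mapping of a fence-backed GGTT view so that any
 * access after the reset faults back into the kernel and picks up the
 * restored fence register state.
 */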
static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
		node = &vma->obj->base.vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}
static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	for_each_engine(engine, gt->i915, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	intel_uc_reset_prepare(&gt->uc);

	return awake;
}
static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}
static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	for_each_engine(engine, gt->i915, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);

	i915_gem_restore_fences(gt->i915);

	return err;
}
static void reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_signal_breadcrumbs(engine);
}
static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt->i915, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}
}
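
/*
 * While wedged, every request is "submitted" by completing it immediately
 * with -EIO so that waiters make forward progress without ever touching
 * the dead hardware.
 */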
static void nop_submit_request(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
		  engine->name, request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&engine->active.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
	spin_unlock_irqrestore(&engine->active.lock, flags);

	intel_engine_queue_breadcrumbs(engine);
}
static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, gt->i915, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

	GEM_TRACE("start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt->i915, id) {
		engine->submit_request = nop_submit_request;
		engine->schedule = NULL;
	}
	gt->i915->caps.scheduler = 0;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	for_each_engine(engine, gt->i915, id)
		engine->cancel_requests(engine);

	reset_finish(gt, awake);

	GEM_TRACE("end\n");
}
void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	mutex_lock(&gt->reset.mutex);
	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
		__intel_gt_set_wedged(gt);
	mutex_unlock(&gt->reset.mutex);
}
static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	if (!gt->scratch) /* Never fully initialised, recovery impossible */
		return false;

	GEM_TRACE("start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	mutex_lock(&timelines->mutex);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);
	}
	mutex_unlock(&timelines->mutex);

	intel_gt_sanitize(gt, false);

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GEM_TRACE("end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}
bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}
static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	gt_revoke(gt);

	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}
static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt->i915, id) {
		ret = engine->resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GEM_TRACE("flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		dev_notice(gt->i915->drm.dev,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt->i915)) {
		if (i915_modparams.reset)
			dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(gt->i915);
	if (ret) {
		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
			  ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

	intel_gt_queue_hangcheck(gt);

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}
static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}
/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no dev_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identify the request that caused the hang and drop it
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	if (!engine->gt->uc.guc.execbuf_client)
		ret = intel_gt_reset_engine(engine);
	else
		ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
	if (ret) {
		/* If we fail here, we expect to fallback to a global reset */
		DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
				 engine->gt->uc.guc.execbuf_client ? "GuC " : "",
				 engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = engine->resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put(engine);
	return ret;
}
static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	DRM_DEBUG_DRIVER("resetting chip\n");
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_prepare_reset(gt->i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&gt->reset.backoff_srcu);

		intel_gt_reset(gt, engine_mask, reason);

		intel_finish_reset(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);

	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt->i915, engine_mask, msg);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) {
		for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (intel_engine_reset(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/* Prevent any other reset-engine attempt. */
	for_each_engine(engine, gt->i915, tmp) {
		while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					&gt->reset.flags))
			wait_on_bit(&gt->reset.flags,
				    I915_RESET_ENGINE + engine->id,
				    TASK_UNINTERRUPTIBLE);
	}

	intel_gt_reset_global(gt, engine_mask, msg);

	for_each_engine(engine, gt->i915, tmp)
		clear_bit_unlock(I915_RESET_ENGINE + engine->id,
				 &gt->reset.flags);
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}
int intel_gt_reset_trylock(struct intel_gt *gt)
{
	int srcu;

	might_lock(&gt->reset.backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return srcu;
}
void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}
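
/*
 * A minimal usage sketch for the pair above: callers touching state that a
 * concurrent reset may clobber bracket the access with the SRCU read lock,
 * e.g.
 *
 *	int tag = intel_gt_reset_trylock(gt);
 *	if (tag < 0)
 *		return tag;
 *	... touch reset-sensitive state ...
 *	intel_gt_reset_unlock(gt, tag);
 *
 * intel_gt_reset_global() flushes all such readers with
 * synchronize_srcu_expedited() before clobbering their state.
 */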
int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	/* Reset still in progress? Maybe we will recover? */
	if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
		return -EIO;

	/* XXX intel_reset_finish() still takes struct_mutex!!! */
	if (mutex_is_locked(&gt->i915->drm.struct_mutex))
		return -EAGAIN;

	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}
void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);
}
void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}
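
/*
 * An on-stack watchdog: if a reset step stalls for longer than the given
 * timeout, give up and declare the GPU terminally wedged rather than hang
 * forever; armed via intel_wedge_on_timeout() around the global reset.
 */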
static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	dev_err(w->gt->i915->drm.dev,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);

	intel_gt_set_wedged(w->gt);
}
void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}
void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#endif