// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <linux/string_helpers.h>

#include "display/intel_display.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "gt/intel_gt_regs.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_reset.h"

#include "uc/intel_guc.h"

#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}

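/*
 * Guilt tracking: when a hang is attributed to a request, the owning GEM
 * context (and the file/client behind it) accumulates a ban score. Repeat
 * offenders hanging again within CONTEXT_FAST_HANG_JIFFIES are banned from
 * further submission; see mark_guilty() and client_mark_guilty() below.
 */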
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
	struct drm_i915_file_private *file_priv = ctx->file_priv;
	unsigned long prev_hang;
	unsigned int score;

	if (IS_ERR_OR_NULL(file_priv))
		return;

	score = 0;
	if (banned)
		score = I915_CLIENT_SCORE_CONTEXT_BAN;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		drm_dbg(&ctx->i915->drm,
			"client %s: gained %u ban score, now %u\n",
			ctx->name, score,
			atomic_read(&file_priv->ban_score));
	}
}

static bool mark_guilty(struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	unsigned long prev_hang;
	bool banned;
	int i;

	if (intel_context_is_closed(rq->context))
		return true;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return intel_context_is_banned(rq->context);

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx)) {
		banned = false;
		goto out;
	}

	drm_notice(&ctx->i915->drm,
		   "%s context reset due to GPU hang\n",
		   ctx->name);

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned)
		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
			ctx->name, atomic_read(&ctx->guilty_count));

	client_mark_guilty(ctx, banned);

out:
	i915_gem_context_put(ctx);
	return banned;
}

static void mark_innocent(struct i915_request *rq)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx)
		atomic_inc(&ctx->active_count);
	rcu_read_unlock();
}

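/*
 * __i915_request_reset - adjust a request found active during reset
 *
 * A guilty request is skipped and completed with -EIO, and may lead to its
 * context being banned; an innocent request is flagged with -EAGAIN so that
 * it can be resubmitted once the engine has been reinitialised.
 */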
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	bool banned = false;

	RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
	GEM_BUG_ON(__i915_request_is_complete(rq));

	rcu_read_lock(); /* protect the GEM context */
	if (guilty) {
		i915_request_set_error_once(rq, -EIO);
		__i915_request_skip(rq);
		banned = mark_guilty(rq);
	} else {
		i915_request_set_error_once(rq, -EAGAIN);
		mark_innocent(rq);
	}
	rcu_read_unlock();

	if (banned)
		intel_context_ban(rq->context, rq);
}

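/*
 * What follows are the per-generation reset backends: the PCI config space
 * GDRST register on gen3/g33/g4x, ILK_GDSR on Ironlake, and the GEN6_GDRST
 * domain-mask interface used (with extensions) from gen6 onwards.
 * intel_get_gpu_reset() selects the backend appropriate for the device.
 */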
static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		GT_TRACE(gt, "Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		GT_TRACE(gt, "Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		GT_TRACE(gt, "Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		GT_TRACE(gt, "Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

	/* Wait for the device to ack the reset requests */
	err = __intel_wait_for_register_fw(uncore,
					   GEN6_GDRST, hw_domain_mask, 0,
					   500, 0,
					   NULL);
	if (err)
		GT_TRACE(gt,
			 "Wait for 0x%08x engines reset failed\n",
			 hw_domain_mask);

	return err;
}

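/*
 * Translate a mask of engines into the matching mask of GEN6_GRDOM_*
 * hardware reset domains, falling back to GEN6_GRDOM_FULL when every
 * engine is selected.
 */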
static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp)
			hw_mask |= engine->reset_domain;
	}

	return gen6_hw_domain_reset(gt, hw_mask);
}

static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
{
	int vecs_id;

	GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);

	vecs_id = _VECS((engine->instance) / 2);

	return engine->gt->engine[vecs_id];
}

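/*
 * The video decode and video enhancement engines on gen11+ can share an SFC
 * (Scaler and Format Converter) unit. Before resetting an engine that may
 * be using an SFC we must force-lock the unit and, if the lock is obtained,
 * reset the SFC along with the engine. The registers involved, which differ
 * per engine class, are collected into struct sfc_lock_data by
 * get_sfc_forced_lock_data().
 */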
struct sfc_lock_data {
	i915_reg_t lock_reg;
	i915_reg_t ack_reg;
	i915_reg_t usage_reg;
	u32 lock_bit;
	u32 ack_bit;
	u32 usage_bit;
	u32 reset_bit;
};

static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
				     struct sfc_lock_data *sfc_lock)
{
	switch (engine->class) {
	default:
		MISSING_CASE(engine->class);
		fallthrough;
	case VIDEO_DECODE_CLASS:
		sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
		sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
		sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
		sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);

		break;
	case VIDEO_ENHANCEMENT_CLASS:
		sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
		sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
		sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
		sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);

		break;
	}
}

static int gen11_lock_sfc(struct intel_engine_cs *engine,
			  u32 *reset_mask,
			  u32 *unlock_mask)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	struct sfc_lock_data sfc_lock;
	bool lock_obtained, lock_to_other = false;
	int ret;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		fallthrough;
	case VIDEO_ENHANCEMENT_CLASS:
		get_sfc_forced_lock_data(engine, &sfc_lock);

		break;
	default:
		return 0;
	}

	if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
		struct intel_engine_cs *paired_vecs;

		if (engine->class != VIDEO_DECODE_CLASS ||
		    GRAPHICS_VER(engine->i915) != 12)
			return 0;

		/*
		 * Wa_14010733141
		 *
		 * If the VCS-MFX isn't using the SFC, we also need to check
		 * whether VCS-HCP is using it. If so, we need to issue a *VE*
		 * forced lock on the VE engine that shares the same SFC.
		 */
		if (!(intel_uncore_read_fw(uncore,
					   GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
		      GEN12_HCP_SFC_USAGE_BIT))
			return 0;

		paired_vecs = find_sfc_paired_vecs_engine(engine);
		get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
		lock_to_other = true;
		*unlock_mask |= paired_vecs->mask;
	} else {
		*unlock_mask |= engine->mask;
	}

	/*
	 * If the engine is using an SFC, tell the engine that a software reset
	 * is going to happen. The engine will then try to force lock the SFC.
	 * If SFC ends up being locked to the engine we want to reset, we have
	 * to reset it as well (we will unlock it once the reset sequence is
	 * completed).
	 */
	rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);

	ret = __intel_wait_for_register_fw(uncore,
					   sfc_lock.ack_reg,
					   sfc_lock.ack_bit,
					   sfc_lock.ack_bit,
					   1000, 0, NULL);

	/*
	 * Was the SFC released while we were trying to lock it?
	 *
	 * We should reset both the engine and the SFC if:
	 *  - We were locking the SFC to this engine and the lock succeeded
	 *	OR
	 *  - We were locking the SFC to a different engine (Wa_14010733141)
	 *    but the SFC was released before the lock was obtained.
	 *
	 * Otherwise we need only reset the engine by itself and we can
	 * leave the SFC alone.
	 */
	lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
			 sfc_lock.usage_bit) != 0;
	if (lock_obtained == lock_to_other)
		return 0;

	if (ret) {
		ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
		return ret;
	}

	*reset_mask |= sfc_lock.reset_bit;
	return 0;
}

static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	struct sfc_lock_data sfc_lock = {};

	if (engine->class != VIDEO_DECODE_CLASS &&
	    engine->class != VIDEO_ENHANCEMENT_CLASS)
		return;

	if (engine->class == VIDEO_DECODE_CLASS &&
	    (BIT(engine->instance) & vdbox_sfc_access) == 0)
		return;

	get_sfc_forced_lock_data(engine, &sfc_lock);

	rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
}

static int gen11_reset_engines(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 reset_mask, unlock_mask = 0;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		reset_mask = GEN11_GRDOM_FULL;
	} else {
		reset_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			reset_mask |= engine->reset_domain;
			ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
			if (ret)
				goto sfc_unlock;
		}
	}

	ret = gen6_hw_domain_reset(gt, reset_mask);

sfc_unlock:
	/*
	 * We unlock the SFC based on the lock status and not the result of
	 * gen11_lock_sfc to make sure that we clean properly if something
	 * wrong happened during the lock (e.g. lock acquired after timeout
	 * expiration).
	 *
	 * Due to Wa_14010733141, we may have locked an SFC to an engine that
	 * wasn't being reset.  So instead of calling gen11_unlock_sfc()
	 * on engine_mask, we instead call it on the mask of engines that our
	 * gen11_lock_sfc() calls told us actually had locks attempted.
	 */
	for_each_engine_masked(engine, gt, unlock_mask, tmp)
		gen11_unlock_sfc(engine);

	return ret;
}

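/*
 * Gen8+ adds a per-engine handshake on top of the domain reset: we request
 * RESET_CTL_REQUEST_RESET and wait for the engine to report
 * RESET_CTL_READY_TO_RESET before pulling the reset line, except for
 * catastrophic errors, where the ready-for-reset handshake is bypassed
 * (HAS#396813).
 */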
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
		return -ETIMEDOUT;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		drm_err(&engine->i915->drm,
			"%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			engine->name, request,
			intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, gt, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We rather take context corruption instead of
		 * failed reset with a wedged driver/gpu. And
		 * active bb execution case should be covered by
		 * stop_engines() we have before the reset.
		 */
	}

	/*
	 * Wa_22011100796:dg2, whenever Full soft reset is required,
	 * reset all individual engines firstly, and then do a full soft reset.
	 *
	 * This is best effort, so ignore any error from the initial reset.
	 */
	if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
		gen11_reset_engines(gt, gt->info.engine_mask, 0);

	if (GRAPHICS_VER(gt->i915) >= 11)
		ret = gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	return ret;
}

static int mock_reset(struct intel_gt *gt,
		      intel_engine_mask_t mask,
		      unsigned int retry)
{
	return 0;
}

typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (is_mock_gt(gt))
		return mock_reset;
	else if (GRAPHICS_VER(i915) >= 8)
		return gen8_reset_engines;
	else if (GRAPHICS_VER(i915) >= 6)
		return gen6_reset_engines;
	else if (GRAPHICS_VER(i915) >= 5)
		return ilk_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (GRAPHICS_VER(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

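/*
 * __intel_gt_reset - perform the raw GPU reset
 *
 * Holds forcewake so the power well cannot sleep mid-reset, and runs the
 * backend with preemption disabled. A full (ALL_ENGINES) reset is retried
 * up to RESET_MAX_RETRIES times on timeout. Callers are responsible for
 * quiescing and resuming the engines around this call.
 */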
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(const struct intel_gt *gt)
{
	if (!gt->i915->params.reset)
		return false;

	return intel_get_gpu_reset(gt) != NULL;
}

bool intel_has_reset_engine(const struct intel_gt *gt)
{
	if (gt->i915->params.reset < 2)
		return false;

	return INTEL_INFO(gt->i915)->has_reset_engine;
}

int intel_reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

/*
 * Ensure the irq handler finishes, and is not run again. This also arms the
 * forcewake that keeps the engine out of RC6 for the duration of the reset.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	if (engine->reset.prepare)
		engine->reset.prepare(engine);
}

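/*
 * A reset clobbers the fence registers, so zap the CPU PTEs of any GGTT
 * mmap still relying on them; userspace then takes a fresh fault, and thus
 * revalidates its mapping, once the reset has completed.
 */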
static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);

		if (!vma->mmo)
			continue;

		node = &vma->mmo->vma_node;
		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;

		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	/* For GuC mode, ensure submission is disabled before stopping ring */
	intel_uc_reset_prepare(&gt->uc);

	for_each_engine(engine, gt, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	return awake;
}

static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}

static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	local_bh_disable();
	for_each_engine(engine, gt, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);
	local_bh_enable();

	intel_uc_reset(&gt->uc, ALL_ENGINES);

	intel_ggtt_restore_fences(gt->ggtt);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	if (engine->reset.finish)
		engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_signal_breadcrumbs(engine);
}

static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}

	intel_uc_reset_finish(&gt->uc);
}

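/*
 * Once the device is wedged, every engine's submit_request is replaced by
 * nop_submit_request: incoming requests are immediately completed with
 * -EIO instead of being passed to the hardware.
 */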
static void nop_submit_request(struct i915_request *request)
{
	RQ_TRACE(request, "-EIO\n");

	request = i915_request_mark_eio(request);
	if (request) {
		i915_request_submit(request);
		intel_engine_signal_breadcrumbs(request->engine);

		i915_request_put(request);
	}
}

static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	GT_TRACE(gt, "start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	local_bh_disable();
	for_each_engine(engine, gt, id)
		if (engine->reset.cancel)
			engine->reset.cancel(engine);
	intel_uc_cancel_requests(&gt->uc);
	local_bh_enable();

	reset_finish(gt, awake);

	GT_TRACE(gt, "end\n");
}

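/*
 * intel_gt_set_wedged - declare the GPU wedged
 *
 * Takes a runtime-pm wakeref and the reset mutex, optionally dumps the
 * state of any busy engine for debugging, and then marks the device as
 * wedged: all in-flight and future requests are completed with -EIO.
 */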
void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	mutex_lock(&gt->reset.mutex);

	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
		for_each_engine(engine, gt, id) {
			if (intel_engine_is_idle(engine))
				continue;

			intel_engine_dump(engine, &p, "%s\n", engine->name);
		}
	}

	__intel_gt_set_wedged(gt);

	mutex_unlock(&gt->reset.mutex);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	bool ok;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	/* Never fully initialised, recovery impossible */
	if (intel_gt_has_unrecoverable_error(gt))
		return false;

	GT_TRACE(gt, "start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock(&timelines->lock);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct dma_fence *fence;

		fence = i915_active_fence_get(&tl->last_request);
		if (!fence)
			continue;

		spin_unlock(&timelines->lock);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
		dma_fence_put(fence);

		/* Restart iteration after dropping lock */
		spin_lock(&timelines->lock);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock(&timelines->lock);

	/* We must reset pending GPU events before restoring our submission */
	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
	if (!ok) {
		/*
		 * Warn CI about the unrecoverable wedged condition.
		 * Time for a reboot.
		 */
		add_taint_for_CI(gt->i915, TAINT_WARN);
		return false;
	}

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GT_TRACE(gt, "end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}

bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}

static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}

static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt, id) {
		ret = intel_engine_resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));

	/*
	 * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
	 * critical section like gpu reset.
	 */
	gt_revoke(gt);

	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		drm_notice(&gt->i915->drm,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt)) {
		if (gt->i915->params.reset)
			drm_err(&gt->i915->drm, "GPU reset not supported\n");
		else
			drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		drm_err(&gt->i915->drm, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = intel_gt_init_hw(gt);
	if (ret) {
		drm_err(&gt->i915->drm,
			"Failed to initialise HW following reset (%d)\n",
			ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(gt->i915, TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}

static int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}

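/*
 * __intel_engine_reset_bh - reset a single engine, softirq-safe variant
 *
 * Intended to be called with bottom halves disabled; note the
 * local_bh_disable() in the intel_engine_reset() wrapper below and in
 * intel_gt_handle_error(). Engine resets are not supported under GuC
 * submission, as the GuC itself owns the per-engine reset.
 */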
int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (intel_engine_uses_guc(engine))
		return -ENODEV;

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		drm_notice(&engine->i915->drm,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	ret = intel_gt_reset_engine(engine);
	if (ret) {
		/* If we fail here, we expect to fallback to a global reset */
		ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = intel_engine_resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put_async(engine);
	return ret;
}

/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no drm_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identifies the request that caused the hang and it is dropped
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	int err;

	local_bh_disable();
	err = __intel_engine_reset_bh(engine, msg);
	local_bh_enable();

	return err;
}

static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_display_prepare_reset(gt->i915);

		/* Flush everyone using a resource about to be clobbered */
		synchronize_srcu_expedited(&gt->reset.backoff_srcu);

		intel_gt_reset(gt, engine_mask, reason);

		intel_display_finish_reset(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	engine_mask &= gt->info.engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc) &&
	    intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
		local_bh_disable();
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (__intel_engine_reset_bh(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
		local_bh_enable();
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/*
	 * Prevent any other reset-engine attempt. We don't do this for GuC
	 * submission, as the GuC owns the per-engine reset, not the i915.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc)) {
		for_each_engine(engine, gt, tmp) {
			while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
						&gt->reset.flags))
				wait_on_bit(&gt->reset.flags,
					    I915_RESET_ENGINE + engine->id,
					    TASK_UNINTERRUPTIBLE);
		}
	}

	intel_gt_reset_global(gt, engine_mask, msg);

	if (!intel_uc_uses_guc_submission(&gt->uc)) {
		for_each_engine(engine, gt, tmp)
			clear_bit_unlock(I915_RESET_ENGINE + engine->id,
					 &gt->reset.flags);
	}
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

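/*
 * intel_gt_reset_trylock - block a full GPU reset from starting
 *
 * Takes a read lock on gt->reset.backoff_srcu, first waiting for any reset
 * already in flight (I915_RESET_BACKOFF) to finish. The matching
 * intel_gt_reset_unlock() releases it; intel_gt_reset_global() flushes all
 * readers with synchronize_srcu_expedited() before clobbering the device.
 */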
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
	might_lock(&gt->reset.backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return 0;
}

void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}

int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	if (intel_gt_has_unrecoverable_error(gt))
		return -EIO;

	/* Reset still in progress? Maybe we will recover? */
	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

void intel_gt_set_wedged_on_init(struct intel_gt *gt)
{
	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
		     I915_WEDGED_ON_INIT);
	intel_gt_set_wedged(gt);
	i915_disable_error_state(gt->i915, -ENODEV);
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);

	/* Wedged on init is non-recoverable */
	add_taint_for_CI(gt->i915, TAINT_WARN);
}

void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
{
	intel_gt_set_wedged(gt);
	i915_disable_error_state(gt->i915, -ENODEV);
	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
	intel_gt_retire_requests(gt); /* cleanup any wedged requests */
}

void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);

	/*
	 * While undesirable to wait inside the shrinker, complain anyway.
	 *
	 * If we have to wait during shrinking, we guarantee forward progress
	 * by forcing the reset. Therefore during the reset we must not
	 * re-enter the shrinker. By declaring that we take the reset mutex
	 * within the shrinker, we forbid ourselves from performing any
	 * fs-reclaim or taking related locks during reset.
	 */
	i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);

	/* no GPU until we are ready! */
	__set_bit(I915_WEDGED, &gt->reset.flags);
}

void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}

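/*
 * The wedge-on-timeout watchdog: __intel_init_wedge() arms a delayed work
 * that declares the GT wedged if the guarded section has not reached
 * __intel_fini_wedge() within the timeout; see intel_wedge_on_timeout()
 * as used by intel_gt_reset_global() above.
 */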
static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	drm_err(&w->gt->i915->drm,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	intel_gt_set_wedged(w->gt);
}

void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#include "selftest_hangcheck.c"
#endif