/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kthread.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "intel_engine_pm.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"

#include "selftests/mock_drm.h"

#include "gem/selftests/mock_context.h"
#include "gem/selftests/igt_gem_utils.h"

#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
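
/*
 * Fixture state shared by the hang tests: a kernel context, a batch object
 * holding a self-referencing (hence hanging) batch buffer, and a shadow
 * "hardware status page" (hws) into which each spinning request writes its
 * seqno so we can tell when it has actually started executing.
 */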
struct hang {
	struct intel_gt *gt;
	struct drm_i915_gem_object *hws;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	u32 *seqno;
	u32 *batch;
};
static int hang_init(struct hang *h, struct intel_gt *gt)
{
	void *vaddr;
	int err;

	memset(h, 0, sizeof(*h));
	h->gt = gt;

	h->ctx = kernel_context(gt->i915);
	if (IS_ERR(h->ctx))
		return PTR_ERR(h->ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));

	h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(h->hws)) {
		err = PTR_ERR(h->hws);
		goto err_ctx;
	}

	h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(h->obj)) {
		err = PTR_ERR(h->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	vaddr = i915_gem_object_pin_map(h->obj,
					i915_coherent_map_type(gt->i915));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	h->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(h->hws);
err_obj:
	i915_gem_object_put(h->obj);
err_hws:
	i915_gem_object_put(h->hws);
err_ctx:
	kernel_context_close(h->ctx);
	return err;
}
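
/*
 * Each request writes its seqno into a distinct dword of the hws page,
 * indexed by its fence context, so that concurrent spinners do not
 * clobber one another's slots.
 */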
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
}
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}
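
/*
 * Build a request whose batch writes its seqno into the hws page and then
 * spins forever: after an MI_ARB_CHECK (a legal preemption point),
 * MI_BATCH_BUFFER_START jumps back to the top of the batch. The trailing
 * MI_BATCH_BUFFER_END is never executed unless a test rewrites the batch
 * or a reset skips it.
 */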
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
	struct intel_gt *gt = h->gt;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
	struct drm_i915_gem_object *obj;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	void *vaddr;
	u32 *batch;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		i915_vm_put(vm);
		return ERR_CAST(obj);
	}

	vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
	if (IS_ERR(vaddr)) {
		i915_gem_object_put(obj);
		i915_vm_put(vm);
		return ERR_CAST(vaddr);
	}

	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	h->obj = obj;
	h->batch = vaddr;

	vma = i915_vma_instance(h->obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_vm_put(vm);
		return ERR_CAST(vma);
	}

	hws = i915_vma_instance(h->hws, vm, NULL);
	if (IS_ERR(hws)) {
		i915_vm_put(vm);
		return ERR_CAST(hws);
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err) {
		i915_vm_put(vm);
		return ERR_PTR(err);
	}

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = igt_request_alloc(h->ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = h->batch;
	if (INTEL_GEN(gt->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*batch++ = lower_32_bits(vma->node.start);
		*batch++ = upper_32_bits(vma->node.start);
	} else if (INTEL_GEN(gt->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
		*batch++ = lower_32_bits(vma->node.start);
	} else if (INTEL_GEN(gt->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
		*batch++ = lower_32_bits(vma->node.start);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_ARB_CHECK;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_ARB_CHECK;
		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
		*batch++ = lower_32_bits(vma->node.start);
	}
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
	intel_gt_chipset_flush(engine->gt);

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (INTEL_GEN(gt->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	i915_vm_put(vm);
	return err ? ERR_PTR(err) : rq;
}
static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
{
	return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
}
static void hang_fini(struct hang *h)
{
	*h->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(h->gt);

	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	i915_gem_object_unpin_map(h->hws);
	i915_gem_object_put(h->hws);

	kernel_context_close(h->ctx);

	igt_flush_test(h->gt->i915);
}
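
/*
 * Poll the seqno slot in the hws page, first briefly without sleeping and
 * then with sleeping waits, to confirm the spinner has actually started
 * running on the engine before we attempt to reset it.
 */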
static bool wait_until_running(struct hang *h, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
					    rq->fence.seqno),
			  1000));
}
static int igt_hang_sanitycheck(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_request *rq;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err;

	/* Basic check that we can execute our hanging batch */

	err = hang_init(&h, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_wedge_me w;
		long timeout;

		if (!intel_engine_can_store_dword(engine))
			continue;

		rq = hang_create_request(&h, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			pr_err("Failed to create request for %s, err=%d\n",
			       engine->name, err);
			goto fini;
		}

		i915_request_get(rq);

		*h.batch = MI_BATCH_BUFFER_END;
		intel_gt_chipset_flush(engine->gt);

		i915_request_add(rq);

		timeout = 0;
		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
			timeout = i915_request_wait(rq, 0,
						    MAX_SCHEDULE_TIMEOUT);
		if (intel_gt_is_wedged(gt))
			timeout = -EIO;

		i915_request_put(rq);

		if (timeout < 0) {
			err = timeout;
			pr_err("Wait for request failed on %s, err=%d\n",
			       engine->name, err);
			goto fini;
		}
	}

fini:
	hang_fini(&h);
	return err;
}
static bool wait_for_idle(struct intel_engine_cs *engine)
{
	return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
}
static int igt_reset_nop(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	unsigned int reset_count, count;
	enum intel_engine_id id;
	struct drm_file *file;
	IGT_TIMEOUT(end_time);
	int err = 0;

	/* Check that we can reset during non-user portions of requests */

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(gt->i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}

	i915_gem_context_clear_bannable(ctx);
	reset_count = i915_reset_count(global);
	count = 0;
	do {
		for_each_engine(engine, gt, id) {
			int i;

			for (i = 0; i < 16; i++) {
				struct i915_request *rq;

				rq = igt_request_alloc(ctx, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					goto out;
				}

				i915_request_add(rq);
			}
		}

		igt_global_reset_lock(gt);
		intel_gt_reset(gt, ALL_ENGINES, NULL);
		igt_global_reset_unlock(gt);

		if (intel_gt_is_wedged(gt)) {
			err = -EIO;
			goto out;
		}

		if (i915_reset_count(global) != reset_count + ++count) {
			pr_err("Full GPU reset not recorded!\n");
			err = -EINVAL;
			goto out;
		}

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	} while (time_before(jiffies, end_time));
	pr_info("%s: %d resets\n", __func__, count);

	err = igt_flush_test(gt->i915);
out:
	mock_file_free(gt->i915, file);
	if (intel_gt_is_wedged(gt))
		err = -EIO;
	return err;
}
static int igt_reset_nop_engine(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct drm_file *file;
	int err = 0;

	/* Check that we can engine-reset during non-user portions */

	if (!intel_has_reset_engine(gt))
		return 0;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(gt->i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}

	i915_gem_context_clear_bannable(ctx);
	for_each_engine(engine, gt, id) {
		unsigned int reset_count, reset_engine_count;
		unsigned int count;
		IGT_TIMEOUT(end_time);

		reset_count = i915_reset_count(global);
		reset_engine_count = i915_reset_engine_count(global, engine);
		count = 0;

		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		do {
			int i;

			if (!wait_for_idle(engine)) {
				pr_err("%s failed to idle before reset\n",
				       engine->name);
				err = -EIO;
				break;
			}

			for (i = 0; i < 16; i++) {
				struct i915_request *rq;

				rq = igt_request_alloc(ctx, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					break;
				}

				i915_request_add(rq);
			}
			err = intel_engine_reset(engine, NULL);
			if (err) {
				pr_err("i915_reset_engine failed\n");
				break;
			}

			if (i915_reset_count(global) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				break;
			}

			if (i915_reset_engine_count(global, engine) !=
			    reset_engine_count + ++count) {
				pr_err("%s engine reset not recorded!\n",
				       engine->name);
				err = -EINVAL;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);

		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	err = igt_flush_test(gt->i915);
out:
	mock_file_free(gt->i915, file);
	if (intel_gt_is_wedged(gt))
		err = -EIO;
	return err;
}
static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err = 0;

	/* Check that we can issue an engine reset on an idle engine (no-op) */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (active) {
		err = hang_init(&h, gt);
		if (err)
			return err;
	}

	for_each_engine(engine, gt, id) {
		unsigned int reset_count, reset_engine_count;
		IGT_TIMEOUT(end_time);

		if (active && !intel_engine_can_store_dword(engine))
			continue;

		if (!wait_for_idle(engine)) {
			pr_err("%s failed to idle before reset\n",
			       engine->name);
			err = -EIO;
			break;
		}

		reset_count = i915_reset_count(global);
		reset_engine_count = i915_reset_engine_count(global, engine);

		intel_engine_pm_get(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		do {
			if (active) {
				struct i915_request *rq;

				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					break;
				}

				i915_request_get(rq);
				i915_request_add(rq);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

					pr_err("%s: Failed to start request %llx, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					break;
				}

				i915_request_put(rq);
			}

			err = intel_engine_reset(engine, NULL);
			if (err) {
				pr_err("i915_reset_engine failed\n");
				break;
			}

			if (i915_reset_count(global) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				break;
			}

			if (i915_reset_engine_count(global, engine) !=
			    ++reset_engine_count) {
				pr_err("%s engine reset not recorded!\n",
				       engine->name);
				err = -EINVAL;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		intel_engine_pm_put(engine);

		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	if (active)
		hang_fini(&h);

	return err;
}

static int igt_reset_idle_engine(void *arg)
{
	return __igt_reset_engine(arg, false);
}

static int igt_reset_active_engine(void *arg)
{
	return __igt_reset_engine(arg, true);
}
struct active_engine {
	struct task_struct *task;
	struct intel_engine_cs *engine;
	unsigned long resets;
	unsigned int flags;
};

#define TEST_ACTIVE	BIT(0)
#define TEST_OTHERS	BIT(1)
#define TEST_SELF	BIT(2)
#define TEST_PRIORITY	BIT(3)

static int active_request_put(struct i915_request *rq)
{
	int err = 0;

	if (!rq)
		return 0;

	if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
		GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
			  rq->engine->name,
			  rq->fence.context,
			  rq->fence.seqno);
		GEM_TRACE_DUMP();

		intel_gt_set_wedged(rq->engine->gt);
		err = -EIO;
	}

	i915_request_put(rq);

	return err;
}
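
/*
 * Background kthread keeping a ring of up to 8 requests in flight on an
 * engine, recycling the oldest slot as each new request is submitted, so
 * that "innocent" engines stay busy while another engine is being reset.
 */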
static int active_engine(void *data)
{
	I915_RND_STATE(prng);
	struct active_engine *arg = data;
	struct intel_engine_cs *engine = arg->engine;
	struct i915_request *rq[8] = {};
	struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
	struct drm_file *file;
	unsigned long count = 0;
	int err = 0;

	file = mock_file(engine->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	for (count = 0; count < ARRAY_SIZE(ctx); count++) {
		ctx[count] = live_context(engine->i915, file);
		if (IS_ERR(ctx[count])) {
			err = PTR_ERR(ctx[count]);
			while (--count)
				i915_gem_context_put(ctx[count]);
			goto err_file;
		}
	}

	while (!kthread_should_stop()) {
		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
		struct i915_request *old = rq[idx];
		struct i915_request *new;

		new = igt_request_alloc(ctx[idx], engine);
		if (IS_ERR(new)) {
			err = PTR_ERR(new);
			break;
		}

		if (arg->flags & TEST_PRIORITY)
			ctx[idx]->sched.priority =
				i915_prandom_u32_max_state(512, &prng);

		rq[idx] = i915_request_get(new);
		i915_request_add(new);

		err = active_request_put(old);
		if (err)
			break;

		cond_resched();
	}

	for (count = 0; count < ARRAY_SIZE(rq); count++) {
		int err__ = active_request_put(rq[count]);

		/* Keep the first error */
		if (!err)
			err = err__;
	}

err_file:
	mock_file_free(engine->i915, file);
	return err;
}
static int __igt_reset_engines(struct intel_gt *gt,
			       const char *test_name,
			       unsigned int flags)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine, *other;
	enum intel_engine_id id, tmp;
	struct hang h;
	int err = 0;

	/* Check that issuing a reset on one engine does not interfere
	 * with any other engine.
	 */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (flags & TEST_ACTIVE) {
		err = hang_init(&h, gt);
		if (err)
			return err;

		if (flags & TEST_PRIORITY)
			h.ctx->sched.priority = 1024;
	}

	for_each_engine(engine, gt, id) {
		struct active_engine threads[I915_NUM_ENGINES] = {};
		unsigned long device = i915_reset_count(global);
		unsigned long count = 0, reported;
		IGT_TIMEOUT(end_time);

		if (flags & TEST_ACTIVE &&
		    !intel_engine_can_store_dword(engine))
			continue;

		if (!wait_for_idle(engine)) {
			pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n",
			       engine->name, test_name);
			err = -EIO;
			break;
		}

		memset(threads, 0, sizeof(threads));
		for_each_engine(other, gt, tmp) {
			struct task_struct *tsk;

			threads[tmp].resets =
				i915_reset_engine_count(global, other);

			if (!(flags & TEST_OTHERS))
				continue;

			if (other == engine && !(flags & TEST_SELF))
				continue;

			threads[tmp].engine = other;
			threads[tmp].flags = flags;

			tsk = kthread_run(active_engine, &threads[tmp],
					  "igt/%s", other->name);
			if (IS_ERR(tsk)) {
				err = PTR_ERR(tsk);
				goto unwind;
			}

			threads[tmp].task = tsk;
			get_task_struct(tsk);
		}

		yield(); /* start all threads before we begin */

		intel_engine_pm_get(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		do {
			struct i915_request *rq = NULL;

			if (flags & TEST_ACTIVE) {
				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					break;
				}

				i915_request_get(rq);
				i915_request_add(rq);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

					pr_err("%s: Failed to start request %llx, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					break;
				}
			}

			err = intel_engine_reset(engine, NULL);
			if (err) {
				pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
				       engine->name, test_name, err);
				break;
			}

			count++;

			if (rq) {
				if (i915_request_wait(rq, 0, HZ / 5) < 0) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);

					pr_err("i915_reset_engine(%s:%s):"
					       " failed to complete request after reset\n",
					       engine->name, test_name);
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);
					i915_request_put(rq);

					GEM_TRACE_DUMP();
					intel_gt_set_wedged(gt);
					err = -EIO;
					break;
				}

				i915_request_put(rq);
			}

			if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
				struct drm_printer p =
					drm_info_printer(gt->i915->drm.dev);

				pr_err("i915_reset_engine(%s:%s):"
				       " failed to idle after reset\n",
				       engine->name, test_name);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);

				err = -EIO;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		intel_engine_pm_put(engine);
		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
			engine->name, test_name, count);

		reported = i915_reset_engine_count(global, engine);
		reported -= threads[engine->id].resets;
		if (reported != count) {
			pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
			       engine->name, test_name, count, reported);
			if (!err)
				err = -EINVAL;
		}

unwind:
		for_each_engine(other, gt, tmp) {
			int ret;

			if (!threads[tmp].task)
				continue;

			ret = kthread_stop(threads[tmp].task);
			if (ret) {
				pr_err("kthread for other engine %s failed, err=%d\n",
				       other->name, ret);
				if (!err)
					err = ret;
			}
			put_task_struct(threads[tmp].task);

			if (other->uabi_class != engine->uabi_class &&
			    threads[tmp].resets !=
			    i915_reset_engine_count(global, other)) {
				pr_err("Innocent engine %s was reset (count=%ld)\n",
				       other->name,
				       i915_reset_engine_count(global, other) -
				       threads[tmp].resets);
				if (!err)
					err = -EINVAL;
			}
		}

		if (device != i915_reset_count(global)) {
			pr_err("Global reset (count=%ld)!\n",
			       i915_reset_count(global) - device);
			if (!err)
				err = -EINVAL;
		}

		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	if (flags & TEST_ACTIVE)
		hang_fini(&h);

	return err;
}
static int igt_reset_engines(void *arg)
{
	static const struct {
		const char *name;
		unsigned int flags;
	} phases[] = {
		{ "idle", 0 },
		{ "active", TEST_ACTIVE },
		{ "others-idle", TEST_OTHERS },
		{ "others-active", TEST_OTHERS | TEST_ACTIVE },
		{
			"others-priority",
			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY
		},
		{
			"self-priority",
			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
		},
		{ }
	};
	struct intel_gt *gt = arg;
	typeof(*phases) *p;
	int err;

	for (p = phases; p->name; p++) {
		if (p->flags & TEST_PRIORITY) {
			if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
				continue;
		}

		err = __igt_reset_engines(arg, p->name, p->flags);
		if (err)
			return err;
	}

	return 0;
}
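
/*
 * Rather than waiting for the periodic hang detection to notice the
 * spinner, trigger the reset directly; the global reset count beforehand
 * is returned so callers can verify that a reset was indeed recorded.
 */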
static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
{
	u32 count = i915_reset_count(&gt->i915->gpu_error);

	intel_gt_reset(gt, mask, NULL);

	return count;
}
static int igt_reset_wait(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct i915_request *rq;
	unsigned int reset_count;
	struct hang h;
	long timeout;
	int err;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	/* Check that we detect a stuck waiter and issue a reset */

	igt_global_reset_lock(gt);

	err = hang_init(&h, gt);
	if (err)
		goto unlock;

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto fini;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);

		err = -EIO;
		goto out_rq;
	}

	reset_count = fake_hangcheck(gt, ALL_ENGINES);

	timeout = i915_request_wait(rq, 0, 10);
	if (timeout < 0) {
		pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
		       timeout);
		err = timeout;
		goto out_rq;
	}

	if (i915_reset_count(global) == reset_count) {
		pr_err("No GPU reset recorded!\n");
		err = -EINVAL;
		goto out_rq;
	}

out_rq:
	i915_request_put(rq);
fini:
	hang_fini(&h);
unlock:
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}
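
/*
 * For the eviction tests, a second thread tries to unbind (or refault the
 * fence of) a vma that is kept busy by a hanging request; the unbind must
 * block until the reset completes the request, proving that a stuck
 * eviction can be recovered by the reset.
 */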
struct evict_vma {
	struct completion completion;
	struct i915_vma *vma;
};

static int evict_vma(void *data)
{
	struct evict_vma *arg = data;
	struct i915_address_space *vm = arg->vma->vm;
	struct drm_mm_node evict = arg->vma->node;
	int err;

	complete(&arg->completion);

	mutex_lock(&vm->mutex);
	err = i915_gem_evict_for_node(vm, &evict, 0);
	mutex_unlock(&vm->mutex);

	return err;
}

static int evict_fence(void *data)
{
	struct evict_vma *arg = data;
	int err;

	complete(&arg->completion);

	/* Mark the fence register as dirty to force the mmio update. */
	err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
	if (err) {
		pr_err("Invalid Y-tiling settings; err:%d\n", err);
		return err;
	}

	err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
	if (err) {
		pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
		return err;
	}

	err = i915_vma_pin_fence(arg->vma);
	i915_vma_unpin(arg->vma);
	if (err) {
		pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
		return err;
	}

	i915_vma_unpin_fence(arg->vma);

	return 0;
}
static int __igt_reset_evict_vma(struct intel_gt *gt,
				 struct i915_address_space *vm,
				 int (*fn)(void *),
				 unsigned int flags)
{
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct drm_i915_gem_object *obj;
	struct task_struct *tsk = NULL;
	struct i915_request *rq;
	struct evict_vma arg;
	struct hang h;
	unsigned int pin_flags;
	int err;

	if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
		return 0;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	/* Check that we can recover an unbind stuck on a hanging request */

	err = hang_init(&h, gt);
	if (err)
		return err;

	obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fini;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
		if (err) {
			pr_err("Invalid X-tiling settings; err:%d\n", err);
			goto out_obj;
		}
	}

	arg.vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(arg.vma)) {
		err = PTR_ERR(arg.vma);
		goto out_obj;
	}

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_obj;
	}

	pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		pin_flags |= PIN_MAPPABLE;

	err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
	if (err) {
		i915_request_add(rq);
		goto out_obj;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		err = i915_vma_pin_fence(arg.vma);
		if (err) {
			pr_err("Unable to pin X-tiled fence; err:%d\n", err);
			i915_vma_unpin(arg.vma);
			i915_request_add(rq);
			goto out_obj;
		}
	}

	i915_vma_lock(arg.vma);
	err = i915_request_await_object(rq, arg.vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(arg.vma, rq, flags);
	i915_vma_unlock(arg.vma);

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_vma_unpin_fence(arg.vma);
	i915_vma_unpin(arg.vma);

	i915_request_get(rq);
	i915_request_add(rq);
	if (err)
		goto out_rq;

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);
		goto out_reset;
	}

	init_completion(&arg.completion);

	tsk = kthread_run(fn, &arg, "igt/evict_vma");
	if (IS_ERR(tsk)) {
		err = PTR_ERR(tsk);
		tsk = NULL;
		goto out_reset;
	}
	get_task_struct(tsk);

	wait_for_completion(&arg.completion);

	if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("igt/evict_vma kthread did not wait\n");
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);
		goto out_reset;
	}

out_reset:
	igt_global_reset_lock(gt);
	fake_hangcheck(gt, rq->engine->mask);
	igt_global_reset_unlock(gt);

	if (tsk) {
		struct intel_wedge_me w;

		/* The reset, even indirectly, should take less than 10ms. */
		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
			err = kthread_stop(tsk);

		put_task_struct(tsk);
	}

out_rq:
	i915_request_put(rq);
out_obj:
	i915_gem_object_put(obj);
fini:
	hang_fini(&h);
	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}
static int igt_reset_evict_ggtt(void *arg)
{
	struct intel_gt *gt = arg;

	return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
				     evict_vma, EXEC_OBJECT_WRITE);
}

static int igt_reset_evict_ppgtt(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct drm_file *file;
	int err;

	file = mock_file(gt->i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(gt->i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}

	err = 0;
	vm = i915_gem_context_get_vm_rcu(ctx);
	if (!i915_is_ggtt(vm)) {
		/* aliasing == global gtt locking, covered above */
		err = __igt_reset_evict_vma(gt, vm,
					    evict_vma, EXEC_OBJECT_WRITE);
	}
	i915_vm_put(vm);

out:
	mock_file_free(gt->i915, file);
	return err;
}

static int igt_reset_evict_fence(void *arg)
{
	struct intel_gt *gt = arg;

	return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
				     evict_fence, EXEC_OBJECT_NEEDS_FENCE);
}
static int wait_for_others(struct intel_gt *gt,
			   struct intel_engine_cs *exclude)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine == exclude)
			continue;

		if (!wait_for_idle(engine))
			return -EIO;
	}

	return 0;
}
static int igt_reset_queue(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err;

	/* Check that we replay pending requests following a hang */

	igt_global_reset_lock(gt);

	err = hang_init(&h, gt);
	if (err)
		goto unlock;

	for_each_engine(engine, gt, id) {
		struct i915_request *prev;
		IGT_TIMEOUT(end_time);
		unsigned int count;

		if (!intel_engine_can_store_dword(engine))
			continue;

		prev = hang_create_request(&h, engine);
		if (IS_ERR(prev)) {
			err = PTR_ERR(prev);
			goto fini;
		}

		i915_request_get(prev);
		i915_request_add(prev);

		count = 0;
		do {
			struct i915_request *rq;
			unsigned int reset_count;

			rq = hang_create_request(&h, engine);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				goto fini;
			}

			i915_request_get(rq);
			i915_request_add(rq);

			/*
			 * XXX We don't handle resetting the kernel context
			 * very well. If we trigger a device reset twice in
			 * quick succession while the kernel context is
			 * executing, we may end up skipping the breadcrumb.
			 * This is really only a problem for the selftest as
			 * normally there is a large interlude between resets
			 * (hangcheck), or we focus on resetting just one
			 * engine and so avoid repeatedly resetting innocents.
			 */
			err = wait_for_others(gt, engine);
			if (err) {
				pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
				       __func__, engine->name);
				i915_request_put(rq);
				i915_request_put(prev);

				GEM_TRACE_DUMP();
				intel_gt_set_wedged(gt);
				goto fini;
			}

			if (!wait_until_running(&h, prev)) {
				struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

				pr_err("%s(%s): Failed to start request %llx, at %x\n",
				       __func__, engine->name,
				       prev->fence.seqno, hws_seqno(&h, prev));
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);

				i915_request_put(rq);
				i915_request_put(prev);

				intel_gt_set_wedged(gt);

				err = -EIO;
				goto fini;
			}

			reset_count = fake_hangcheck(gt, BIT(id));

			if (prev->fence.error != -EIO) {
				pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
				       prev->fence.error);
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto fini;
			}

			if (rq->fence.error) {
				pr_err("Fence error status not zero [%d] after unrelated reset\n",
				       rq->fence.error);
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto fini;
			}

			if (i915_reset_count(global) == reset_count) {
				pr_err("No GPU reset recorded!\n");
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto fini;
			}

			i915_request_put(prev);
			prev = rq;
			count++;
		} while (time_before(jiffies, end_time));
		pr_info("%s: Completed %d resets\n", engine->name, count);

		*h.batch = MI_BATCH_BUFFER_END;
		intel_gt_chipset_flush(engine->gt);

		i915_request_put(prev);

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

fini:
	hang_fini(&h);
unlock:
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}
static int igt_handle_error(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct hang h;
	struct i915_request *rq;
	struct i915_gpu_state *error;
	int err;

	/* Check that we can issue a global GPU and engine reset */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	err = hang_init(&h, gt);
	if (err)
		return err;

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_fini;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);

		err = -EIO;
		goto err_request;
	}

	/* Temporarily disable error capture */
	error = xchg(&global->first_error, (void *)-1);

	intel_gt_handle_error(gt, engine->mask, 0, NULL);

	xchg(&global->first_error, error);

	if (rq->fence.error != -EIO) {
		pr_err("Guilty request not identified!\n");
		err = -EINVAL;
		goto err_request;
	}

err_request:
	i915_request_put(rq);
err_fini:
	hang_fini(&h);
	return err;
}
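
/*
 * Engine resets must be usable from atomic (non-sleeping) context: perform
 * the reset inside each igt_atomic_phases critical section, with the
 * execlists submission tasklet disabled so nothing else touches the engine
 * while we poke at it.
 */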
static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
				     const struct igt_atomic_section *p,
				     const char *mode)
{
	struct tasklet_struct * const t = &engine->execlists.tasklet;
	int err;

	GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
		  engine->name, mode, p->name);

	tasklet_disable(t);
	p->critical_section_begin();

	err = intel_engine_reset(engine, NULL);

	p->critical_section_end();
	tasklet_enable(t);

	if (err)
		pr_err("i915_reset_engine(%s:%s) failed under %s\n",
		       engine->name, mode, p->name);

	return err;
}

static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
				   const struct igt_atomic_section *p)
{
	struct i915_request *rq;
	struct hang h;
	int err;

	err = __igt_atomic_reset_engine(engine, p, "idle");
	if (err)
		return err;

	err = hang_init(&h, engine->gt);
	if (err)
		return err;

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (wait_until_running(&h, rq)) {
		err = __igt_atomic_reset_engine(engine, p, "active");
	} else {
		pr_err("%s(%s): Failed to start request %llx, at %x\n",
		       __func__, engine->name,
		       rq->fence.seqno, hws_seqno(&h, rq));
		intel_gt_set_wedged(engine->gt);
		err = -EIO;
	}

	if (err == 0) {
		struct intel_wedge_me w;

		intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		if (intel_gt_is_wedged(engine->gt))
			err = -EIO;
	}

	i915_request_put(rq);
out:
	hang_fini(&h);
	return err;
}
static int igt_reset_engines_atomic(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	int err = 0;

	/* Check that the engines resets are usable from atomic context */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (USES_GUC_SUBMISSION(gt->i915))
		return 0;

	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto unlock;

	for (p = igt_atomic_phases; p->name; p++) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, gt, id) {
			err = igt_atomic_reset_engine(engine, p);
			if (err)
				goto out;
		}
	}

out:
	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);
unlock:
	igt_global_reset_unlock(gt);

	return err;
}
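
/*
 * Entry point for the live selftest runner: execute each subtest in turn
 * while holding a runtime-pm wakeref so the device stays awake for the
 * duration.
 */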
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_hang_sanitycheck),
		SUBTEST(igt_reset_nop),
		SUBTEST(igt_reset_nop_engine),
		SUBTEST(igt_reset_idle_engine),
		SUBTEST(igt_reset_active_engine),
		SUBTEST(igt_reset_engines),
		SUBTEST(igt_reset_engines_atomic),
		SUBTEST(igt_reset_queue),
		SUBTEST(igt_reset_wait),
		SUBTEST(igt_reset_evict_ggtt),
		SUBTEST(igt_reset_evict_ppgtt),
		SUBTEST(igt_reset_evict_fence),
		SUBTEST(igt_handle_error),
	};
	struct intel_gt *gt = &i915->gt;
	intel_wakeref_t wakeref;
	int err;

	if (!intel_has_gpu_reset(gt))
		return 0;

	if (intel_gt_is_wedged(gt))
		return -EIO; /* we're long past hope of a successful reset */

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	err = intel_gt_live_subtests(tests, gt);

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);

	return err;
}