drm/i915: Load balancing across a virtual engine
drivers/gpu/drm/i915/gt/selftest_lrc.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_gem_utils.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_context.h"

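/*
 * live_sanitycheck: submit a single spinning batch to each engine, check
 * that it starts executing, then end it and flush. A basic exercise of
 * request submission and retirement on the logical rings before running
 * the preemption tests below.
 */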
static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_busywait_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	u32 *map;

	/*
	 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
	 * preempt the busywaits used to synchronise between rings.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_unlock;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_ctx_lo;
	}

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_map;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_map;

	for_each_engine(engine, i915, id) {
		struct i915_request *lo, *hi;
		struct igt_live_test t;
		u32 *cs;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_vma;
		}

		/*
		 * We create two requests. The low priority request
		 * busywaits on a semaphore (inside the ringbuffer where
		 * it should be preemptible) and the high priority request
		 * uses a MI_STORE_DWORD_IMM to update the semaphore value,
		 * allowing the first request to complete. If preemption
		 * fails, we hang instead.
		 */

		lo = igt_request_alloc(ctx_lo, engine);
		if (IS_ERR(lo)) {
			err = PTR_ERR(lo);
			goto err_vma;
		}

		cs = intel_ring_begin(lo, 8);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(lo);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 1;

		/* XXX Do we need a flush + invalidate here? */

		*cs++ = MI_SEMAPHORE_WAIT |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_POLL |
			MI_SEMAPHORE_SAD_EQ_SDD;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;

		intel_ring_advance(lo, cs);
		i915_request_add(lo);

		if (wait_for(READ_ONCE(*map), 10)) {
			err = -ETIMEDOUT;
			goto err_vma;
		}

		/* Low priority request should be busywaiting now */
		if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
			pr_err("%s: Busywaiting request did not!\n",
			       engine->name);
			err = -EIO;
			goto err_vma;
		}

		hi = igt_request_alloc(ctx_hi, engine);
		if (IS_ERR(hi)) {
			err = PTR_ERR(hi);
			goto err_vma;
		}

		cs = intel_ring_begin(hi, 4);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(hi);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 0;

		intel_ring_advance(hi, cs);
		i915_request_add(hi);

		if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
			struct drm_printer p = drm_info_printer(i915->drm.dev);

			pr_err("%s: Failed to preempt semaphore busywait!\n",
			       engine->name);

			intel_engine_dump(engine, &p, "%s\n", engine->name);
			GEM_TRACE_DUMP();

			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_vma;
		}
		GEM_BUG_ON(READ_ONCE(*map));

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_vma;
		}
	}

	err = 0;
err_vma:
	i915_vma_unpin(vma);
err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

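/*
 * live_preempt: start a minimum priority spinner on each engine, then submit
 * a maximum priority spinner from a second context and check that it starts
 * running, i.e. that the scheduler preempts the low priority work.
 */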
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
		pr_err("Logical preemption supported, but not exposed\n");

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct igt_live_test t;
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

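/*
 * live_late_preempt: both contexts start at default priority, so the second
 * spinner queues up behind the first. Only after its priority is bumped via
 * engine->schedule() should it preempt the spinner already running ahead of
 * it.
 */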
static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct igt_live_test t;
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}

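/*
 * preempt_client bundles a kernel context with an igt_spinner so that the
 * preemption tests below can juggle several independent submitters.
 */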
struct preempt_client {
	struct igt_spinner spin;
	struct i915_gem_context *ctx;
};

static int preempt_client_init(struct drm_i915_private *i915,
			       struct preempt_client *c)
{
	c->ctx = kernel_context(i915);
	if (!c->ctx)
		return -ENOMEM;

	if (igt_spinner_init(&c->spin, i915))
		goto err_ctx;

	return 0;

err_ctx:
	kernel_context_close(c->ctx);
	return -ENOMEM;
}

static void preempt_client_fini(struct preempt_client *c)
{
	igt_spinner_fini(&c->spin);
	kernel_context_close(c->ctx);
}

static int live_suppress_self_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
	};
	struct preempt_client a, b;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Verify that if a preemption request does not cause a change in
	 * the current execution order, the preempt-to-idle injection is
	 * skipped and that we do not accidentally apply it after the CS
	 * completion event.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (USES_GUC_SUBMISSION(i915))
		return 0; /* presume black box */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &a))
		goto err_unlock;
	if (preempt_client_init(i915, &b))
		goto err_client_a;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq_a, *rq_b;
		int depth;

		if (!intel_engine_has_preemption(engine))
			continue;

		engine->execlists.preempt_hang.count = 0;

		rq_a = igt_spinner_create_request(&a.spin,
						  a.ctx, engine,
						  MI_NOOP);
		if (IS_ERR(rq_a)) {
			err = PTR_ERR(rq_a);
			goto err_client_b;
		}

		i915_request_add(rq_a);
		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
			pr_err("First client failed to start\n");
			goto err_wedged;
		}

		for (depth = 0; depth < 8; depth++) {
			rq_b = igt_spinner_create_request(&b.spin,
							  b.ctx, engine,
							  MI_NOOP);
			if (IS_ERR(rq_b)) {
				err = PTR_ERR(rq_b);
				goto err_client_b;
			}
			i915_request_add(rq_b);

			GEM_BUG_ON(i915_request_completed(rq_a));
			engine->schedule(rq_a, &attr);
			igt_spinner_end(&a.spin);

			if (!igt_wait_for_spinner(&b.spin, rq_b)) {
				pr_err("Second client failed to start\n");
				goto err_wedged;
			}

			swap(a, b);
			rq_a = rq_b;
		}
		igt_spinner_end(&a.spin);

		if (engine->execlists.preempt_hang.count) {
			pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
			       engine->execlists.preempt_hang.count,
			       depth);
			err = -EINVAL;
			goto err_client_b;
		}

		if (igt_flush_test(i915, I915_WAIT_LOCKED))
			goto err_wedged;
	}

	err = 0;
err_client_b:
	preempt_client_fini(&b);
err_client_a:
	preempt_client_fini(&a);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&b.spin);
	igt_spinner_end(&a.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_b;
}

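/*
 * A dummy request is a bare, permanently incomplete i915_request used by
 * live_suppress_wait_preempt to mask the NEWCLIENT priority boost that a
 * request on a fresh timeline would otherwise receive.
 */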
static int __i915_sw_fence_call
dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

static struct i915_request *dummy_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = kzalloc(sizeof(*rq), GFP_KERNEL);
	if (!rq)
		return NULL;

	INIT_LIST_HEAD(&rq->active_list);
	rq->engine = engine;

	i915_sched_node_init(&rq->sched);

	/* mark this request as permanently incomplete */
	rq->fence.seqno = 1;
	BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
	GEM_BUG_ON(i915_request_completed(rq));

	i915_sw_fence_init(&rq->submit, dummy_notify);
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	return rq;
}

static void dummy_request_free(struct i915_request *dummy)
{
	/* We have to fake the CS interrupt to kick the next request */
	i915_sw_fence_commit(&dummy->submit);

	i915_request_mark_complete(dummy);
	dma_fence_signal(&dummy->fence);

	i915_sched_node_fini(&dummy->sched);
	i915_sw_fence_fini(&dummy->submit);

	dma_fence_free(&dummy->fence);
}

static int live_suppress_wait_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct preempt_client client[4];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	int i;

	/*
	 * Waiters are given a little priority nudge, but not enough
	 * to actually cause any preemption. Double check that we do
	 * not needlessly generate preempt-to-idle cycles.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
		goto err_unlock;
	if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
		goto err_client_0;
	if (preempt_client_init(i915, &client[2])) /* head of queue */
		goto err_client_1;
	if (preempt_client_init(i915, &client[3])) /* bystander */
		goto err_client_2;

	for_each_engine(engine, i915, id) {
		int depth;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (!engine->emit_init_breadcrumb)
			continue;

		for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
			struct i915_request *rq[ARRAY_SIZE(client)];
			struct i915_request *dummy;

			engine->execlists.preempt_hang.count = 0;

			dummy = dummy_request(engine);
			if (!dummy)
				goto err_client_3;

			for (i = 0; i < ARRAY_SIZE(client); i++) {
				rq[i] = igt_spinner_create_request(&client[i].spin,
								   client[i].ctx, engine,
								   MI_NOOP);
				if (IS_ERR(rq[i])) {
					err = PTR_ERR(rq[i]);
					goto err_wedged;
				}

				/* Disable NEWCLIENT promotion */
				__i915_active_request_set(&rq[i]->timeline->last_request,
							  dummy);
				i915_request_add(rq[i]);
			}

			dummy_request_free(dummy);

			GEM_BUG_ON(i915_request_completed(rq[0]));
			if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
				pr_err("%s: First client failed to start\n",
				       engine->name);
				goto err_wedged;
			}
			GEM_BUG_ON(!i915_request_started(rq[0]));

			if (i915_request_wait(rq[depth],
					      I915_WAIT_LOCKED |
					      I915_WAIT_PRIORITY,
					      1) != -ETIME) {
				pr_err("%s: Waiter depth:%d completed!\n",
				       engine->name, depth);
				goto err_wedged;
			}

			for (i = 0; i < ARRAY_SIZE(client); i++)
				igt_spinner_end(&client[i].spin);

			if (igt_flush_test(i915, I915_WAIT_LOCKED))
				goto err_wedged;

			if (engine->execlists.preempt_hang.count) {
				pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
				       engine->name,
				       engine->execlists.preempt_hang.count,
				       depth);
				err = -EINVAL;
				goto err_client_3;
			}
		}
	}

	err = 0;
err_client_3:
	preempt_client_fini(&client[3]);
err_client_2:
	preempt_client_fini(&client[2]);
err_client_1:
	preempt_client_fini(&client[1]);
err_client_0:
	preempt_client_fini(&client[0]);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	for (i = 0; i < ARRAY_SIZE(client); i++)
		igt_spinner_end(&client[i].spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_3;
}

static int live_chain_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct preempt_client hi, lo;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Build a chain AB...BA between two contexts (A, B) and request
	 * preemption of the last request. It should then complete before
	 * the previously submitted spinner in B.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (preempt_client_init(i915, &hi))
		goto err_unlock;

	if (preempt_client_init(i915, &lo))
		goto err_client_hi;

	for_each_engine(engine, i915, id) {
		struct i915_sched_attr attr = {
			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
		};
		struct igt_live_test t;
		struct i915_request *rq;
		int ring_size, count, i;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&lo.spin,
						lo.ctx, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq))
			goto err_wedged;
		i915_request_add(rq);

		ring_size = rq->wa_tail - rq->head;
		if (ring_size < 0)
			ring_size += rq->ring->size;
		ring_size = rq->ring->size / ring_size;
		pr_debug("%s(%s): Using maximum of %d requests\n",
			 __func__, engine->name, ring_size);

		igt_spinner_end(&lo.spin);
		if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
			pr_err("Timed out waiting to flush %s\n", engine->name);
			goto err_wedged;
		}

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_wedged;
		}

		for_each_prime_number_from(count, 1, ring_size) {
			rq = igt_spinner_create_request(&hi.spin,
							hi.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (!igt_wait_for_spinner(&hi.spin, rq))
				goto err_wedged;

			rq = igt_spinner_create_request(&lo.spin,
							lo.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);

			for (i = 0; i < count; i++) {
				rq = igt_request_alloc(lo.ctx, engine);
				if (IS_ERR(rq))
					goto err_wedged;
				i915_request_add(rq);
			}

			rq = igt_request_alloc(hi.ctx, engine);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			engine->schedule(rq, &attr);

			igt_spinner_end(&hi.spin);
			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to preempt over chain of %d\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
			igt_spinner_end(&lo.spin);

			rq = igt_request_alloc(lo.ctx, engine);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to flush low priority chain of %d requests\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
		}

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_wedged;
		}
	}

	err = 0;
err_client_lo:
	preempt_client_fini(&lo);
err_client_hi:
	preempt_client_fini(&hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&hi.spin);
	igt_spinner_end(&lo.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_lo;
}

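/*
 * live_preempt_hang: inject a hang into the preempt-to-idle context switch
 * and check that an engine reset recovers, leaving the high priority spinner
 * running afterwards.
 */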
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

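/*
 * Preemption smoke test: hammer the scheduler with many contexts at random
 * or steadily increasing priorities, with and without a batch full of
 * MI_ARB_CHECK arbitration points, and check nothing falls over.
 */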
static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}

struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}

static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = igt_request_alloc(ctx, smoke->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}

static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}

static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;

		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
	return err;
}

static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
	return 0;
}

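/*
 * live_preempt_smoke: run the crescendo and random phases back to back,
 * first without and then with the MI_ARB_CHECK batch, over 1024 contexts.
 */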
static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(smoke.i915);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
	i915_gem_object_flush_map(smoke.batch);
	i915_gem_object_unpin_map(smoke.batch);

	if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
		err = -EIO;
		goto err_batch;
	}

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n])
			goto err_ctx;
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_live_test_end(&t))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(smoke.i915, wakeref);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}

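/*
 * nop_virtual_engine: build a virtual engine over the given siblings and
 * time how long empty requests take to pass through it, either with each
 * context submitting its whole chain in turn (CHAIN) or interleaved across
 * the contexts.
 */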
static int nop_virtual_engine(struct drm_i915_private *i915,
			      struct intel_engine_cs **siblings,
			      unsigned int nsibling,
			      unsigned int nctx,
			      unsigned int flags)
#define CHAIN BIT(0)
{
	IGT_TIMEOUT(end_time);
	struct i915_request *request[16];
	struct i915_gem_context *ctx[16];
	struct intel_context *ve[16];
	unsigned long n, prime, nc;
	struct igt_live_test t;
	ktime_t times[2] = {};
	int err;

	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));

	for (n = 0; n < nctx; n++) {
		ctx[n] = kernel_context(i915);
		if (!ctx[n]) {
			err = -ENOMEM;
			nctx = n;
			goto out;
		}

		ve[n] = intel_execlists_create_virtual(ctx[n],
						       siblings, nsibling);
		if (IS_ERR(ve[n])) {
			kernel_context_close(ctx[n]);
			err = PTR_ERR(ve[n]);
			nctx = n;
			goto out;
		}

		err = intel_context_pin(ve[n]);
		if (err) {
			intel_context_put(ve[n]);
			kernel_context_close(ctx[n]);
			nctx = n;
			goto out;
		}
	}

	err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
	if (err)
		goto out;

	for_each_prime_number_from(prime, 1, 8192) {
		times[1] = ktime_get_raw();

		if (flags & CHAIN) {
			for (nc = 0; nc < nctx; nc++) {
				for (n = 0; n < prime; n++) {
					request[nc] =
						i915_request_create(ve[nc]);
					if (IS_ERR(request[nc])) {
						err = PTR_ERR(request[nc]);
						goto out;
					}

					i915_request_add(request[nc]);
				}
			}
		} else {
			for (n = 0; n < prime; n++) {
				for (nc = 0; nc < nctx; nc++) {
					request[nc] =
						i915_request_create(ve[nc]);
					if (IS_ERR(request[nc])) {
						err = PTR_ERR(request[nc]);
						goto out;
					}

					i915_request_add(request[nc]);
				}
			}
		}

		for (nc = 0; nc < nctx; nc++) {
			if (i915_request_wait(request[nc],
					      I915_WAIT_LOCKED,
					      HZ / 10) < 0) {
				pr_err("%s(%s): wait for %llx:%lld timed out\n",
				       __func__, ve[0]->engine->name,
				       request[nc]->fence.context,
				       request[nc]->fence.seqno);

				GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
					  __func__, ve[0]->engine->name,
					  request[nc]->fence.context,
					  request[nc]->fence.seqno);
				GEM_TRACE_DUMP();
				i915_gem_set_wedged(i915);
				break;
			}
		}

		times[1] = ktime_sub(ktime_get_raw(), times[1]);
		if (prime == 1)
			times[0] = times[1];

		if (__igt_timeout(end_time, NULL))
			break;
	}

	err = igt_live_test_end(&t);
	if (err)
		goto out;

	pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
		nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
		prime, div64_u64(ktime_to_ns(times[1]), prime));

out:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (nc = 0; nc < nctx; nc++) {
		intel_context_unpin(ve[nc]);
		intel_context_put(ve[nc]);
		kernel_context_close(ctx[nc]);
	}
	return err;
}

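/*
 * live_virtual_engine: wrap each physical engine individually, then every
 * class with two or more instances, in a virtual engine and run the nop
 * latency measurement over 1..nsibling+1 contexts plus a chained pass.
 */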
static int live_virtual_engine(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int class, inst;
	int err = -ENODEV;

	if (USES_GUC_SUBMISSION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		err = nop_virtual_engine(i915, &engine, 1, 1, 0);
		if (err) {
			pr_err("Failed to wrap engine %s: err=%d\n",
			       engine->name, err);
			goto out_unlock;
		}
	}

	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
		int nsibling, n;

		nsibling = 0;
		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
			if (!i915->engine_class[class][inst])
				continue;

			siblings[nsibling++] = i915->engine_class[class][inst];
		}
		if (nsibling < 2)
			continue;

		for (n = 1; n <= nsibling + 1; n++) {
			err = nop_virtual_engine(i915, siblings, nsibling,
						 n, 0);
			if (err)
				goto out_unlock;
		}

		err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
		if (err)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

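/*
 * Register the live subtests. They are skipped entirely on platforms without
 * execlists, or when the GPU is already terminally wedged.
 */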
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_busywait_preempt),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_suppress_self_preempt),
		SUBTEST(live_suppress_wait_preempt),
		SUBTEST(live_chain_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
		SUBTEST(live_virtual_engine),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(i915))
		return 0;

	return i915_subtests(tests, i915);
}