/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "igt_live_test.h"

#include "mock_context.h"
#include "mock_gem_device.h"
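/*
 * Note: the igt_* subtests below run against a mock (software-only) device.
 * mock_request() is provided by the mock selftest library, not this file; it
 * is assumed here to return a request on the given engine/context that
 * completes roughly `delay` jiffies after submission, or NULL on failure.
 */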
static int igt_add_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -ENOMEM;

	/* Basic preliminary test to create a request and let it loose! */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS],
			       i915->kernel_context,
			       HZ / 10);
	if (!request)
		goto out_unlock;

	i915_request_add(request);

	err = 0;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int igt_wait_request(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_request_add(request);

	if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct i915_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	mutex_lock(&i915->drm.struct_mutex);
	i915_request_add(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
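/*
 * Check that a queued request can be cancelled and resubmitted behind a
 * later, higher-priority ("vip") request from another context, and that
 * the vip request then completes while the rewound request is still busy.
 */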
static int igt_request_rewind(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request, *vip;
	struct i915_gem_context *ctx[2];
	int err = -EINVAL;

	mutex_lock(&i915->drm.struct_mutex);
	ctx[0] = mock_context(i915, "A");
	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
	if (!request) {
		err = -ENOMEM;
		goto err_context_0;
	}

	i915_request_get(request);
	i915_request_add(request);

	ctx[1] = mock_context(i915, "B");
	vip = mock_request(i915->engine[RCS], ctx[1], 0);
	if (!vip) {
		err = -ENOMEM;
		goto err_context_1;
	}

	/* Simulate preemption by manual reordering */
	if (!mock_cancel_request(request)) {
		pr_err("failed to cancel request (already executed)!\n");
		i915_request_add(vip);
		goto err_context_1;
	}
	i915_request_get(vip);
	i915_request_add(vip);
	rcu_read_lock();
	request->engine->submit_request(request);
	rcu_read_unlock();

	mutex_unlock(&i915->drm.struct_mutex);

	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
		goto err;
	}

	if (i915_request_completed(request)) {
		pr_err("low priority request already completed\n");
		goto err;
	}

	err = 0;
err:
	i915_request_put(vip);
	mutex_lock(&i915->drm.struct_mutex);
err_context_1:
	mock_context_close(ctx[1]);
	i915_request_put(request);
err_context_0:
	mock_context_close(ctx[0]);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
int i915_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	with_intel_runtime_pm(i915, wakeref)
		err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);

	return err;
}
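/*
 * Assumption (the wiring lives in i915_selftest.c, not here): with
 * CONFIG_DRM_I915_SELFTEST enabled, these mock subtests are typically run
 * at module load, e.g. "modprobe i915 mock_selftests=-1".
 */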
static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	unsigned int id;
	int err = -ENODEV;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	for_each_engine(engine, i915, id) {
		struct i915_request *request = NULL;
		unsigned long n, prime;
		IGT_TIMEOUT(end_time);
		ktime_t times[2] = {};

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_request_alloc(engine,
							     i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * to look into.
				 */

				i915_request_add(request);
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_unlock;

		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
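/*
 * Build a batch containing a single MI_BATCH_BUFFER_END in the global GTT,
 * so the hardware returns immediately: the cheapest real batch we can submit.
 */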
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
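/*
 * Wrap the empty batch in a request on the kernel context. If emitting the
 * batchbuffer start fails, the half-built request is still added (so it is
 * flushed through the engine) and the error is returned instead.
 */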
static struct i915_request *
empty_request(struct intel_engine_cs *engine,
	      struct i915_vma *batch)
{
	struct i915_request *request;
	int err;

	request = i915_request_alloc(engine, engine->i915->kernel_context);
	if (IS_ERR(request))
		return request;

	err = engine->emit_bb_start(request,
				    batch->node.start,
				    batch->node.size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto out_request;

out_request:
	i915_request_add(request);
	return err ? ERR_PTR(err) : request;
}
static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct i915_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_request_wait(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			i915_request_wait(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
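/*
 * Build a self-referencing batch: an MI_BATCH_BUFFER_START pointing back at
 * its own address (the encoding varies with gen), so the batch spins forever
 * until recursive_batch_resolve() below rewrites its first instruction to
 * MI_BATCH_BUFFER_END.
 */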
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
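/* Terminate the spinning batch by overwriting its first instruction. */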
static int recursive_batch_resolve(struct i915_vma *batch)
{
	u32 *cmd;

	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(batch->vm->i915);

	i915_gem_object_unpin_map(batch->obj);

	return 0;
}
static int live_all_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_request *request[I915_NUM_ENGINES];
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines simultaneously. We
	 * send a recursive batch to each engine - checking that we don't
	 * block doing so, and that they don't complete too soon.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	batch = recursive_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed with err=%d\n",
			       __func__, err);
			goto out_request;
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		if (!i915_gem_object_has_active_reference(batch->obj)) {
			i915_gem_object_get(batch->obj);
			i915_gem_object_set_active_reference(batch->obj);
		}

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_request_get(request[id]);
		i915_request_add(request[id]);
	}

	for_each_engine(engine, i915, id) {
		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}
	}

	err = recursive_batch_resolve(batch);
	if (err) {
		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
		goto out_request;
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
		i915_request_put(request[id]);
		request[id] = NULL;
	}

	err = igt_live_test_end(&t);

out_request:
	for_each_engine(engine, i915, id)
		if (request[id])
			i915_request_put(request[id]);
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *request[I915_NUM_ENGINES] = {};
	struct i915_request *prev = NULL;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines sequentially, such
	 * that each successive request waits for the earlier ones. This
	 * tests that we don't execute requests out of order, even though
	 * they are running on independent engines.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		if (prev) {
			err = i915_request_await_dma_fence(request[id],
							   &prev->fence);
			if (err) {
				i915_request_add(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		err = i915_vma_move_to_active(batch, request[id], 0);
		GEM_BUG_ON(err);

		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_request_get(request[id]);
		i915_request_add(request[id]);

		prev = request[id];
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		if (i915_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_request_wait(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_request_completed(request[id]));
	}

	err = igt_live_test_end(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			i915_gem_chipset_flush(i915);

			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_request_put(request[id]);
	}
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
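/*
 * Live counterpart of i915_request_mock_selftests() above: these subtests
 * exercise real hardware (assumption: selected analogously at module load
 * via the live_selftests parameter) and are skipped if the GPU is already
 * terminally wedged.
 */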
int i915_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};

	if (i915_terminally_wedged(&i915->gpu_error))
		return 0;

	return i915_subtests(tests, i915);
}