/* drivers/gpu/drm/i915/selftests/i915_request.c */
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "igt_live_test.h"

#include "mock_context.h"
#include "mock_gem_device.h"

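/*
 * Selftests for i915_request: a mock suite run against a fake GEM device
 * (no hardware required), followed by a live suite that submits real
 * requests, both invoked through the i915 selftest harness.
 */
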
static int igt_add_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_request *request;
        int err = -ENOMEM;

        /* Basic preliminary test to create a request and let it loose! */

        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS],
                               i915->kernel_context,
                               HZ / 10);
        if (!request)
                goto out_unlock;

        i915_request_add(request);

        err = 0;
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int igt_wait_request(void *arg)
{
        const long T = HZ / 4;
        struct drm_i915_private *i915 = arg;
        struct i915_request *request;
        int err = -EINVAL;

        /* Submit a request, then wait upon it */

        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS], i915->kernel_context, T);
        if (!request) {
                err = -ENOMEM;
                goto out_unlock;
        }

        if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
                pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
                goto out_unlock;
        }

        if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
                pr_err("request wait succeeded (expected timeout before submit!)\n");
                goto out_unlock;
        }

        if (i915_request_completed(request)) {
                pr_err("request completed before submit!!\n");
                goto out_unlock;
        }

        i915_request_add(request);

        if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
                pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
                goto out_unlock;
        }

        if (i915_request_completed(request)) {
                pr_err("request completed immediately!\n");
                goto out_unlock;
        }

        if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
                pr_err("request wait succeeded (expected timeout!)\n");
                goto out_unlock;
        }

        if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
                pr_err("request wait timed out!\n");
                goto out_unlock;
        }

        if (!i915_request_completed(request)) {
                pr_err("request not complete after waiting!\n");
                goto out_unlock;
        }

        if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
                pr_err("request wait timed out when already complete!\n");
                goto out_unlock;
        }

        err = 0;
out_unlock:
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int igt_fence_wait(void *arg)
{
        const long T = HZ / 4;
        struct drm_i915_private *i915 = arg;
        struct i915_request *request;
        int err = -EINVAL;

        /* Submit a request, treat it as a fence and wait upon it */

        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS], i915->kernel_context, T);
        if (!request) {
                err = -ENOMEM;
                goto out_locked;
        }
        mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

        if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
                pr_err("fence wait success before submit (expected timeout)!\n");
                goto out_device;
        }

        mutex_lock(&i915->drm.struct_mutex);
        i915_request_add(request);
        mutex_unlock(&i915->drm.struct_mutex);

        if (dma_fence_is_signaled(&request->fence)) {
                pr_err("fence signaled immediately!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
                pr_err("fence wait success after submit (expected timeout)!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
                pr_err("fence wait timed out (expected success)!\n");
                goto out_device;
        }

        if (!dma_fence_is_signaled(&request->fence)) {
                pr_err("fence unsignaled after waiting!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
                pr_err("fence wait timed out when complete (expected success)!\n");
                goto out_device;
        }

        err = 0;
out_device:
        mutex_lock(&i915->drm.struct_mutex);
out_locked:
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

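/*
 * Queue a slow request, then jump a zero-delay "vip" request ahead of it
 * by cancelling the original and resubmitting it behind the vip, simulating
 * preemption on the mock engine. The vip must complete while the original
 * request is still outstanding.
 */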
static int igt_request_rewind(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_request *request, *vip;
        struct i915_gem_context *ctx[2];
        int err = -EINVAL;

        mutex_lock(&i915->drm.struct_mutex);
        ctx[0] = mock_context(i915, "A");
        request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
        if (!request) {
                err = -ENOMEM;
                goto err_context_0;
        }

        i915_request_get(request);
        i915_request_add(request);

        ctx[1] = mock_context(i915, "B");
        vip = mock_request(i915->engine[RCS], ctx[1], 0);
        if (!vip) {
                err = -ENOMEM;
                goto err_context_1;
        }

        /* Simulate preemption by manual reordering */
        if (!mock_cancel_request(request)) {
                pr_err("failed to cancel request (already executed)!\n");
                i915_request_add(vip);
                goto err_context_1;
        }
        i915_request_get(vip);
        i915_request_add(vip);
        rcu_read_lock();
        request->engine->submit_request(request);
        rcu_read_unlock();

        mutex_unlock(&i915->drm.struct_mutex);

        if (i915_request_wait(vip, 0, HZ) == -ETIME) {
                pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
                       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
                goto err;
        }

        if (i915_request_completed(request)) {
                pr_err("low priority request already completed\n");
                goto err;
        }

        err = 0;
err:
        i915_request_put(vip);
        mutex_lock(&i915->drm.struct_mutex);
err_context_1:
        mock_context_close(ctx[1]);
        i915_request_put(request);
err_context_0:
        mock_context_close(ctx[0]);
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

int i915_request_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_add_request),
                SUBTEST(igt_wait_request),
                SUBTEST(igt_fence_wait),
                SUBTEST(igt_request_rewind),
        };
        struct drm_i915_private *i915;
        intel_wakeref_t wakeref;
        int err = 0;

        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;

        with_intel_runtime_pm(i915, wakeref)
                err = i915_subtests(tests, i915);

        drm_dev_put(&i915->drm);

        return err;
}

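/*
 * Note: the batch counts below step through the primes, which gives a
 * well-spread, irregular set of sample sizes (presumably to avoid any
 * accidental resonance with power-of-two sized rings and queues).
 */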
static int live_nop_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        intel_wakeref_t wakeref;
        struct igt_live_test t;
        unsigned int id;
        int err = -ENODEV;

        /* Submit various sized batches of empty requests, to each engine
         * (individually), and wait for the batch to complete. We can check
         * the overhead of submitting requests to the hardware.
         */

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        for_each_engine(engine, i915, id) {
                struct i915_request *request = NULL;
                unsigned long n, prime;
                IGT_TIMEOUT(end_time);
                ktime_t times[2] = {};

                err = igt_live_test_begin(&t, i915, __func__, engine->name);
                if (err)
                        goto out_unlock;

                for_each_prime_number_from(prime, 1, 8192) {
                        times[1] = ktime_get_raw();

                        for (n = 0; n < prime; n++) {
                                request = i915_request_alloc(engine,
                                                             i915->kernel_context);
                                if (IS_ERR(request)) {
                                        err = PTR_ERR(request);
                                        goto out_unlock;
                                }

                                /* This space is left intentionally blank.
                                 *
                                 * We do not actually want to perform any
                                 * action with this request, we just want
                                 * to measure the latency in allocation
                                 * and submission of our breadcrumbs -
                                 * ensuring that the bare request is sufficient
                                 * for the system to work (i.e. proper HEAD
                                 * tracking of the rings, interrupt handling,
                                 * etc). It also gives us the lowest bounds
                                 * for latency.
                                 */

                                i915_request_add(request);
                        }
                        i915_request_wait(request,
                                          I915_WAIT_LOCKED,
                                          MAX_SCHEDULE_TIMEOUT);

                        times[1] = ktime_sub(ktime_get_raw(), times[1]);
                        if (prime == 1)
                                times[0] = times[1];

                        if (__igt_timeout(end_time, NULL))
                                break;
                }

                err = igt_live_test_end(&t);
                if (err)
                        goto out_unlock;

                pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
                        engine->name,
                        ktime_to_ns(times[0]),
                        prime, div64_u64(ktime_to_ns(times[1]), prime));
        }

out_unlock:
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

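/*
 * Build a one-page batch containing just MI_BATCH_BUFFER_END, pinned into
 * the global GTT so it can be executed via secure dispatch (which runs
 * from GGTT addresses).
 */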
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u32 *cmd;
        int err;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err;
        }

        *cmd = MI_BATCH_BUFFER_END;
        i915_gem_chipset_flush(i915);

        i915_gem_object_unpin_map(obj);

        err = i915_gem_object_set_to_gtt_domain(obj, false);
        if (err)
                goto err;

        vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
        if (err)
                goto err;

        return vma;

err:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

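/* Submit the empty batch on the engine's kernel context, returning the request. */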
static struct i915_request *
empty_request(struct intel_engine_cs *engine,
              struct i915_vma *batch)
{
        struct i915_request *request;
        int err;

        request = i915_request_alloc(engine, engine->i915->kernel_context);
        if (IS_ERR(request))
                return request;

        err = engine->emit_bb_start(request,
                                    batch->node.start,
                                    batch->node.size,
                                    I915_DISPATCH_SECURE);

        i915_request_add(request);
        return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        intel_wakeref_t wakeref;
        struct igt_live_test t;
        struct i915_vma *batch;
        unsigned int id;
        int err = 0;

        /* Submit various sized batches of empty requests, to each engine
         * (individually), and wait for the batch to complete. We can check
         * the overhead of submitting requests to the hardware.
         */

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        batch = empty_batch(i915);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_unlock;
        }

        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
                struct i915_request *request;
                unsigned long n, prime;
                ktime_t times[2] = {};

                err = igt_live_test_begin(&t, i915, __func__, engine->name);
                if (err)
                        goto out_batch;

                /* Warmup / preload */
                request = empty_request(engine, batch);
                if (IS_ERR(request)) {
                        err = PTR_ERR(request);
                        goto out_batch;
                }
                i915_request_wait(request,
                                  I915_WAIT_LOCKED,
                                  MAX_SCHEDULE_TIMEOUT);

                for_each_prime_number_from(prime, 1, 8192) {
                        times[1] = ktime_get_raw();

                        for (n = 0; n < prime; n++) {
                                request = empty_request(engine, batch);
                                if (IS_ERR(request)) {
                                        err = PTR_ERR(request);
                                        goto out_batch;
                                }
                        }
                        i915_request_wait(request,
                                          I915_WAIT_LOCKED,
                                          MAX_SCHEDULE_TIMEOUT);

                        times[1] = ktime_sub(ktime_get_raw(), times[1]);
                        if (prime == 1)
                                times[0] = times[1];

                        if (__igt_timeout(end_time, NULL))
                                break;
                }

                err = igt_live_test_end(&t);
                if (err)
                        goto out_batch;

                pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
                        engine->name,
                        ktime_to_ns(times[0]),
                        prime, div64_u64(ktime_to_ns(times[1]), prime));
        }

out_batch:
        i915_vma_unpin(batch);
        i915_vma_put(batch);
out_unlock:
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

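/*
 * Build a batch whose first instruction jumps back to its own start, so the
 * GPU spins inside it indefinitely; recursive_batch_resolve() later rewrites
 * that first dword to MI_BATCH_BUFFER_END to let it complete.
 */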
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
        struct i915_gem_context *ctx = i915->kernel_context;
        struct i915_address_space *vm =
                ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        struct drm_i915_gem_object *obj;
        const int gen = INTEL_GEN(i915);
        struct i915_vma *vma;
        u32 *cmd;
        int err;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err;

        err = i915_gem_object_set_to_wc_domain(obj, true);
        if (err)
                goto err;

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err;
        }

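        /*
         * The MI_BATCH_BUFFER_START encoding varies by gen: gen8+ takes a
         * 64-bit address (hence the extra dword and the length field "| 1"),
         * bit 8 selects PPGTT addressing on gen6+, while older gens instead
         * flag the address as a GTT offset with MI_BATCH_GTT (our reading of
         * the encodings; check against the bspec).
         */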
        if (gen >= 8) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
                *cmd++ = lower_32_bits(vma->node.start);
                *cmd++ = upper_32_bits(vma->node.start);
        } else if (gen >= 6) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
                *cmd++ = lower_32_bits(vma->node.start);
        } else {
                *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
                *cmd++ = lower_32_bits(vma->node.start);
        }
        *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
        i915_gem_chipset_flush(i915);

        i915_gem_object_unpin_map(obj);

        return vma;

err:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

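/* Terminate a spinning recursive batch by overwriting its self-jump. */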
static int recursive_batch_resolve(struct i915_vma *batch)
{
        u32 *cmd;

        cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        *cmd = MI_BATCH_BUFFER_END;
        i915_gem_chipset_flush(batch->vm->i915);

        i915_gem_object_unpin_map(batch->obj);

        return 0;
}

static int live_all_engines(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_request *request[I915_NUM_ENGINES] = {};
        intel_wakeref_t wakeref;
        struct igt_live_test t;
        struct i915_vma *batch;
        unsigned int id;
        int err;

        /* Check we can submit requests to all engines simultaneously. We
         * send a recursive batch to each engine - checking that we don't
         * block doing so, and that they don't complete too soon.
         */

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
                goto out_unlock;

        batch = recursive_batch(i915);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
                goto out_unlock;
        }

        for_each_engine(engine, i915, id) {
                request[id] = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(request[id])) {
                        err = PTR_ERR(request[id]);
                        request[id] = NULL;
                        pr_err("%s: Request allocation failed with err=%d\n",
                               __func__, err);
                        goto out_request;
                }

                err = engine->emit_bb_start(request[id],
                                            batch->node.start,
                                            batch->node.size,
                                            0);
                GEM_BUG_ON(err);
                request[id]->batch = batch;

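                /*
                 * Keep the batch object alive while it is busy on the GPU;
                 * the active reference is dropped again on retirement.
                 */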
                if (!i915_gem_object_has_active_reference(batch->obj)) {
                        i915_gem_object_get(batch->obj);
                        i915_gem_object_set_active_reference(batch->obj);
                }

                err = i915_vma_move_to_active(batch, request[id], 0);
                GEM_BUG_ON(err);

                i915_request_get(request[id]);
                i915_request_add(request[id]);
        }

        for_each_engine(engine, i915, id) {
                if (i915_request_completed(request[id])) {
                        pr_err("%s(%s): request completed too early!\n",
                               __func__, engine->name);
                        err = -EINVAL;
                        goto out_request;
                }
        }

        err = recursive_batch_resolve(batch);
        if (err) {
                pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
                goto out_request;
        }

        for_each_engine(engine, i915, id) {
                long timeout;

                timeout = i915_request_wait(request[id],
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
                if (timeout < 0) {
                        err = timeout;
                        pr_err("%s: error waiting for request on %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                GEM_BUG_ON(!i915_request_completed(request[id]));
                i915_request_put(request[id]);
                request[id] = NULL;
        }

        err = igt_live_test_end(&t);

out_request:
        for_each_engine(engine, i915, id)
                if (request[id])
                        i915_request_put(request[id]);
        i915_vma_unpin(batch);
        i915_vma_put(batch);
out_unlock:
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int live_sequential_engines(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_request *request[I915_NUM_ENGINES] = {};
        struct i915_request *prev = NULL;
        struct intel_engine_cs *engine;
        intel_wakeref_t wakeref;
        struct igt_live_test t;
        unsigned int id;
        int err;

        /* Check we can submit requests to all engines sequentially, such
         * that each successive request waits for the earlier ones. This
         * tests that we don't execute requests out of order, even though
         * they are running on independent engines.
         */

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        err = igt_live_test_begin(&t, i915, __func__, "");
        if (err)
                goto out_unlock;

        for_each_engine(engine, i915, id) {
                struct i915_vma *batch;

                batch = recursive_batch(i915);
                if (IS_ERR(batch)) {
                        err = PTR_ERR(batch);
                        pr_err("%s: Unable to create batch for %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                request[id] = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(request[id])) {
                        err = PTR_ERR(request[id]);
                        request[id] = NULL;
                        pr_err("%s: Request allocation failed for %s with err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                if (prev) {
                        err = i915_request_await_dma_fence(request[id],
                                                           &prev->fence);
                        if (err) {
                                i915_request_add(request[id]);
                                pr_err("%s: Request await failed for %s with err=%d\n",
                                       __func__, engine->name, err);
                                goto out_request;
                        }
                }

                err = engine->emit_bb_start(request[id],
                                            batch->node.start,
                                            batch->node.size,
                                            0);
                GEM_BUG_ON(err);
                request[id]->batch = batch;

                err = i915_vma_move_to_active(batch, request[id], 0);
                GEM_BUG_ON(err);

                i915_gem_object_set_active_reference(batch->obj);
                i915_vma_get(batch);

                i915_request_get(request[id]);
                i915_request_add(request[id]);

                prev = request[id];
        }

        for_each_engine(engine, i915, id) {
                long timeout;

                if (i915_request_completed(request[id])) {
                        pr_err("%s(%s): request completed too early!\n",
                               __func__, engine->name);
                        err = -EINVAL;
                        goto out_request;
                }

                err = recursive_batch_resolve(request[id]->batch);
                if (err) {
                        pr_err("%s: failed to resolve batch, err=%d\n",
                               __func__, err);
                        goto out_request;
                }

                timeout = i915_request_wait(request[id],
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
                if (timeout < 0) {
                        err = timeout;
                        pr_err("%s: error waiting for request on %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                GEM_BUG_ON(!i915_request_completed(request[id]));
        }

        err = igt_live_test_end(&t);

out_request:
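        /*
         * Point any still-spinning batches at MI_BATCH_BUFFER_END so the
         * outstanding requests can complete before we drop our references.
         */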
        for_each_engine(engine, i915, id) {
                u32 *cmd;

                if (!request[id])
                        break;

                cmd = i915_gem_object_pin_map(request[id]->batch->obj,
                                              I915_MAP_WC);
                if (!IS_ERR(cmd)) {
                        *cmd = MI_BATCH_BUFFER_END;
                        i915_gem_chipset_flush(i915);

                        i915_gem_object_unpin_map(request[id]->batch->obj);
                }

                i915_vma_put(request[id]->batch);
                i915_request_put(request[id]);
        }
out_unlock:
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

int i915_request_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_nop_request),
                SUBTEST(live_all_engines),
                SUBTEST(live_sequential_engines),
                SUBTEST(live_empty_request),
        };

        if (i915_terminally_wedged(&i915->gpu_error))
                return 0;

        return i915_subtests(tests, i915);
}