drm/i915/selftests: Setup engine->retire for mock_engine
drivers/gpu/drm/i915/selftests/i915_request.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/prime_numbers.h>
26
27 #include "gem/i915_gem_pm.h"
28 #include "gem/selftests/mock_context.h"
29
30 #include "gt/intel_engine_pm.h"
31 #include "gt/intel_gt.h"
32
33 #include "i915_random.h"
34 #include "i915_selftest.h"
35 #include "igt_live_test.h"
36 #include "igt_spinner.h"
37 #include "lib_sw_fence.h"
38
39 #include "mock_drm.h"
40 #include "mock_gem_device.h"
41
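/* Count the engines exposed to userspace via the uabi engine list. */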
42 static unsigned int num_uabi_engines(struct drm_i915_private *i915)
43 {
44         struct intel_engine_cs *engine;
45         unsigned int count;
46
47         count = 0;
48         for_each_uabi_engine(engine, i915)
49                 count++;
50
51         return count;
52 }
53
54 static int igt_add_request(void *arg)
55 {
56         struct drm_i915_private *i915 = arg;
57         struct i915_request *request;
58
59         /* Basic preliminary test to create a request and let it loose! */
60
61         request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
62         if (!request)
63                 return -ENOMEM;
64
65         i915_request_add(request);
66
67         return 0;
68 }
69
70 static int igt_wait_request(void *arg)
71 {
72         const long T = HZ / 4;
73         struct drm_i915_private *i915 = arg;
74         struct i915_request *request;
75         int err = -EINVAL;
76
77         /* Submit a request, then wait upon it */
78
79         request = mock_request(i915->engine[RCS0]->kernel_context, T);
80         if (!request)
81                 return -ENOMEM;
82
83         i915_request_get(request);
84
85         if (i915_request_wait(request, 0, 0) != -ETIME) {
86                 pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
87                 goto out_request;
88         }
89
90         if (i915_request_wait(request, 0, T) != -ETIME) {
91                 pr_err("request wait succeeded (expected timeout before submit!)\n");
92                 goto out_request;
93         }
94
95         if (i915_request_completed(request)) {
96                 pr_err("request completed before submit!!\n");
97                 goto out_request;
98         }
99
100         i915_request_add(request);
101
102         if (i915_request_wait(request, 0, 0) != -ETIME) {
103                 pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
104                 goto out_request;
105         }
106
107         if (i915_request_completed(request)) {
108                 pr_err("request completed immediately!\n");
109                 goto out_request;
110         }
111
112         if (i915_request_wait(request, 0, T / 2) != -ETIME) {
113                 pr_err("request wait succeeded (expected timeout!)\n");
114                 goto out_request;
115         }
116
117         if (i915_request_wait(request, 0, T) == -ETIME) {
118                 pr_err("request wait timed out!\n");
119                 goto out_request;
120         }
121
122         if (!i915_request_completed(request)) {
123                 pr_err("request not complete after waiting!\n");
124                 goto out_request;
125         }
126
127         if (i915_request_wait(request, 0, T) == -ETIME) {
128                 pr_err("request wait timed out when already complete!\n");
129                 goto out_request;
130         }
131
132         err = 0;
133 out_request:
134         i915_request_put(request);
135         mock_device_flush(i915);
136         return err;
137 }
138
139 static int igt_fence_wait(void *arg)
140 {
141         const long T = HZ / 4;
142         struct drm_i915_private *i915 = arg;
143         struct i915_request *request;
144         int err = -EINVAL;
145
146         /* Submit a request, treat it as a fence and wait upon it */
147
148         request = mock_request(i915->engine[RCS0]->kernel_context, T);
149         if (!request)
150                 return -ENOMEM;
151
152         if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
153                 pr_err("fence wait success before submit (expected timeout)!\n");
154                 goto out;
155         }
156
157         i915_request_add(request);
158
159         if (dma_fence_is_signaled(&request->fence)) {
160                 pr_err("fence signaled immediately!\n");
161                 goto out;
162         }
163
164         if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
165                 pr_err("fence wait success after submit (expected timeout)!\n");
166                 goto out;
167         }
168
169         if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
170                 pr_err("fence wait timed out (expected success)!\n");
171                 goto out;
172         }
173
174         if (!dma_fence_is_signaled(&request->fence)) {
175                 pr_err("fence unsignaled after waiting!\n");
176                 goto out;
177         }
178
179         if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
180                 pr_err("fence wait timed out when complete (expected success)!\n");
181                 goto out;
182         }
183
184         err = 0;
185 out:
186         mock_device_flush(i915);
187         return err;
188 }
189
190 static int igt_request_rewind(void *arg)
191 {
192         struct drm_i915_private *i915 = arg;
193         struct i915_request *request, *vip;
194         struct i915_gem_context *ctx[2];
195         struct intel_context *ce;
196         int err = -EINVAL;
197
198         ctx[0] = mock_context(i915, "A");
199
200         ce = i915_gem_context_get_engine(ctx[0], RCS0);
201         GEM_BUG_ON(IS_ERR(ce));
202         request = mock_request(ce, 2 * HZ);
203         intel_context_put(ce);
204         if (!request) {
205                 err = -ENOMEM;
206                 goto err_context_0;
207         }
208
209         i915_request_get(request);
210         i915_request_add(request);
211
212         ctx[1] = mock_context(i915, "B");
213
214         ce = i915_gem_context_get_engine(ctx[1], RCS0);
215         GEM_BUG_ON(IS_ERR(ce));
216         vip = mock_request(ce, 0);
217         intel_context_put(ce);
218         if (!vip) {
219                 err = -ENOMEM;
220                 goto err_context_1;
221         }
222
223         /* Simulate preemption by manual reordering */
224         if (!mock_cancel_request(request)) {
225                 pr_err("failed to cancel request (already executed)!\n");
226                 i915_request_add(vip);
227                 goto err_context_1;
228         }
229         i915_request_get(vip);
230         i915_request_add(vip);
231         rcu_read_lock();
232         request->engine->submit_request(request);
233         rcu_read_unlock();
234
236         if (i915_request_wait(vip, 0, HZ) == -ETIME) {
237                 pr_err("timed out waiting for high priority request\n");
238                 goto err;
239         }
240
241         if (i915_request_completed(request)) {
242                 pr_err("low priority request already completed\n");
243                 goto err;
244         }
245
246         err = 0;
247 err:
248         i915_request_put(vip);
249 err_context_1:
250         mock_context_close(ctx[1]);
251         i915_request_put(request);
252 err_context_0:
253         mock_context_close(ctx[0]);
254         mock_device_flush(i915);
255         return err;
256 }
257
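/*
 * Per-engine parameters and result counters shared by the breadcrumbs
 * smoketest threads.
 */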
258 struct smoketest {
259         struct intel_engine_cs *engine;
260         struct i915_gem_context **contexts;
261         atomic_long_t num_waits, num_fences;
262         int ncontexts, max_batch;
263         struct i915_request *(*request_alloc)(struct intel_context *ce);
264 };
265
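/*
 * request_alloc() hooks for struct smoketest: construct either a mock
 * request (no hardware involved) or a real request on the given context.
 */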
266 static struct i915_request *
267 __mock_request_alloc(struct intel_context *ce)
268 {
269         return mock_request(ce, 0);
270 }
271
272 static struct i915_request *
273 __live_request_alloc(struct intel_context *ce)
274 {
275         return intel_context_create_request(ce);
276 }
277
278 static int __igt_breadcrumbs_smoketest(void *arg)
279 {
280         struct smoketest *t = arg;
281         const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
282         const unsigned int total = 4 * t->ncontexts + 1;
283         unsigned int num_waits = 0, num_fences = 0;
284         struct i915_request **requests;
285         I915_RND_STATE(prng);
286         unsigned int *order;
287         int err = 0;
288
289         /*
290          * A very simple test to catch the most egregious of list handling bugs.
291          *
292          * At its heart, we simply create oodles of requests running across
293          * multiple kthreads and enable signaling on them, for the sole purpose
294          * of stressing our breadcrumb handling. The only inspection we do is
295          * that the fences were marked as signaled.
296          */
297
298         requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
299         if (!requests)
300                 return -ENOMEM;
301
302         order = i915_random_order(total, &prng);
303         if (!order) {
304                 err = -ENOMEM;
305                 goto out_requests;
306         }
307
308         while (!kthread_should_stop()) {
309                 struct i915_sw_fence *submit, *wait;
310                 unsigned int n, count;
311
312                 submit = heap_fence_create(GFP_KERNEL);
313                 if (!submit) {
314                         err = -ENOMEM;
315                         break;
316                 }
317
318                 wait = heap_fence_create(GFP_KERNEL);
319                 if (!wait) {
320                         i915_sw_fence_commit(submit);
321                         heap_fence_put(submit);
322                         err = -ENOMEM;
323                         break;
324                 }
325
326                 i915_random_reorder(order, total, &prng);
327                 count = 1 + i915_prandom_u32_max_state(max_batch, &prng);
328
329                 for (n = 0; n < count; n++) {
330                         struct i915_gem_context *ctx =
331                                 t->contexts[order[n] % t->ncontexts];
332                         struct i915_request *rq;
333                         struct intel_context *ce;
334
335                         ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
336                         GEM_BUG_ON(IS_ERR(ce));
337                         rq = t->request_alloc(ce);
338                         intel_context_put(ce);
339                         if (IS_ERR(rq)) {
340                                 err = PTR_ERR(rq);
341                                 count = n;
342                                 break;
343                         }
344
345                         err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
346                                                                submit,
347                                                                GFP_KERNEL);
348
349                         requests[n] = i915_request_get(rq);
350                         i915_request_add(rq);
351
352                         if (err >= 0)
353                                 err = i915_sw_fence_await_dma_fence(wait,
354                                                                     &rq->fence,
355                                                                     0,
356                                                                     GFP_KERNEL);
357
358                         if (err < 0) {
359                                 i915_request_put(rq);
360                                 count = n;
361                                 break;
362                         }
363                 }
364
365                 i915_sw_fence_commit(submit);
366                 i915_sw_fence_commit(wait);
367
368                 if (!wait_event_timeout(wait->wait,
369                                         i915_sw_fence_done(wait),
370                                         5 * HZ)) {
371                         struct i915_request *rq = requests[count - 1];
372
373                         pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n",
374                                atomic_read(&wait->pending), count,
375                                rq->fence.context, rq->fence.seqno,
376                                t->engine->name);
377                         GEM_TRACE_DUMP();
378
379                         intel_gt_set_wedged(t->engine->gt);
380                         GEM_BUG_ON(!i915_request_completed(rq));
381                         i915_sw_fence_wait(wait);
382                         err = -EIO;
383                 }
384
385                 for (n = 0; n < count; n++) {
386                         struct i915_request *rq = requests[n];
387
388                         if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
389                                       &rq->fence.flags)) {
390                                 pr_err("%llu:%llu was not signaled!\n",
391                                        rq->fence.context, rq->fence.seqno);
392                                 err = -EINVAL;
393                         }
394
395                         i915_request_put(rq);
396                 }
397
398                 heap_fence_put(wait);
399                 heap_fence_put(submit);
400
401                 if (err < 0)
402                         break;
403
404                 num_fences += count;
405                 num_waits++;
406
407                 cond_resched();
408         }
409
410         atomic_long_add(num_fences, &t->num_fences);
411         atomic_long_add(num_waits, &t->num_waits);
412
413         kfree(order);
414 out_requests:
415         kfree(requests);
416         return err;
417 }
418
419 static int mock_breadcrumbs_smoketest(void *arg)
420 {
421         struct drm_i915_private *i915 = arg;
422         struct smoketest t = {
423                 .engine = i915->engine[RCS0],
424                 .ncontexts = 1024,
425                 .max_batch = 1024,
426                 .request_alloc = __mock_request_alloc
427         };
428         unsigned int ncpus = num_online_cpus();
429         struct task_struct **threads;
430         unsigned int n;
431         int ret = 0;
432
433         /*
434          * Smoketest our breadcrumb/signal handling for requests across multiple
435          * threads. A very simple test to only catch the most egregious of bugs.
436          * See __igt_breadcrumbs_smoketest();
437          */
438
439         threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
440         if (!threads)
441                 return -ENOMEM;
442
443         t.contexts = kcalloc(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
444         if (!t.contexts) {
445                 ret = -ENOMEM;
446                 goto out_threads;
447         }
448
449         for (n = 0; n < t.ncontexts; n++) {
450                 t.contexts[n] = mock_context(t.engine->i915, "mock");
451                 if (!t.contexts[n]) {
452                         ret = -ENOMEM;
453                         goto out_contexts;
454                 }
455         }
456
457         for (n = 0; n < ncpus; n++) {
458                 threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
459                                          &t, "igt/%d", n);
460                 if (IS_ERR(threads[n])) {
461                         ret = PTR_ERR(threads[n]);
462                         ncpus = n;
463                         break;
464                 }
465
466                 get_task_struct(threads[n]);
467         }
468
469         yield(); /* start all threads before we begin */
470         msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
471
472         for (n = 0; n < ncpus; n++) {
473                 int err;
474
475                 err = kthread_stop(threads[n]);
476                 if (err < 0 && !ret)
477                         ret = err;
478
479                 put_task_struct(threads[n]);
480         }
481         pr_info("Completed %lu waits for %lu fences across %d cpus\n",
482                 atomic_long_read(&t.num_waits),
483                 atomic_long_read(&t.num_fences),
484                 ncpus);
485
486 out_contexts:
487         for (n = 0; n < t.ncontexts; n++) {
488                 if (!t.contexts[n])
489                         break;
490                 mock_context_close(t.contexts[n]);
491         }
492         kfree(t.contexts);
493 out_threads:
494         kfree(threads);
495         return ret;
496 }
497
498 int i915_request_mock_selftests(void)
499 {
500         static const struct i915_subtest tests[] = {
501                 SUBTEST(igt_add_request),
502                 SUBTEST(igt_wait_request),
503                 SUBTEST(igt_fence_wait),
504                 SUBTEST(igt_request_rewind),
505                 SUBTEST(mock_breadcrumbs_smoketest),
506         };
507         struct drm_i915_private *i915;
508         intel_wakeref_t wakeref;
509         int err = 0;
510
511         i915 = mock_gem_device();
512         if (!i915)
513                 return -ENOMEM;
514
515         with_intel_runtime_pm(&i915->runtime_pm, wakeref)
516                 err = i915_subtests(tests, i915);
517
518         drm_dev_put(&i915->drm);
519
520         return err;
521 }
522
523 static int live_nop_request(void *arg)
524 {
525         struct drm_i915_private *i915 = arg;
526         struct intel_engine_cs *engine;
527         struct igt_live_test t;
528         int err = -ENODEV;
529
530         /*
531          * Submit various numbers of nop requests (no payload at all) to each
532          * engine (individually), and wait for them to complete. We can check
533          * the overhead of submitting requests to the hardware.
534          */
535
536         for_each_uabi_engine(engine, i915) {
537                 unsigned long n, prime;
538                 IGT_TIMEOUT(end_time);
539                 ktime_t times[2] = {};
540
541                 err = igt_live_test_begin(&t, i915, __func__, engine->name);
542                 if (err)
543                         return err;
544
545                 intel_engine_pm_get(engine);
546                 for_each_prime_number_from(prime, 1, 8192) {
547                         struct i915_request *request = NULL;
548
549                         times[1] = ktime_get_raw();
550
551                         for (n = 0; n < prime; n++) {
552                                 i915_request_put(request);
553                                 request = i915_request_create(engine->kernel_context);
554                                 if (IS_ERR(request))
555                                         return PTR_ERR(request);
556
557                                 /*
558                                  * This space is left intentionally blank.
559                                  *
560                                  * We do not actually want to perform any
561                                  * action with this request, we just want
562                                  * to measure the latency in allocation
563                                  * and submission of our breadcrumbs -
564                                  * ensuring that the bare request is sufficient
565                                  * for the system to work (i.e. proper HEAD
566                                  * tracking of the rings, interrupt handling,
567                                  * etc). It also gives us the lowest bounds
568                                  * for latency.
569                                  */
570
571                                 i915_request_get(request);
572                                 i915_request_add(request);
573                         }
574                         i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
575                         i915_request_put(request);
576
577                         times[1] = ktime_sub(ktime_get_raw(), times[1]);
578                         if (prime == 1)
579                                 times[0] = times[1];
580
581                         if (__igt_timeout(end_time, NULL))
582                                 break;
583                 }
584                 intel_engine_pm_put(engine);
585
586                 err = igt_live_test_end(&t);
587                 if (err)
588                         return err;
589
590                 pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
591                         engine->name,
592                         ktime_to_ns(times[0]),
593                         prime, div64_u64(ktime_to_ns(times[1]), prime));
594         }
595
596         return err;
597 }
598
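/*
 * Create a one-page batch containing only MI_BATCH_BUFFER_END, pinned
 * into the GGTT and synced before use.
 */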
599 static struct i915_vma *empty_batch(struct drm_i915_private *i915)
600 {
601         struct drm_i915_gem_object *obj;
602         struct i915_vma *vma;
603         u32 *cmd;
604         int err;
605
606         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
607         if (IS_ERR(obj))
608                 return ERR_CAST(obj);
609
610         cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
611         if (IS_ERR(cmd)) {
612                 err = PTR_ERR(cmd);
613                 goto err;
614         }
615
616         *cmd = MI_BATCH_BUFFER_END;
617
618         __i915_gem_object_flush_map(obj, 0, 64);
619         i915_gem_object_unpin_map(obj);
620
621         intel_gt_chipset_flush(&i915->gt);
622
623         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
624         if (IS_ERR(vma)) {
625                 err = PTR_ERR(vma);
626                 goto err;
627         }
628
629         err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
630         if (err)
631                 goto err;
632
633         /* Force the wait now to avoid including it in the benchmark */
634         err = i915_vma_sync(vma);
635         if (err)
636                 goto err_pin;
637
638         return vma;
639
640 err_pin:
641         i915_vma_unpin(vma);
642 err:
643         i915_gem_object_put(obj);
644         return ERR_PTR(err);
645 }
646
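/*
 * Submit the empty batch on the engine's kernel context and return the
 * request, with a reference held for the caller.
 */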
647 static struct i915_request *
648 empty_request(struct intel_engine_cs *engine,
649               struct i915_vma *batch)
650 {
651         struct i915_request *request;
652         int err;
653
654         request = i915_request_create(engine->kernel_context);
655         if (IS_ERR(request))
656                 return request;
657
658         err = engine->emit_bb_start(request,
659                                     batch->node.start,
660                                     batch->node.size,
661                                     I915_DISPATCH_SECURE);
662         if (err)
663                 goto out_request;
664
665         i915_request_get(request);
666 out_request:
667         i915_request_add(request);
668         return err ? ERR_PTR(err) : request;
669 }
670
671 static int live_empty_request(void *arg)
672 {
673         struct drm_i915_private *i915 = arg;
674         struct intel_engine_cs *engine;
675         struct igt_live_test t;
676         struct i915_vma *batch;
677         int err = 0;
678
679         /*
680          * Submit various sized batches of empty requests, to each engine
681          * (individually), and wait for the batch to complete. We can check
682          * the overhead of submitting requests to the hardware.
683          */
684
685         batch = empty_batch(i915);
686         if (IS_ERR(batch))
687                 return PTR_ERR(batch);
688
689         for_each_uabi_engine(engine, i915) {
690                 IGT_TIMEOUT(end_time);
691                 struct i915_request *request;
692                 unsigned long n, prime;
693                 ktime_t times[2] = {};
694
695                 err = igt_live_test_begin(&t, i915, __func__, engine->name);
696                 if (err)
697                         goto out_batch;
698
699                 intel_engine_pm_get(engine);
700
701                 /* Warmup / preload */
702                 request = empty_request(engine, batch);
703                 if (IS_ERR(request)) {
704                         err = PTR_ERR(request);
705                         intel_engine_pm_put(engine);
706                         goto out_batch;
707                 }
708                 i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
709
710                 for_each_prime_number_from(prime, 1, 8192) {
711                         times[1] = ktime_get_raw();
712
713                         for (n = 0; n < prime; n++) {
714                                 i915_request_put(request);
715                                 request = empty_request(engine, batch);
716                                 if (IS_ERR(request)) {
717                                         err = PTR_ERR(request);
718                                         intel_engine_pm_put(engine);
719                                         goto out_batch;
720                                 }
721                         }
722                         i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
723
724                         times[1] = ktime_sub(ktime_get_raw(), times[1]);
725                         if (prime == 1)
726                                 times[0] = times[1];
727
728                         if (__igt_timeout(end_time, NULL))
729                                 break;
730                 }
731                 i915_request_put(request);
732                 intel_engine_pm_put(engine);
733
734                 err = igt_live_test_end(&t);
735                 if (err)
736                         goto out_batch;
737
738                 pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
739                         engine->name,
740                         ktime_to_ns(times[0]),
741                         prime, div64_u64(ktime_to_ns(times[1]), prime));
742         }
743
744 out_batch:
745         i915_vma_unpin(batch);
746         i915_vma_put(batch);
747         return err;
748 }
749
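/*
 * Build a batch that branches back to its own start, so it keeps the
 * engine busy until recursive_batch_resolve() rewrites the first dword
 * to MI_BATCH_BUFFER_END.
 */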
750 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
751 {
752         struct i915_gem_context *ctx = i915->kernel_context;
753         struct drm_i915_gem_object *obj;
754         const int gen = INTEL_GEN(i915);
755         struct i915_address_space *vm;
756         struct i915_vma *vma;
757         u32 *cmd;
758         int err;
759
760         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
761         if (IS_ERR(obj))
762                 return ERR_CAST(obj);
763
764         vm = i915_gem_context_get_vm_rcu(ctx);
765         vma = i915_vma_instance(obj, vm, NULL);
766         i915_vm_put(vm);
767         if (IS_ERR(vma)) {
768                 err = PTR_ERR(vma);
769                 goto err;
770         }
771
772         err = i915_vma_pin(vma, 0, 0, PIN_USER);
773         if (err)
774                 goto err;
775
776         cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
777         if (IS_ERR(cmd)) {
778                 err = PTR_ERR(cmd);
779                 goto err;
780         }
781
782         if (gen >= 8) {
783                 *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
784                 *cmd++ = lower_32_bits(vma->node.start);
785                 *cmd++ = upper_32_bits(vma->node.start);
786         } else if (gen >= 6) {
787                 *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
788                 *cmd++ = lower_32_bits(vma->node.start);
789         } else {
790                 *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
791                 *cmd++ = lower_32_bits(vma->node.start);
792         }
793         *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
794
795         __i915_gem_object_flush_map(obj, 0, 64);
796         i915_gem_object_unpin_map(obj);
797
798         intel_gt_chipset_flush(&i915->gt);
799
800         return vma;
801
802 err:
803         i915_gem_object_put(obj);
804         return ERR_PTR(err);
805 }
806
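/*
 * Terminate a recursive_batch() by overwriting the branch at the start
 * of the buffer with MI_BATCH_BUFFER_END and flushing the write so the
 * GPU sees it.
 */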
807 static int recursive_batch_resolve(struct i915_vma *batch)
808 {
809         u32 *cmd;
810
811         cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
812         if (IS_ERR(cmd))
813                 return PTR_ERR(cmd);
814
815         *cmd = MI_BATCH_BUFFER_END;
816         intel_gt_chipset_flush(batch->vm->gt);
817
818         i915_gem_object_unpin_map(batch->obj);
819
820         return 0;
821 }
822
823 static int live_all_engines(void *arg)
824 {
825         struct drm_i915_private *i915 = arg;
826         const unsigned int nengines = num_uabi_engines(i915);
827         struct intel_engine_cs *engine;
828         struct i915_request **request;
829         struct igt_live_test t;
830         struct i915_vma *batch;
831         unsigned int idx;
832         int err;
833
834         /*
835          * Check we can submit requests to all engines simultaneously. We
836          * send a recursive batch to each engine - checking that we don't
837          * block doing so, and that they don't complete too soon.
838          */
839
840         request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
841         if (!request)
842                 return -ENOMEM;
843
844         err = igt_live_test_begin(&t, i915, __func__, "");
845         if (err)
846                 goto out_free;
847
848         batch = recursive_batch(i915);
849         if (IS_ERR(batch)) {
850                 err = PTR_ERR(batch);
851                 pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
852                 goto out_free;
853         }
854
855         idx = 0;
856         for_each_uabi_engine(engine, i915) {
857                 request[idx] = intel_engine_create_kernel_request(engine);
858                 if (IS_ERR(request[idx])) {
859                         err = PTR_ERR(request[idx]);
860                         pr_err("%s: Request allocation failed with err=%d\n",
861                                __func__, err);
862                         goto out_request;
863                 }
864
865                 err = engine->emit_bb_start(request[idx],
866                                             batch->node.start,
867                                             batch->node.size,
868                                             0);
869                 GEM_BUG_ON(err);
870                 request[idx]->batch = batch;
871
872                 i915_vma_lock(batch);
873                 err = i915_request_await_object(request[idx], batch->obj, 0);
874                 if (err == 0)
875                         err = i915_vma_move_to_active(batch, request[idx], 0);
876                 i915_vma_unlock(batch);
877                 GEM_BUG_ON(err);
878
879                 i915_request_get(request[idx]);
880                 i915_request_add(request[idx]);
881                 idx++;
882         }
883
884         idx = 0;
885         for_each_uabi_engine(engine, i915) {
886                 if (i915_request_completed(request[idx])) {
887                         pr_err("%s(%s): request completed too early!\n",
888                                __func__, engine->name);
889                         err = -EINVAL;
890                         goto out_request;
891                 }
892                 idx++;
893         }
894
895         err = recursive_batch_resolve(batch);
896         if (err) {
897                 pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
898                 goto out_request;
899         }
900
901         idx = 0;
902         for_each_uabi_engine(engine, i915) {
903                 long timeout;
904
905                 timeout = i915_request_wait(request[idx], 0,
906                                             MAX_SCHEDULE_TIMEOUT);
907                 if (timeout < 0) {
908                         err = timeout;
909                         pr_err("%s: error waiting for request on %s, err=%d\n",
910                                __func__, engine->name, err);
911                         goto out_request;
912                 }
913
914                 GEM_BUG_ON(!i915_request_completed(request[idx]));
915                 i915_request_put(request[idx]);
916                 request[idx] = NULL;
917                 idx++;
918         }
919
920         err = igt_live_test_end(&t);
921
922 out_request:
923         idx = 0;
924         for_each_uabi_engine(engine, i915) {
925                 if (request[idx])
926                         i915_request_put(request[idx]);
927                 idx++;
928         }
929         i915_vma_unpin(batch);
930         i915_vma_put(batch);
931 out_free:
932         kfree(request);
933         return err;
934 }
935
936 static int live_sequential_engines(void *arg)
937 {
938         struct drm_i915_private *i915 = arg;
939         const unsigned int nengines = num_uabi_engines(i915);
940         struct i915_request **request;
941         struct i915_request *prev = NULL;
942         struct intel_engine_cs *engine;
943         struct igt_live_test t;
944         unsigned int idx;
945         int err;
946
947         /*
948          * Check we can submit requests to all engines sequentially, such
949          * that each successive request waits for the earlier ones. This
950          * tests that we don't execute requests out of order, even though
951          * they are running on independent engines.
952          */
953
954         request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
955         if (!request)
956                 return -ENOMEM;
957
958         err = igt_live_test_begin(&t, i915, __func__, "");
959         if (err)
960                 goto out_free;
961
962         idx = 0;
963         for_each_uabi_engine(engine, i915) {
964                 struct i915_vma *batch;
965
966                 batch = recursive_batch(i915);
967                 if (IS_ERR(batch)) {
968                         err = PTR_ERR(batch);
969                         pr_err("%s: Unable to create batch for %s, err=%d\n",
970                                __func__, engine->name, err);
971                         goto out_free;
972                 }
973
974                 request[idx] = intel_engine_create_kernel_request(engine);
975                 if (IS_ERR(request[idx])) {
976                         err = PTR_ERR(request[idx]);
977                         pr_err("%s: Request allocation failed for %s with err=%d\n",
978                                __func__, engine->name, err);
979                         goto out_request;
980                 }
981
982                 if (prev) {
983                         err = i915_request_await_dma_fence(request[idx],
984                                                            &prev->fence);
985                         if (err) {
986                                 i915_request_add(request[idx]);
987                                 pr_err("%s: Request await failed for %s with err=%d\n",
988                                        __func__, engine->name, err);
989                                 goto out_request;
990                         }
991                 }
992
993                 err = engine->emit_bb_start(request[idx],
994                                             batch->node.start,
995                                             batch->node.size,
996                                             0);
997                 GEM_BUG_ON(err);
998                 request[idx]->batch = batch;
999
1000                 i915_vma_lock(batch);
1001                 err = i915_request_await_object(request[idx],
1002                                                 batch->obj, false);
1003                 if (err == 0)
1004                         err = i915_vma_move_to_active(batch, request[idx], 0);
1005                 i915_vma_unlock(batch);
1006                 GEM_BUG_ON(err);
1007
1008                 i915_request_get(request[idx]);
1009                 i915_request_add(request[idx]);
1010
1011                 prev = request[idx];
1012                 idx++;
1013         }
1014
1015         idx = 0;
1016         for_each_uabi_engine(engine, i915) {
1017                 long timeout;
1018
1019                 if (i915_request_completed(request[idx])) {
1020                         pr_err("%s(%s): request completed too early!\n",
1021                                __func__, engine->name);
1022                         err = -EINVAL;
1023                         goto out_request;
1024                 }
1025
1026                 err = recursive_batch_resolve(request[idx]->batch);
1027                 if (err) {
1028                         pr_err("%s: failed to resolve batch, err=%d\n",
1029                                __func__, err);
1030                         goto out_request;
1031                 }
1032
1033                 timeout = i915_request_wait(request[idx], 0,
1034                                             MAX_SCHEDULE_TIMEOUT);
1035                 if (timeout < 0) {
1036                         err = timeout;
1037                         pr_err("%s: error waiting for request on %s, err=%d\n",
1038                                __func__, engine->name, err);
1039                         goto out_request;
1040                 }
1041
1042                 GEM_BUG_ON(!i915_request_completed(request[idx]));
1043                 idx++;
1044         }
1045
1046         err = igt_live_test_end(&t);
1047
1048 out_request:
1049         idx = 0;
1050         for_each_uabi_engine(engine, i915) {
1051                 u32 *cmd;
1052
1053                 if (!request[idx])
1054                         break;
1055
1056                 cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
1057                                               I915_MAP_WC);
1058                 if (!IS_ERR(cmd)) {
1059                         *cmd = MI_BATCH_BUFFER_END;
1060                         intel_gt_chipset_flush(engine->gt);
1061
1062                         i915_gem_object_unpin_map(request[idx]->batch->obj);
1063                 }
1064
1065                 i915_vma_put(request[idx]->batch);
1066                 i915_request_put(request[idx]);
1067                 idx++;
1068         }
1069 out_free:
1070         kfree(request);
1071         return err;
1072 }
1073
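/*
 * Thread body: submit a single request at a time on one engine and
 * synchronously wait for each, counting complete round trips until the
 * selftest timeout.
 */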
1074 static int __live_parallel_engine1(void *arg)
1075 {
1076         struct intel_engine_cs *engine = arg;
1077         IGT_TIMEOUT(end_time);
1078         unsigned long count;
1079         int err = 0;
1080
1081         count = 0;
1082         intel_engine_pm_get(engine);
1083         do {
1084                 struct i915_request *rq;
1085
1086                 rq = i915_request_create(engine->kernel_context);
1087                 if (IS_ERR(rq)) {
1088                         err = PTR_ERR(rq);
1089                         break;
1090                 }
1091
1092                 i915_request_get(rq);
1093                 i915_request_add(rq);
1094
1095                 err = 0;
1096                 if (i915_request_wait(rq, 0, HZ / 5) < 0)
1097                         err = -ETIME;
1098                 i915_request_put(rq);
1099                 if (err)
1100                         break;
1101
1102                 count++;
1103         } while (!__igt_timeout(end_time, NULL));
1104         intel_engine_pm_put(engine);
1105
1106         pr_info("%s: %lu requests + sync\n", engine->name, count);
1107         return err;
1108 }
1109
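/*
 * Thread body: submit requests back-to-back without waiting, counting
 * how many can be queued on one engine before the selftest timeout.
 */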
1110 static int __live_parallel_engineN(void *arg)
1111 {
1112         struct intel_engine_cs *engine = arg;
1113         IGT_TIMEOUT(end_time);
1114         unsigned long count;
1115         int err = 0;
1116
1117         count = 0;
1118         intel_engine_pm_get(engine);
1119         do {
1120                 struct i915_request *rq;
1121
1122                 rq = i915_request_create(engine->kernel_context);
1123                 if (IS_ERR(rq)) {
1124                         err = PTR_ERR(rq);
1125                         break;
1126                 }
1127
1128                 i915_request_add(rq);
1129                 count++;
1130         } while (!__igt_timeout(end_time, NULL));
1131         intel_engine_pm_put(engine);
1132
1133         pr_info("%s: %lu requests\n", engine->name, count);
1134         return err;
1135 }
1136
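/*
 * i915->selftest.counter acts as a barrier for the parallel spinners:
 * each thread decrements it via wake_all(), and wait_for_all() blocks
 * until every thread has arrived or the selftest timeout expires.
 */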
1137 static bool wake_all(struct drm_i915_private *i915)
1138 {
1139         if (atomic_dec_and_test(&i915->selftest.counter)) {
1140                 wake_up_var(&i915->selftest.counter);
1141                 return true;
1142         }
1143
1144         return false;
1145 }
1146
1147 static int wait_for_all(struct drm_i915_private *i915)
1148 {
1149         if (wake_all(i915))
1150                 return 0;
1151
1152         if (wait_var_event_timeout(&i915->selftest.counter,
1153                                    !atomic_read(&i915->selftest.counter),
1154                                    i915_selftest.timeout_jiffies))
1155                 return 0;
1156
1157         return -ETIME;
1158 }
1159
1160 static int __live_parallel_spin(void *arg)
1161 {
1162         struct intel_engine_cs *engine = arg;
1163         struct igt_spinner spin;
1164         struct i915_request *rq;
1165         int err = 0;
1166
1167         /*
1168          * Create a spinner running for eternity on each engine. If a second
1169          * spinner is incorrectly placed on the same engine, it will not be
1170          * able to start in time.
1171          */
1172
1173         if (igt_spinner_init(&spin, engine->gt)) {
1174                 wake_all(engine->i915);
1175                 return -ENOMEM;
1176         }
1177
1178         intel_engine_pm_get(engine);
1179         rq = igt_spinner_create_request(&spin,
1180                                         engine->kernel_context,
1181                                         MI_NOOP); /* no preemption */
1182         intel_engine_pm_put(engine);
1183         if (IS_ERR(rq)) {
1184                 err = PTR_ERR(rq);
1185                 if (err == -ENODEV)
1186                         err = 0;
1187                 wake_all(engine->i915);
1188                 goto out_spin;
1189         }
1190
1191         i915_request_get(rq);
1192         i915_request_add(rq);
1193         if (igt_wait_for_spinner(&spin, rq)) {
1194                 /* Occupy this engine for the whole test */
1195                 err = wait_for_all(engine->i915);
1196         } else {
1197                 pr_err("Failed to start spinner on %s\n", engine->name);
1198                 err = -EINVAL;
1199         }
1200         igt_spinner_end(&spin);
1201
1202         if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
1203                 err = -EIO;
1204         i915_request_put(rq);
1205
1206 out_spin:
1207         igt_spinner_fini(&spin);
1208         return err;
1209 }
1210
1211 static int live_parallel_engines(void *arg)
1212 {
1213         struct drm_i915_private *i915 = arg;
1214         static int (* const func[])(void *arg) = {
1215                 __live_parallel_engine1,
1216                 __live_parallel_engineN,
1217                 __live_parallel_spin,
1218                 NULL,
1219         };
1220         const unsigned int nengines = num_uabi_engines(i915);
1221         struct intel_engine_cs *engine;
1222         int (* const *fn)(void *arg);
1223         struct task_struct **tsk;
1224         int err = 0;
1225
1226         /*
1227          * Check we can submit requests to all engines concurrently. This
1228          * tests that we load up the system maximally.
1229          */
1230
1231         tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
1232         if (!tsk)
1233                 return -ENOMEM;
1234
1235         for (fn = func; !err && *fn; fn++) {
1236                 char name[KSYM_NAME_LEN];
1237                 struct igt_live_test t;
1238                 unsigned int idx;
1239
1240                 snprintf(name, sizeof(name), "%pS", fn);
1241                 err = igt_live_test_begin(&t, i915, __func__, name);
1242                 if (err)
1243                         break;
1244
1245                 atomic_set(&i915->selftest.counter, nengines);
1246
1247                 idx = 0;
1248                 for_each_uabi_engine(engine, i915) {
1249                         tsk[idx] = kthread_run(*fn, engine,
1250                                                "igt/parallel:%s",
1251                                                engine->name);
1252                         if (IS_ERR(tsk[idx])) {
1253                                 err = PTR_ERR(tsk[idx]);
1254                                 break;
1255                         }
1256                         get_task_struct(tsk[idx++]);
1257                 }
1258
1259                 yield(); /* start all threads before we kthread_stop() */
1260
1261                 idx = 0;
1262                 for_each_uabi_engine(engine, i915) {
1263                         int status;
1264
1265                         if (IS_ERR(tsk[idx]))
1266                                 break;
1267
1268                         status = kthread_stop(tsk[idx]);
1269                         if (status && !err)
1270                                 err = status;
1271
1272                         put_task_struct(tsk[idx++]);
1273                 }
1274
1275                 if (igt_live_test_end(&t))
1276                         err = -EIO;
1277         }
1278
1279         kfree(tsk);
1280         return err;
1281 }
1282
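/*
 * Estimate how many requests can safely be queued on this context and
 * engine without exhausting ring space; unlimited when using execlists.
 */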
1283 static int
1284 max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
1285 {
1286         struct i915_request *rq;
1287         int ret;
1288
1289         /*
1290          * Before execlists, all contexts share the same ringbuffer. With
1291          * execlists, each context/engine has a separate ringbuffer and
1292          * for the purposes of this test, inexhaustible.
1293          *
1294          * For the global ringbuffer though, we have to be very careful
1295          * that we do not wrap while preventing the execution of requests
1296          * with an unsignaled fence.
1297          */
1298         if (HAS_EXECLISTS(ctx->i915))
1299                 return INT_MAX;
1300
1301         rq = igt_request_alloc(ctx, engine);
1302         if (IS_ERR(rq)) {
1303                 ret = PTR_ERR(rq);
1304         } else {
1305                 int sz;
1306
1307                 ret = rq->ring->size - rq->reserved_space;
1308                 i915_request_add(rq);
1309
1310                 sz = rq->ring->emit - rq->head;
1311                 if (sz < 0)
1312                         sz += rq->ring->size;
1313                 ret /= sz;
1314                 ret /= 2; /* leave half spare, in case of emergency! */
1315         }
1316
1317         return ret;
1318 }
1319
1320 static int live_breadcrumbs_smoketest(void *arg)
1321 {
1322         struct drm_i915_private *i915 = arg;
1323         const unsigned int nengines = num_uabi_engines(i915);
1324         const unsigned int ncpus = num_online_cpus();
1325         unsigned long num_waits, num_fences;
1326         struct intel_engine_cs *engine;
1327         struct task_struct **threads;
1328         struct igt_live_test live;
1329         intel_wakeref_t wakeref;
1330         struct smoketest *smoke;
1331         unsigned int n, idx;
1332         struct file *file;
1333         int ret = 0;
1334
1335         /*
1336          * Smoketest our breadcrumb/signal handling for requests across multiple
1337          * threads. A very simple test to only catch the most egregious of bugs.
1338          * See __igt_breadcrumbs_smoketest();
1339          *
1340          * On real hardware this time.
1341          */
1342
1343         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1344
1345         file = mock_file(i915);
1346         if (IS_ERR(file)) {
1347                 ret = PTR_ERR(file);
1348                 goto out_rpm;
1349         }
1350
1351         smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL);
1352         if (!smoke) {
1353                 ret = -ENOMEM;
1354                 goto out_file;
1355         }
1356
1357         threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);
1358         if (!threads) {
1359                 ret = -ENOMEM;
1360                 goto out_smoke;
1361         }
1362
1363         smoke[0].request_alloc = __live_request_alloc;
1364         smoke[0].ncontexts = 64;
1365         smoke[0].contexts = kcalloc(smoke[0].ncontexts,
1366                                     sizeof(*smoke[0].contexts),
1367                                     GFP_KERNEL);
1368         if (!smoke[0].contexts) {
1369                 ret = -ENOMEM;
1370                 goto out_threads;
1371         }
1372
1373         for (n = 0; n < smoke[0].ncontexts; n++) {
1374                 smoke[0].contexts[n] = live_context(i915, file);
1375                 if (!smoke[0].contexts[n]) {
1376                         ret = -ENOMEM;
1377                         goto out_contexts;
1378                 }
1379         }
1380
1381         ret = igt_live_test_begin(&live, i915, __func__, "");
1382         if (ret)
1383                 goto out_contexts;
1384
1385         idx = 0;
1386         for_each_uabi_engine(engine, i915) {
1387                 smoke[idx] = smoke[0];
1388                 smoke[idx].engine = engine;
1389                 smoke[idx].max_batch =
1390                         max_batches(smoke[0].contexts[0], engine);
1391                 if (smoke[idx].max_batch < 0) {
1392                         ret = smoke[idx].max_batch;
1393                         goto out_flush;
1394                 }
1395                 /* One ring interleaved between requests from all cpus */
1396                 smoke[idx].max_batch /= num_online_cpus() + 1;
1397                 pr_debug("Limiting batches to %d requests on %s\n",
1398                          smoke[idx].max_batch, engine->name);
1399
1400                 for (n = 0; n < ncpus; n++) {
1401                         struct task_struct *tsk;
1402
1403                         tsk = kthread_run(__igt_breadcrumbs_smoketest,
1404                                           &smoke[idx], "igt/%d.%d", idx, n);
1405                         if (IS_ERR(tsk)) {
1406                                 ret = PTR_ERR(tsk);
1407                                 goto out_flush;
1408                         }
1409
1410                         get_task_struct(tsk);
1411                         threads[idx * ncpus + n] = tsk;
1412                 }
1413
1414                 idx++;
1415         }
1416
1417         yield(); /* start all threads before we begin */
1418         msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
1419
1420 out_flush:
1421         idx = 0;
1422         num_waits = 0;
1423         num_fences = 0;
1424         for_each_uabi_engine(engine, i915) {
1425                 for (n = 0; n < ncpus; n++) {
1426                         struct task_struct *tsk = threads[idx * ncpus + n];
1427                         int err;
1428
1429                         if (!tsk)
1430                                 continue;
1431
1432                         err = kthread_stop(tsk);
1433                         if (err < 0 && !ret)
1434                                 ret = err;
1435
1436                         put_task_struct(tsk);
1437                 }
1438
1439                 num_waits += atomic_long_read(&smoke[idx].num_waits);
1440                 num_fences += atomic_long_read(&smoke[idx].num_fences);
1441                 idx++;
1442         }
1443         pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
1444                 num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
1445
1446         ret = igt_live_test_end(&live) ?: ret;
1447 out_contexts:
1448         kfree(smoke[0].contexts);
1449 out_threads:
1450         kfree(threads);
1451 out_smoke:
1452         kfree(smoke);
1453 out_file:
1454         fput(file);
1455 out_rpm:
1456         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1457
1458         return ret;
1459 }
1460
1461 int i915_request_live_selftests(struct drm_i915_private *i915)
1462 {
1463         static const struct i915_subtest tests[] = {
1464                 SUBTEST(live_nop_request),
1465                 SUBTEST(live_all_engines),
1466                 SUBTEST(live_sequential_engines),
1467                 SUBTEST(live_parallel_engines),
1468                 SUBTEST(live_empty_request),
1469                 SUBTEST(live_breadcrumbs_smoketest),
1470         };
1471
1472         if (intel_gt_is_wedged(&i915->gt))
1473                 return 0;
1474
1475         return i915_subtests(tests, i915);
1476 }