/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"

#include "huge_gem_object.h"
#include "igt_gem_utils.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
static int live_nop_switch(void *arg)
{
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	struct drm_file *file;
	unsigned long n;
	int err = -ENODEV;

	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serves as a very simple stress test for submission and HW switching
	 * between contexts.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out_unlock;
	}

	for (n = 0; n < nctx; n++) {
		ctx[n] = live_context(i915, file);
		if (IS_ERR(ctx[n])) {
			err = PTR_ERR(ctx[n]);
			goto out_unlock;
		}
	}

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			rq = igt_request_alloc(ctx[n], engine);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				goto out_unlock;
			}
			i915_request_add(rq);
		}
		if (i915_request_wait(rq,
				      I915_WAIT_LOCKED,
				      HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto out_unlock;
		}

		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				rq = igt_request_alloc(ctx[n % nctx], engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					goto out_unlock;
				}

				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_request_add(rq);
			}
			if (i915_request_wait(rq,
					      I915_WAIT_LOCKED,
					      HZ / 5) < 0) {
				pr_err("Switching between %lu contexts timed out\n",
				       prime);
				i915_gem_set_wedged(i915);
				break;
			}

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 2)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_unlock;

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	return err;
}

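/*
 * Build and pin a batch of MI_STORE_DWORD_IMM commands that writes
 * @value into one dword of each of @count pages of @vma, starting at
 * @offset within the vma. The command encoding varies with gen.
 */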
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = value;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = value;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = value;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

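/*
 * A huge_gem_object is larger than it appears: only a subset of its
 * backing pages are physically allocated, and the rest alias them.
 * real_page_count() reports the physical backing, fake_page_count()
 * the virtual (dma) size of the object.
 */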
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}

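/*
 * Write the value @dw into the @dw'th dword of each real page of @obj,
 * using a gpu_fill_dw() batch submitted on @engine within @ctx.
 */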
static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
		    unsigned int dw)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(obj->base.size > vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/*
	 * Within the GTT the huge object maps every page onto one of its
	 * 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	batch = gpu_fill_dw(vma,
			    (dw * real_page_count(obj)) << PAGE_SHIFT |
			    (dw * sizeof(u32)),
			    real_page_count(obj),
			    dw);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	flags = 0;
	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	i915_vma_lock(batch);
	err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_request_add(rq);

	i915_vma_unpin(batch);
	i915_vma_close(batch);
	i915_vma_put(batch);

	i915_vma_unpin(vma);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}

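/*
 * Fill every dword of every real page of @obj with @value from the CPU,
 * flushing when the platform lacks LLC so the GPU sees the writes.
 */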
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_object_prepare_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_object_finish_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
}

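/*
 * Verify from the CPU that the first @max dwords of each real page of
 * @obj hold their own index (as written by gpu_fill()) and that the
 * remainder still hold the STACK_MAGIC canary from cpu_fill().
 */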
static noinline int cpu_check(struct drm_i915_gem_object *obj,
			      unsigned int idx, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("%pS: Invalid value at object %d page %d/%lu, offset %d/%d: found %x expected %x\n",
				       __builtin_return_address(0), idx,
				       n, real_page_count(obj), m, max,
				       map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
				       __builtin_return_address(0), idx, n, m,
				       map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_object_finish_access(obj);
	return err;
}

static int file_add_object(struct drm_file *file,
			   struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}

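/*
 * Create a huge_gem_object sized to fill (at most half of) the address
 * space, prime its backing store with STACK_MAGIC from the CPU, and
 * track it on @objects for later checking.
 */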
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
	u64 size;
	int err;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

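/*
 * The number of distinct dword indices that fit in @obj, i.e. how many
 * gpu_fill() passes can be made before the object is full.
 */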
static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU, making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	for_each_engine(engine, i915, id) {
		struct drm_i915_gem_object *obj = NULL;
		unsigned long ncontexts, ndwords, dw;
		struct igt_live_test t;
		struct drm_file *file;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (!engine->context_size)
			continue; /* No logical context support in HW */

		file = mock_file(i915);
		if (IS_ERR(file))
			return PTR_ERR(file);

		mutex_lock(&i915->drm.struct_mutex);

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		ncontexts = 0;
		ndwords = 0;
		dw = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			intel_wakeref_t wakeref;

			ctx = live_context(i915, file);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_unlock;
			}

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}
			}

			with_intel_runtime_pm(i915, wakeref)
				err = gpu_fill(obj, ctx, engine, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;
		}

		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				break;

			dw += rem;
		}

out_unlock:
		if (igt_live_test_end(&t))
			err = -EIO;
		mutex_unlock(&i915->drm.struct_mutex);

		mock_file_free(i915, file);
		if (err)
			return err;
	}

	return 0;
}

static int igt_shared_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *parent;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_live_test t;
	struct drm_file *file;
	int err = 0;

	/*
	 * Create a few different contexts with the same mm and write
	 * through each ctx using the GPU, making sure those writes end
	 * up in the expected pages of our obj.
	 */
	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	parent = live_context(i915, file);
	if (IS_ERR(parent)) {
		err = PTR_ERR(parent);
		goto out_unlock;
	}

	if (!parent->vm) { /* not full-ppgtt; nothing to share */
		err = 0;
		goto out_unlock;
	}

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		unsigned long ncontexts, ndwords, dw;
		struct drm_i915_gem_object *obj = NULL;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		dw = 0;
		ndwords = 0;
		ncontexts = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			intel_wakeref_t wakeref;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_test;
			}

			__assign_ppgtt(ctx, parent->vm);

			if (!obj) {
				obj = create_test_object(parent, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					kernel_context_close(ctx);
					goto out_test;
				}
			}

			err = 0;
			with_intel_runtime_pm(i915, wakeref)
				err = gpu_fill(obj, ctx, engine, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
				kernel_context_close(ctx);
				goto out_test;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			kernel_context_close(ctx);
		}
		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				goto out_test;

			dw += rem;
		}
	}
out_test:
	if (igt_live_test_end(&t))
		err = -EIO;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

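/*
 * Build a gen8+ batch that reads the current R_PWR_CLK_STATE (RPCS)
 * register and stores it into the first dword of @vma.
 */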
static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj;
	u32 *cmd;
	int err;

	if (INTEL_GEN(vma->vm->i915) < 8)
		return ERR_PTR(-EINVAL);

	obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
	*cmd++ = lower_32_bits(vma->node.start);
	*cmd++ = upper_32_bits(vma->node.start);
	*cmd = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

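/*
 * Submit an rpcs_query_batch() on @ce, writing the RPCS value into
 * @obj; the resulting request is returned in @rq_out for the caller
 * to wait upon.
 */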
static int
emit_rpcs_query(struct drm_i915_gem_object *obj,
		struct intel_context *ce,
		struct i915_request **rq_out)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->gem_context->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	batch = rpcs_query_batch(vma);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
	if (err)
		goto err_request;

	i915_vma_lock(batch);
	err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(batch);
	i915_vma_close(batch);
	i915_vma_put(batch);

	i915_vma_unpin(vma);

	*rq_out = i915_request_get(rq);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
err_vma:
	i915_vma_unpin(vma);

	return err;
}

#define TEST_IDLE	BIT(0)
#define TEST_BUSY	BIT(1)
#define TEST_RESET	BIT(2)

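/*
 * For the busy/reset phases, start a spinner on @ce so that the
 * context is guaranteed to be active on the GPU when its SSEU
 * configuration is changed.
 */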
static int
__sseu_prepare(struct drm_i915_private *i915,
	       const char *name,
	       unsigned int flags,
	       struct intel_context *ce,
	       struct igt_spinner **spin)
{
	struct i915_request *rq;
	int ret;

	*spin = NULL;
	if (!(flags & (TEST_BUSY | TEST_RESET)))
		return 0;

	*spin = kzalloc(sizeof(**spin), GFP_KERNEL);
	if (!*spin)
		return -ENOMEM;

	ret = igt_spinner_init(*spin, i915);
	if (ret)
		goto err_free;

	rq = igt_spinner_create_request(*spin,
					ce->gem_context,
					ce->engine,
					MI_NOOP);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto err_fini;
	}

	i915_request_add(rq);

	if (!igt_wait_for_spinner(*spin, rq)) {
		pr_err("%s: Spinner failed to start!\n", name);
		ret = -ETIMEDOUT;
		goto err_end;
	}

	return 0;

err_end:
	igt_spinner_end(*spin);
err_fini:
	igt_spinner_fini(*spin);
err_free:
	kfree(fetch_and_zero(spin));
	return ret;
}

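/*
 * Read back the RPCS register through @ce and return the number of
 * enabled slices (or a negative error code), with the raw register
 * value stored in @rpcs.
 */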
static int
__read_slice_count(struct drm_i915_private *i915,
		   struct intel_context *ce,
		   struct drm_i915_gem_object *obj,
		   struct igt_spinner *spin,
		   u32 *rpcs)
{
	struct i915_request *rq = NULL;
	u32 s_mask, s_shift;
	unsigned int cnt;
	u32 *buf, val;
	long ret;

	ret = emit_rpcs_query(obj, ce, &rq);
	if (ret)
		return ret;

	if (spin)
		igt_spinner_end(spin);

	ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);
	if (ret < 0)
		return ret;

	buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		return ret;
	}

	if (INTEL_GEN(i915) >= 11) {
		s_mask = GEN11_RPCS_S_CNT_MASK;
		s_shift = GEN11_RPCS_S_CNT_SHIFT;
	} else {
		s_mask = GEN8_RPCS_S_CNT_MASK;
		s_shift = GEN8_RPCS_S_CNT_SHIFT;
	}

	val = *buf;
	cnt = (val & s_mask) >> s_shift;
	*rpcs = val;

	i915_gem_object_unpin_map(obj);

	return cnt;
}

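/* Compare a slice count read from the GPU against the expected value. */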
static int
__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
	     const char *prefix, const char *suffix)
{
	if (slices == expected)
		return 0;

	if (slices < 0) {
		pr_err("%s: %s read slice count failed with %d%s\n",
		       name, prefix, slices, suffix);
		return slices;
	}

	pr_err("%s: %s slice count %d is not %u%s\n",
	       name, prefix, slices, expected, suffix);

	pr_info("RPCS=0x%x; %u%sx%u%s\n",
		rpcs, slices,
		(rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
		(rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
		(rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");

	return -EINVAL;
}

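/*
 * After reconfiguring the SSEU, check that the context observes the
 * expected slice count while the kernel context retains the full
 * complement, optionally across an engine reset and after idling.
 */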
static int
__sseu_finish(struct drm_i915_private *i915,
	      const char *name,
	      unsigned int flags,
	      struct intel_context *ce,
	      struct drm_i915_gem_object *obj,
	      unsigned int expected,
	      struct igt_spinner *spin)
{
	unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
	u32 rpcs = 0;
	int ret = 0;

	if (flags & TEST_RESET) {
		ret = i915_reset_engine(ce->engine, "sseu");
		if (ret)
			goto out;
	}

	ret = __read_slice_count(i915, ce, obj,
				 flags & TEST_RESET ? NULL : spin, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
	if (ret)
		goto out;

	ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
				 NULL, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");

out:
	if (spin)
		igt_spinner_end(spin);

	if ((flags & TEST_IDLE) && ret == 0) {
		ret = i915_gem_wait_for_idle(i915,
					     I915_WAIT_LOCKED,
					     MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;

		ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
		ret = __check_rpcs(name, rpcs, ret, expected,
				   "Context", " after idle!");
	}

	return ret;
}

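/*
 * Apply @sseu to @ce (optionally while the context is busy, see
 * __sseu_prepare()) and verify the hardware picked up the new
 * configuration.
 */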
static int
__sseu_test(struct drm_i915_private *i915,
	    const char *name,
	    unsigned int flags,
	    struct intel_context *ce,
	    struct drm_i915_gem_object *obj,
	    struct intel_sseu sseu)
{
	struct igt_spinner *spin = NULL;
	int ret;

	ret = __sseu_prepare(i915, name, flags, ce, &spin);
	if (ret)
		return ret;

	ret = __intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_spin;

	ret = __sseu_finish(i915, name, flags, ce, obj,
			    hweight32(sseu.slice_mask), spin);

out_spin:
	if (spin) {
		igt_spinner_end(spin);
		igt_spinner_fini(spin);
		kfree(spin);
	}
	return ret;
}

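/*
 * Toggle a context between the full device SSEU configuration and a
 * power-gated one, checking each transition under the conditions
 * selected by @flags (idle, busy, engine reset).
 */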
static int
__igt_ctx_sseu(struct drm_i915_private *i915,
	       const char *name,
	       unsigned int flags)
{
	struct intel_engine_cs *engine = i915->engine[RCS0];
	struct intel_sseu default_sseu = engine->sseu;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct intel_sseu pg_sseu;
	intel_wakeref_t wakeref;
	struct drm_file *file;
	int ret;

	if (INTEL_GEN(i915) < 9)
		return 0;

	if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
		return 0;

	if (hweight32(default_sseu.slice_mask) < 2)
		return 0;

	/*
	 * Gen11 VME friendly power-gated configuration with half the
	 * sub-slices enabled.
	 */
	pg_sseu = default_sseu;
	pg_sseu.slice_mask = 1;
	pg_sseu.subslice_mask =
		~(~0 << (hweight32(default_sseu.subslice_mask) / 2));

	pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
		name, flags, hweight32(default_sseu.slice_mask),
		hweight32(pg_sseu.slice_mask));

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (flags & TEST_RESET)
		igt_global_reset_lock(i915);

	mutex_lock(&i915->drm.struct_mutex);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto out_unlock;
	}
	i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ce = i915_gem_context_get_engine(ctx, RCS0);
	if (IS_ERR(ce)) {
		ret = PTR_ERR(ce);
		goto out_rpm;
	}

	ret = intel_context_pin(ce);
	if (ret)
		goto out_context;

	/* First set the default mask. */
	ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
	if (ret)
		goto out_fail;

	/* Then set a power-gated configuration. */
	ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
	if (ret)
		goto out_fail;

	/* Back to defaults. */
	ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
	if (ret)
		goto out_fail;

	/* One last power-gated configuration for the road. */
	ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
	if (ret)
		goto out_fail;

out_fail:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		ret = -EIO;

	intel_context_unpin(ce);
out_context:
	intel_context_put(ce);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_put(obj);

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);

	if (flags & TEST_RESET)
		igt_global_reset_unlock(i915);

	mock_file_free(i915, file);

	if (ret)
		pr_err("%s: Failed with %d!\n", name, ret);

	return ret;
}

static int igt_ctx_sseu(void *arg)
{
	struct {
		const char *name;
		unsigned int flags;
	} *phase, phases[] = {
		{ .name = "basic", .flags = 0 },
		{ .name = "idle", .flags = TEST_IDLE },
		{ .name = "busy", .flags = TEST_BUSY },
		{ .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
		{ .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
		{ .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
	};
	unsigned int i;
	int ret = 0;

	for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
	     i++, phase++)
		ret = __igt_ctx_sseu(arg, phase->name, phase->flags);

	return ret;
}

static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long idx, ndwords, dw;
	struct igt_live_test t;
	struct drm_file *file;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects, checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm;
	if (!vm || !vm->has_read_only) {
		err = 0;
		goto out_unlock;
	}

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		unsigned int id;

		for_each_engine(engine, i915, id) {
			intel_wakeref_t wakeref;

			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			err = 0;
			with_intel_runtime_pm(i915, wakeref)
				err = gpu_fill(obj, ctx, engine, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
	}
	pr_info("Submitted %lu dwords (across %u engines)\n",
		ndwords, RUNTIME_INFO(i915)->num_engines);

	dw = 0;
	idx = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, idx++, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	if (igt_live_test_end(&t))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

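/*
 * Confirm that the chosen scratch @offset does not land on top of an
 * existing allocation in the context's address space.
 */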
static int check_scratch(struct i915_gem_context *ctx, u64 offset)
{
	struct drm_mm_node *node =
		__drm_mm_interval_first(&ctx->vm->mm,
					offset, offset + sizeof(u32) - 1);
	if (!node || node->start > offset)
		return 0;

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
	       upper_32_bits(offset), lower_32_bits(offset));
	return -EINVAL;
}

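/*
 * From @ctx, emit an MI_STORE_DWORD_IMM batch that writes @value into
 * the (otherwise unused) address @offset of the context's vm.
 */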
static int write_to_scratch(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, u32 value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
	} else {
		*cmd++ = 0;
		*cmd++ = offset;
	}
	*cmd++ = value;
	*cmd = MI_BATCH_BUFFER_END;
	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto err;

	err = check_scratch(ctx, offset);
	if (err)
		goto err_unpin;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);
	i915_vma_put(vma);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return err;
}

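/*
 * From @ctx, read back the dword at address @offset of the context's
 * vm by bouncing it through a GPR register into @obj, returning the
 * result in @value.
 */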
static int read_from_scratch(struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine,
			     u64 offset, u32 *value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	memset(cmd, POISON_INUSE, PAGE_SIZE);
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
		*cmd++ = 0;
	} else {
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = offset;
		*cmd++ = MI_STORE_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
	}
	*cmd = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto err;

	err = check_scratch(ctx, offset);
	if (err)
		goto err_unpin;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);
	i915_gem_object_put(obj);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return err;
}

static int igt_vm_isolation(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	struct drm_file *file;
	I915_RND_STATE(prng);
	unsigned long count;
	unsigned int id;
	u64 vm_total;
	int err;

	if (INTEL_GEN(i915) < 7)
		return 0;

	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	ctx_a = live_context(i915, file);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);
		goto out_unlock;
	}

	ctx_b = live_context(i915, file);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);
		goto out_unlock;
	}

	/* We can only test vm isolation if the vms are distinct */
	if (ctx_a->vm == ctx_b->vm)
		goto out_unlock;

	vm_total = ctx_a->vm->total;
	GEM_BUG_ON(ctx_b->vm->total != vm_total);
	vm_total -= I915_GTT_PAGE_SIZE;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;
			u64 offset;

			div64_u64_rem(i915_prandom_u64_state(&prng),
				      vm_total, &offset);
			offset &= -sizeof(u32);
			offset += I915_GTT_PAGE_SIZE;

			err = write_to_scratch(ctx_a, engine,
					       offset, 0xdeadbeef);
			if (err == 0)
				err = read_from_scratch(ctx_b, engine,
							offset, &value);
			if (err)
				goto out_rpm;

			if (value) {
				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
				       engine->name, value,
				       upper_32_bits(offset),
				       lower_32_bits(offset),
				       this);
				err = -EINVAL;
				goto out_rpm;
			}

			this++;
		}
		count += this;
	}
	pr_info("Checked %lu scratch offsets across %d engines\n",
		count, RUNTIME_INFO(i915)->num_engines);

out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out_unlock:
	if (igt_live_test_end(&t))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

static __maybe_unused const char *
__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;

	if (engines == ALL_ENGINES)
		return "all";

	for_each_engine_masked(engine, i915, engines, tmp)
		return engine->name;

	return "none";
}

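/* Skip engines whose contexts were never pinned (no state to retire). */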
static bool skip_unused_engines(struct intel_context *ce, void *data)
{
	return !ce->state;
}

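/* Count barrier callbacks so we can tell how many completed. */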
static void mock_barrier_task(void *data)
{
	unsigned int *counter = data;

	++*counter;
}

static int mock_context_barrier(void *arg)
{
#undef pr_fmt
#define pr_fmt(x) "context_barrier_task():" # x
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	unsigned int counter;
	int err;

	/*
	 * The context barrier provides us with a callback after it emits
	 * a request; useful for retiring old state after loading new.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	ctx = mock_context(i915, "mock");
	if (!ctx) {
		err = -ENOMEM;
		goto unlock;
	}

	counter = 0;
	err = context_barrier_task(ctx, 0,
				   NULL, NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately with 0 engines\n");
		err = -EINVAL;
		goto out;
	}

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL,
				   mock_barrier_task,
				   &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately for all unused engines\n");
		err = -EINVAL;
		goto out;
	}

	rq = igt_request_alloc(ctx, i915->engine[RCS0]);
	if (IS_ERR(rq)) {
		pr_err("Request allocation failed!\n");
		err = PTR_ERR(rq);
		goto out;
	}
	i915_request_add(rq);

	counter = 0;
	context_barrier_inject_fault = BIT(RCS0);
	err = context_barrier_task(ctx, ALL_ENGINES,
				   NULL, NULL, mock_barrier_task, &counter);
	context_barrier_inject_fault = 0;
	if (err == -ENXIO)
		err = 0;
	else
		pr_err("Did not hit fault injection!\n");
	if (counter != 0) {
		pr_err("Invoked callback on error!\n");
		err = -EIO;
	}
	if (err)
		goto out;

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL,
				   mock_barrier_task,
				   &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	mock_device_flush(i915);
	if (counter == 0) {
		pr_err("Did not retire on each active engine\n");
		err = -EINVAL;
		goto out;
	}

out:
	mock_context_close(ctx);
unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
#undef pr_fmt
#define pr_fmt(x) x
}

int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_context_barrier),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_ctx_sseu),
		SUBTEST(igt_shared_ctx_exec),
		SUBTEST(igt_vm_isolation),
	};

	if (i915_terminally_wedged(dev_priv))
		return 0;

	return i915_subtests(tests, dev_priv);
}