drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6
7 #include <linux/highmem.h>
8 #include <linux/prime_numbers.h>
9
10 #include "gem/i915_gem_internal.h"
11 #include "gem/i915_gem_lmem.h"
12 #include "gem/i915_gem_region.h"
13 #include "gem/i915_gem_ttm.h"
14 #include "gem/i915_gem_ttm_move.h"
15 #include "gt/intel_engine_pm.h"
16 #include "gt/intel_gpu_commands.h"
17 #include "gt/intel_gt.h"
18 #include "gt/intel_gt_pm.h"
19 #include "gt/intel_migrate.h"
20 #include "i915_ttm_buddy_manager.h"
21
22 #include "huge_gem_object.h"
23 #include "i915_selftest.h"
24 #include "selftests/i915_random.h"
25 #include "selftests/igt_flush_test.h"
26 #include "selftests/igt_reset.h"
27 #include "selftests/igt_mmap.h"
28
29 struct tile {
30         unsigned int width;
31         unsigned int height;
32         unsigned int stride;
33         unsigned int size;
34         unsigned int tiling;
35         unsigned int swizzle;
36 };
37
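/*
 * Pull the requested address bit out of @offset and shift it down to bit 6,
 * ready to be XORed into a tiled offset to emulate the hardware's bit-6
 * swizzling.
 */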
38 static u64 swizzle_bit(unsigned int bit, u64 offset)
39 {
40         return (offset & BIT_ULL(bit)) >> (bit - 6);
41 }
42
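/*
 * Translate a linear byte offset into the equivalent offset once the backing
 * pages are laid out with the requested tiling: split the offset into (x, y)
 * using the stride, relocate it to the start of its tile row, step across the
 * row tile by tile (X tiles store tile->width byte rows, Y tiles 16 or 32 byte
 * spans), and finally apply the bit-6 swizzle.
 *
 * Illustrative example (not taken from the test itself): with the gen4+
 * X-tiling geometry from setup_tile_size() (width 512, height 8, 4KiB tiles)
 * and a stride of 1024, linear offset 1536 is (x, y) = (512, 1) and so lands
 * in the second tile of the first tile row, at 4096 + 1 * 512 = 4608, before
 * any bit-6 swizzle is applied.
 */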
43 static u64 tiled_offset(const struct tile *tile, u64 v)
44 {
45         u64 x, y;
46
47         if (tile->tiling == I915_TILING_NONE)
48                 return v;
49
50         y = div64_u64_rem(v, tile->stride, &x);
51         v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
52
53         if (tile->tiling == I915_TILING_X) {
54                 v += y * tile->width;
55                 v += div64_u64_rem(x, tile->width, &x) << tile->size;
56                 v += x;
57         } else if (tile->width == 128) {
58                 const unsigned int ytile_span = 16;
59                 const unsigned int ytile_height = 512;
60
61                 v += y * ytile_span;
62                 v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
63                 v += x;
64         } else {
65                 const unsigned int ytile_span = 32;
66                 const unsigned int ytile_height = 256;
67
68                 v += y * ytile_span;
69                 v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
70                 v += x;
71         }
72
73         switch (tile->swizzle) {
74         case I915_BIT_6_SWIZZLE_9:
75                 v ^= swizzle_bit(9, v);
76                 break;
77         case I915_BIT_6_SWIZZLE_9_10:
78                 v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
79                 break;
80         case I915_BIT_6_SWIZZLE_9_11:
81                 v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
82                 break;
83         case I915_BIT_6_SWIZZLE_9_10_11:
84                 v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
85                 break;
86         }
87
88         return v;
89 }
90
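/*
 * Pick a single random page of the object, pin a partial GGTT view covering
 * it, write the page index through the GTT iomap, then use tiled_offset() to
 * find where that dword should have landed in the backing pages and verify it
 * via a CPU kmap.
 */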
91 static int check_partial_mapping(struct drm_i915_gem_object *obj,
92                                  const struct tile *tile,
93                                  struct rnd_state *prng)
94 {
95         const unsigned long npages = obj->base.size / PAGE_SIZE;
96         struct drm_i915_private *i915 = to_i915(obj->base.dev);
97         struct i915_gtt_view view;
98         struct i915_vma *vma;
99         unsigned long page;
100         u32 __iomem *io;
101         struct page *p;
102         unsigned int n;
103         u64 offset;
104         u32 *cpu;
105         int err;
106
107         err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
108         if (err) {
109                 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
110                        tile->tiling, tile->stride, err);
111                 return err;
112         }
113
114         GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
115         GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
116
117         i915_gem_object_lock(obj, NULL);
118         err = i915_gem_object_set_to_gtt_domain(obj, true);
119         i915_gem_object_unlock(obj);
120         if (err) {
121                 pr_err("Failed to flush to GTT write domain; err=%d\n", err);
122                 return err;
123         }
124
125         page = i915_prandom_u32_max_state(npages, prng);
126         view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
127
128         vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
129         if (IS_ERR(vma)) {
130                 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
131                        page, (int)PTR_ERR(vma));
132                 return PTR_ERR(vma);
133         }
134
135         n = page - view.partial.offset;
136         GEM_BUG_ON(n >= view.partial.size);
137
138         io = i915_vma_pin_iomap(vma);
139         i915_vma_unpin(vma);
140         if (IS_ERR(io)) {
141                 pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
142                        page, (int)PTR_ERR(io));
143                 err = PTR_ERR(io);
144                 goto out;
145         }
146
147         iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
148         i915_vma_unpin_iomap(vma);
149
150         offset = tiled_offset(tile, page << PAGE_SHIFT);
151         if (offset >= obj->base.size)
152                 goto out;
153
154         intel_gt_flush_ggtt_writes(to_gt(i915));
155
156         p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
157         cpu = kmap(p) + offset_in_page(offset);
158         drm_clflush_virt_range(cpu, sizeof(*cpu));
159         if (*cpu != (u32)page) {
160                 pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
161                        page, n,
162                        view.partial.offset,
163                        view.partial.size,
164                        vma->size >> PAGE_SHIFT,
165                        tile->tiling ? tile_row_pages(obj) : 0,
166                        vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
167                        offset >> PAGE_SHIFT,
168                        (unsigned int)offset_in_page(offset),
169                        offset,
170                        (u32)page, *cpu);
171                 err = -EINVAL;
172         }
173         *cpu = 0;
174         drm_clflush_virt_range(cpu, sizeof(*cpu));
175         kunmap(p);
176
177 out:
178         i915_gem_object_lock(obj, NULL);
179         i915_vma_destroy(vma);
180         i915_gem_object_unlock(obj);
181         return err;
182 }
183
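/*
 * Exhaustive variant of check_partial_mapping(): walk every prime-numbered
 * page of the object (until end_time expires) and perform the same
 * write-through-GTT, read-back-through-CPU verification for each.
 */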
184 static int check_partial_mappings(struct drm_i915_gem_object *obj,
185                                   const struct tile *tile,
186                                   unsigned long end_time)
187 {
188         const unsigned int nreal = obj->scratch / PAGE_SIZE;
189         const unsigned long npages = obj->base.size / PAGE_SIZE;
190         struct drm_i915_private *i915 = to_i915(obj->base.dev);
191         struct i915_vma *vma;
192         unsigned long page;
193         int err;
194
195         err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
196         if (err) {
197                 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
198                        tile->tiling, tile->stride, err);
199                 return err;
200         }
201
202         GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
203         GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
204
205         i915_gem_object_lock(obj, NULL);
206         err = i915_gem_object_set_to_gtt_domain(obj, true);
207         i915_gem_object_unlock(obj);
208         if (err) {
209                 pr_err("Failed to flush to GTT write domain; err=%d\n", err);
210                 return err;
211         }
212
213         for_each_prime_number_from(page, 1, npages) {
214                 struct i915_gtt_view view =
215                         compute_partial_view(obj, page, MIN_CHUNK_PAGES);
216                 u32 __iomem *io;
217                 struct page *p;
218                 unsigned int n;
219                 u64 offset;
220                 u32 *cpu;
221
222                 GEM_BUG_ON(view.partial.size > nreal);
223                 cond_resched();
224
225                 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
226                 if (IS_ERR(vma)) {
227                         pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
228                                page, (int)PTR_ERR(vma));
229                         return PTR_ERR(vma);
230                 }
231
232                 n = page - view.partial.offset;
233                 GEM_BUG_ON(n >= view.partial.size);
234
235                 io = i915_vma_pin_iomap(vma);
236                 i915_vma_unpin(vma);
237                 if (IS_ERR(io)) {
238                         pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
239                                page, (int)PTR_ERR(io));
240                         return PTR_ERR(io);
241                 }
242
243                 iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
244                 i915_vma_unpin_iomap(vma);
245
246                 offset = tiled_offset(tile, page << PAGE_SHIFT);
247                 if (offset >= obj->base.size)
248                         continue;
249
250                 intel_gt_flush_ggtt_writes(to_gt(i915));
251
252                 p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
253                 cpu = kmap(p) + offset_in_page(offset);
254                 drm_clflush_virt_range(cpu, sizeof(*cpu));
255                 if (*cpu != (u32)page) {
256                         pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
257                                page, n,
258                                view.partial.offset,
259                                view.partial.size,
260                                vma->size >> PAGE_SHIFT,
261                                tile->tiling ? tile_row_pages(obj) : 0,
262                                vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
263                                offset >> PAGE_SHIFT,
264                                (unsigned int)offset_in_page(offset),
265                                offset,
266                                (u32)page, *cpu);
267                         err = -EINVAL;
268                 }
269                 *cpu = 0;
270                 drm_clflush_virt_range(cpu, sizeof(*cpu));
271                 kunmap(p);
272                 if (err)
273                         return err;
274
275                 i915_gem_object_lock(obj, NULL);
276                 i915_vma_destroy(vma);
277                 i915_gem_object_unlock(obj);
278
279                 if (igt_timeout(end_time,
280                                 "%s: timed out after tiling=%d stride=%d\n",
281                                 __func__, tile->tiling, tile->stride))
282                         return -EINTR;
283         }
284
285         return 0;
286 }
287
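/*
 * Fill in the tile geometry (row width, rows per tile, log2 of the tile size)
 * for this tiling mode and GPU generation, and return the largest pitch, in
 * units of tile width, that the tests will exercise, derived from the fence
 * register pitch limits.
 */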
288 static unsigned int
289 setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
290 {
291         if (GRAPHICS_VER(i915) <= 2) {
292                 tile->height = 16;
293                 tile->width = 128;
294                 tile->size = 11;
295         } else if (tile->tiling == I915_TILING_Y &&
296                    HAS_128_BYTE_Y_TILING(i915)) {
297                 tile->height = 32;
298                 tile->width = 128;
299                 tile->size = 12;
300         } else {
301                 tile->height = 8;
302                 tile->width = 512;
303                 tile->size = 12;
304         }
305
306         if (GRAPHICS_VER(i915) < 4)
307                 return 8192 / tile->width;
308         else if (GRAPHICS_VER(i915) < 7)
309                 return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
310         else
311                 return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
312 }
313
314 static int igt_partial_tiling(void *arg)
315 {
316         const unsigned int nreal = 1 << 12; /* largest tile row x2 */
317         struct drm_i915_private *i915 = arg;
318         struct drm_i915_gem_object *obj;
319         intel_wakeref_t wakeref;
320         int tiling;
321         int err;
322
323         if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
324                 return 0;
325
326         /* We want to check the page mapping and fencing of a large object
327          * mmapped through the GTT. The object we create is larger than can
328          * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
329          * We then check that a write through each partial GGTT vma ends up
330          * in the right set of pages within the object, and with the expected
331          * tiling, which we verify by manual swizzling.
332          */
333
334         obj = huge_gem_object(i915,
335                               nreal << PAGE_SHIFT,
336                               (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
337         if (IS_ERR(obj))
338                 return PTR_ERR(obj);
339
340         err = i915_gem_object_pin_pages_unlocked(obj);
341         if (err) {
342                 pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
343                        nreal, obj->base.size / PAGE_SIZE, err);
344                 goto out;
345         }
346
347         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
348
349         if (1) {
350                 IGT_TIMEOUT(end);
351                 struct tile tile;
352
353                 tile.height = 1;
354                 tile.width = 1;
355                 tile.size = 0;
356                 tile.stride = 0;
357                 tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
358                 tile.tiling = I915_TILING_NONE;
359
360                 err = check_partial_mappings(obj, &tile, end);
361                 if (err && err != -EINTR)
362                         goto out_unlock;
363         }
364
365         for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
366                 IGT_TIMEOUT(end);
367                 unsigned int max_pitch;
368                 unsigned int pitch;
369                 struct tile tile;
370
371                 if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
372                         /*
373                          * The swizzling pattern is actually unknown as it
374                          * varies based on the physical address of each page.
375                          * See i915_gem_detect_bit_6_swizzle().
376                          */
377                         break;
378
379                 tile.tiling = tiling;
380                 switch (tiling) {
381                 case I915_TILING_X:
382                         tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
383                         break;
384                 case I915_TILING_Y:
385                         tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
386                         break;
387                 }
388
389                 GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
390                 if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
391                     tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
392                         continue;
393
394                 max_pitch = setup_tile_size(&tile, i915);
395
396                 for (pitch = max_pitch; pitch; pitch >>= 1) {
397                         tile.stride = tile.width * pitch;
398                         err = check_partial_mappings(obj, &tile, end);
399                         if (err == -EINTR)
400                                 goto next_tiling;
401                         if (err)
402                                 goto out_unlock;
403
404                         if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
405                                 tile.stride = tile.width * (pitch - 1);
406                                 err = check_partial_mappings(obj, &tile, end);
407                                 if (err == -EINTR)
408                                         goto next_tiling;
409                                 if (err)
410                                         goto out_unlock;
411                         }
412
413                         if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
414                                 tile.stride = tile.width * (pitch + 1);
415                                 err = check_partial_mappings(obj, &tile, end);
416                                 if (err == -EINTR)
417                                         goto next_tiling;
418                                 if (err)
419                                         goto out_unlock;
420                         }
421                 }
422
423                 if (GRAPHICS_VER(i915) >= 4) {
424                         for_each_prime_number(pitch, max_pitch) {
425                                 tile.stride = tile.width * pitch;
426                                 err = check_partial_mappings(obj, &tile, end);
427                                 if (err == -EINTR)
428                                         goto next_tiling;
429                                 if (err)
430                                         goto out_unlock;
431                         }
432                 }
433
434 next_tiling: ;
435         }
436
437 out_unlock:
438         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
439         i915_gem_object_unpin_pages(obj);
440 out:
441         i915_gem_object_put(obj);
442         return err;
443 }
444
445 static int igt_smoke_tiling(void *arg)
446 {
447         const unsigned int nreal = 1 << 12; /* largest tile row x2 */
448         struct drm_i915_private *i915 = arg;
449         struct drm_i915_gem_object *obj;
450         intel_wakeref_t wakeref;
451         I915_RND_STATE(prng);
452         unsigned long count;
453         IGT_TIMEOUT(end);
454         int err;
455
456         if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
457                 return 0;
458
459         /*
460          * igt_partial_tiling() does an exhaustive check of partial tiling
461          * chunking, but will undoubtedly run out of time. Here, we do a
462          * randomised search and hope that over many 1s runs with different
463          * seeds we will do a thorough check.
464          *
465          * Remember to look at the st_seed if we see a flip-flop in BAT!
466          */
467
468         if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
469                 return 0;
470
471         obj = huge_gem_object(i915,
472                               nreal << PAGE_SHIFT,
473                               (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
474         if (IS_ERR(obj))
475                 return PTR_ERR(obj);
476
477         err = i915_gem_object_pin_pages_unlocked(obj);
478         if (err) {
479                 pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
480                        nreal, obj->base.size / PAGE_SIZE, err);
481                 goto out;
482         }
483
484         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
485
486         count = 0;
487         do {
488                 struct tile tile;
489
490                 tile.tiling =
491                         i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
492                 switch (tile.tiling) {
493                 case I915_TILING_NONE:
494                         tile.height = 1;
495                         tile.width = 1;
496                         tile.size = 0;
497                         tile.stride = 0;
498                         tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
499                         break;
500
501                 case I915_TILING_X:
502                         tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
503                         break;
504                 case I915_TILING_Y:
505                         tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
506                         break;
507                 }
508
509                 if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
510                     tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
511                         continue;
512
513                 if (tile.tiling != I915_TILING_NONE) {
514                         unsigned int max_pitch = setup_tile_size(&tile, i915);
515
516                         tile.stride =
517                                 i915_prandom_u32_max_state(max_pitch, &prng);
518                         tile.stride = (1 + tile.stride) * tile.width;
519                         if (GRAPHICS_VER(i915) < 4)
520                                 tile.stride = rounddown_pow_of_two(tile.stride);
521                 }
522
523                 err = check_partial_mapping(obj, &tile, &prng);
524                 if (err)
525                         break;
526
527                 count++;
528         } while (!__igt_timeout(end, NULL));
529
530         pr_info("%s: Completed %lu trials\n", __func__, count);
531
532         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
533         i915_gem_object_unpin_pages(obj);
534 out:
535         i915_gem_object_put(obj);
536         return err;
537 }
538
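/*
 * Submit a write to the object from every uabi engine and then drop our own
 * reference, so the object stays alive only through its active references and
 * is expected to be reaped once those requests retire (see
 * igt_mmap_offset_exhaustion()).
 */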
539 static int make_obj_busy(struct drm_i915_gem_object *obj)
540 {
541         struct drm_i915_private *i915 = to_i915(obj->base.dev);
542         struct intel_engine_cs *engine;
543
544         for_each_uabi_engine(engine, i915) {
545                 struct i915_request *rq;
546                 struct i915_vma *vma;
547                 struct i915_gem_ww_ctx ww;
548                 int err;
549
550                 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
551                 if (IS_ERR(vma))
552                         return PTR_ERR(vma);
553
554                 i915_gem_ww_ctx_init(&ww, false);
555 retry:
556                 err = i915_gem_object_lock(obj, &ww);
557                 if (!err)
558                         err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
559                 if (err)
560                         goto err;
561
562                 rq = intel_engine_create_kernel_request(engine);
563                 if (IS_ERR(rq)) {
564                         err = PTR_ERR(rq);
565                         goto err_unpin;
566                 }
567
568                 err = i915_request_await_object(rq, vma->obj, true);
569                 if (err == 0)
570                         err = i915_vma_move_to_active(vma, rq,
571                                                       EXEC_OBJECT_WRITE);
572
573                 i915_request_add(rq);
574 err_unpin:
575                 i915_vma_unpin(vma);
576 err:
577                 if (err == -EDEADLK) {
578                         err = i915_gem_ww_ctx_backoff(&ww);
579                         if (!err)
580                                 goto retry;
581                 }
582                 i915_gem_ww_ctx_fini(&ww);
583                 if (err)
584                         return err;
585         }
586
587         i915_gem_object_put(obj); /* keep it alive only via its active ref */
588         return 0;
589 }
590
591 static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
592 {
593         if (HAS_LMEM(i915))
594                 return I915_MMAP_TYPE_FIXED;
595
596         return I915_MMAP_TYPE_GTT;
597 }
598
599 static struct drm_i915_gem_object *
600 create_sys_or_internal(struct drm_i915_private *i915,
601                        unsigned long size)
602 {
603         if (HAS_LMEM(i915)) {
604                 struct intel_memory_region *sys_region =
605                         i915->mm.regions[INTEL_REGION_SMEM];
606
607                 return __i915_gem_object_create_user(i915, size, &sys_region, 1);
608         }
609
610         return i915_gem_object_create_internal(i915, size);
611 }
612
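/*
 * Create a throwaway object of the given size and check that reserving an
 * mmap offset for it completes with the expected return code.
 */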
613 static bool assert_mmap_offset(struct drm_i915_private *i915,
614                                unsigned long size,
615                                int expected)
616 {
617         struct drm_i915_gem_object *obj;
618         u64 offset;
619         int ret;
620
621         obj = create_sys_or_internal(i915, size);
622         if (IS_ERR(obj))
623                 return expected && expected == PTR_ERR(obj);
624
625         ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
626         i915_gem_object_put(obj);
627
628         return ret == expected;
629 }
630
631 static void disable_retire_worker(struct drm_i915_private *i915)
632 {
633         i915_gem_driver_unregister__shrinker(i915);
634         intel_gt_pm_get(to_gt(i915));
635         cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
636 }
637
638 static void restore_retire_worker(struct drm_i915_private *i915)
639 {
640         igt_flush_test(i915);
641         intel_gt_pm_put(to_gt(i915));
642         i915_gem_driver_register__shrinker(i915);
643 }
644
645 static void mmap_offset_lock(struct drm_i915_private *i915)
646         __acquires(&i915->drm.vma_offset_manager->vm_lock)
647 {
648         write_lock(&i915->drm.vma_offset_manager->vm_lock);
649 }
650
651 static void mmap_offset_unlock(struct drm_i915_private *i915)
652         __releases(&i915->drm.vma_offset_manager->vm_lock)
653 {
654         write_unlock(&i915->drm.vma_offset_manager->vm_lock);
655 }
656
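/*
 * Trim the mmap-offset address space down to a single page-sized hole and
 * then probe the error handling when it is exhausted: a one-page object still
 * fits, anything larger must fail, and dead-but-busy objects queued at the
 * end are expected to be reaped by the retire worker.
 */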
657 static int igt_mmap_offset_exhaustion(void *arg)
658 {
659         struct drm_i915_private *i915 = arg;
660         struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
661         struct drm_i915_gem_object *obj;
662         struct drm_mm_node *hole, *next;
663         int loop, err = 0;
664         u64 offset;
665         int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
666
667         /* Disable background reaper */
668         disable_retire_worker(i915);
669         GEM_BUG_ON(!to_gt(i915)->awake);
670         intel_gt_retire_requests(to_gt(i915));
671         i915_gem_drain_freed_objects(i915);
672
673         /* Trim the device mmap space to only a page */
674         mmap_offset_lock(i915);
675         loop = 1; /* PAGE_SIZE units */
676         list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
677                 struct drm_mm_node *resv;
678
679                 resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
680                 if (!resv) {
681                         err = -ENOMEM;
682                         goto out_park;
683                 }
684
685                 resv->start = drm_mm_hole_node_start(hole) + loop;
686                 resv->size = hole->hole_size - loop;
687                 resv->color = -1ul;
688                 loop = 0;
689
690                 if (!resv->size) {
691                         kfree(resv);
692                         continue;
693                 }
694
695                 pr_debug("Reserving hole [%llx + %llx]\n",
696                          resv->start, resv->size);
697
698                 err = drm_mm_reserve_node(mm, resv);
699                 if (err) {
700                         pr_err("Failed to trim VMA manager, err=%d\n", err);
701                         kfree(resv);
702                         goto out_park;
703                 }
704         }
705         GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
706         mmap_offset_unlock(i915);
707
708         /* Just fits! */
709         if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
710                 pr_err("Unable to insert object into single page hole\n");
711                 err = -EINVAL;
712                 goto out;
713         }
714
715         /* Too large */
716         if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
717                 pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
718                 err = -EINVAL;
719                 goto out;
720         }
721
722         /* Fill the hole, further allocation attempts should then fail */
723         obj = create_sys_or_internal(i915, PAGE_SIZE);
724         if (IS_ERR(obj)) {
725                 err = PTR_ERR(obj);
726                 pr_err("Unable to create object for reclaimed hole\n");
727                 goto out;
728         }
729
730         err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
731         if (err) {
732                 pr_err("Unable to insert object into reclaimed hole\n");
733                 goto err_obj;
734         }
735
736         if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
737                 pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
738                 err = -EINVAL;
739                 goto err_obj;
740         }
741
742         i915_gem_object_put(obj);
743
744         /* Now fill with busy dead objects that we expect to reap */
745         for (loop = 0; loop < 3; loop++) {
746                 if (intel_gt_is_wedged(to_gt(i915)))
747                         break;
748
749                 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
750                 if (IS_ERR(obj)) {
751                         err = PTR_ERR(obj);
752                         goto out;
753                 }
754
755                 err = make_obj_busy(obj);
756                 if (err) {
757                         pr_err("[loop %d] Failed to busy the object\n", loop);
758                         goto err_obj;
759                 }
760         }
761
762 out:
763         mmap_offset_lock(i915);
764 out_park:
765         drm_mm_for_each_node_safe(hole, next, mm) {
766                 if (hole->color != -1ul)
767                         continue;
768
769                 drm_mm_remove_node(hole);
770                 kfree(hole);
771         }
772         mmap_offset_unlock(i915);
773         restore_retire_worker(i915);
774         return err;
775 err_obj:
776         i915_gem_object_put(obj);
777         goto out;
778 }
779
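/*
 * gtt_set()/gtt_check() and wc_set()/wc_check() below prime the object with
 * POISON_INUSE through a kernel GGTT or WC mapping and later verify that the
 * user mmap replaced it with POISON_FREE, i.e. that writes through the mmap
 * really reached the backing store.
 */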
780 static int gtt_set(struct drm_i915_gem_object *obj)
781 {
782         struct i915_vma *vma;
783         void __iomem *map;
784         int err = 0;
785
786         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
787         if (IS_ERR(vma))
788                 return PTR_ERR(vma);
789
790         intel_gt_pm_get(vma->vm->gt);
791         map = i915_vma_pin_iomap(vma);
792         i915_vma_unpin(vma);
793         if (IS_ERR(map)) {
794                 err = PTR_ERR(map);
795                 goto out;
796         }
797
798         memset_io(map, POISON_INUSE, obj->base.size);
799         i915_vma_unpin_iomap(vma);
800
801 out:
802         intel_gt_pm_put(vma->vm->gt);
803         return err;
804 }
805
806 static int gtt_check(struct drm_i915_gem_object *obj)
807 {
808         struct i915_vma *vma;
809         void __iomem *map;
810         int err = 0;
811
812         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
813         if (IS_ERR(vma))
814                 return PTR_ERR(vma);
815
816         intel_gt_pm_get(vma->vm->gt);
817         map = i915_vma_pin_iomap(vma);
818         i915_vma_unpin(vma);
819         if (IS_ERR(map)) {
820                 err = PTR_ERR(map);
821                 goto out;
822         }
823
824         if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
825                 pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
826                        obj->mm.region->name);
827                 err = -EINVAL;
828         }
829         i915_vma_unpin_iomap(vma);
830
831 out:
832         intel_gt_pm_put(vma->vm->gt);
833         return err;
834 }
835
836 static int wc_set(struct drm_i915_gem_object *obj)
837 {
838         void *vaddr;
839
840         vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
841         if (IS_ERR(vaddr))
842                 return PTR_ERR(vaddr);
843
844         memset(vaddr, POISON_INUSE, obj->base.size);
845         i915_gem_object_flush_map(obj);
846         i915_gem_object_unpin_map(obj);
847
848         return 0;
849 }
850
851 static int wc_check(struct drm_i915_gem_object *obj)
852 {
853         void *vaddr;
854         int err = 0;
855
856         vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
857         if (IS_ERR(vaddr))
858                 return PTR_ERR(vaddr);
859
860         if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
861                 pr_err("%s: Write via mmap did not land in backing store (WC)\n",
862                        obj->mm.region->name);
863                 err = -EINVAL;
864         }
865         i915_gem_object_unpin_map(obj);
866
867         return err;
868 }
869
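/*
 * Does the object support the requested mmap type? Objects whose backend
 * provides its own ->mmap_offset only expose the FIXED type, a GTT mmap
 * additionally needs a mappable aperture, and the CPU mmap types need the
 * object to be backed by struct pages or iomem.
 */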
870 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
871 {
872         struct drm_i915_private *i915 = to_i915(obj->base.dev);
873         bool no_map;
874
875         if (obj->ops->mmap_offset)
876                 return type == I915_MMAP_TYPE_FIXED;
877         else if (type == I915_MMAP_TYPE_FIXED)
878                 return false;
879
880         if (type == I915_MMAP_TYPE_GTT &&
881             !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
882                 return false;
883
884         i915_gem_object_lock(obj, NULL);
885         no_map = (type != I915_MMAP_TYPE_GTT &&
886                   !i915_gem_object_has_struct_page(obj) &&
887                   !i915_gem_object_has_iomem(obj));
888         i915_gem_object_unlock(obj);
889
890         return !no_map;
891 }
892
893 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
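/*
 * Fill the object with POISON_INUSE via a kernel mapping, mmap it with the
 * requested type, check that every dword reads back as POISON_INUSE, rewrite
 * it as POISON_FREE through the user mapping, then verify via the kernel
 * mapping that those writes reached the backing store.
 */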
894 static int __igt_mmap(struct drm_i915_private *i915,
895                       struct drm_i915_gem_object *obj,
896                       enum i915_mmap_type type)
897 {
898         struct vm_area_struct *area;
899         unsigned long addr;
900         int err, i;
901         u64 offset;
902
903         if (!can_mmap(obj, type))
904                 return 0;
905
906         err = wc_set(obj);
907         if (err == -ENXIO)
908                 err = gtt_set(obj);
909         if (err)
910                 return err;
911
912         err = __assign_mmap_offset(obj, type, &offset, NULL);
913         if (err)
914                 return err;
915
916         addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
917         if (IS_ERR_VALUE(addr))
918                 return addr;
919
920         pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
921
922         mmap_read_lock(current->mm);
923         area = vma_lookup(current->mm, addr);
924         mmap_read_unlock(current->mm);
925         if (!area) {
926                 pr_err("%s: Did not create a vm_area_struct for the mmap\n",
927                        obj->mm.region->name);
928                 err = -EINVAL;
929                 goto out_unmap;
930         }
931
932         for (i = 0; i < obj->base.size / sizeof(u32); i++) {
933                 u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
934                 u32 x;
935
936                 if (get_user(x, ux)) {
937                         pr_err("%s: Unable to read from mmap, offset:%zd\n",
938                                obj->mm.region->name, i * sizeof(x));
939                         err = -EFAULT;
940                         goto out_unmap;
941                 }
942
943                 if (x != expand32(POISON_INUSE)) {
944                         pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
945                                obj->mm.region->name,
946                                i * sizeof(x), x, expand32(POISON_INUSE));
947                         err = -EINVAL;
948                         goto out_unmap;
949                 }
950
951                 x = expand32(POISON_FREE);
952                 if (put_user(x, ux)) {
953                         pr_err("%s: Unable to write to mmap, offset:%zd\n",
954                                obj->mm.region->name, i * sizeof(x));
955                         err = -EFAULT;
956                         goto out_unmap;
957                 }
958         }
959
960         if (type == I915_MMAP_TYPE_GTT)
961                 intel_gt_flush_ggtt_writes(to_gt(i915));
962
963         err = wc_check(obj);
964         if (err == -ENXIO)
965                 err = gtt_check(obj);
966 out_unmap:
967         vm_munmap(addr, obj->base.size);
968         return err;
969 }
970
971 static int igt_mmap(void *arg)
972 {
973         struct drm_i915_private *i915 = arg;
974         struct intel_memory_region *mr;
975         enum intel_region_id id;
976
977         for_each_memory_region(mr, i915, id) {
978                 unsigned long sizes[] = {
979                         PAGE_SIZE,
980                         mr->min_page_size,
981                         SZ_4M,
982                 };
983                 int i;
984
985                 if (mr->private)
986                         continue;
987
988                 for (i = 0; i < ARRAY_SIZE(sizes); i++) {
989                         struct drm_i915_gem_object *obj;
990                         int err;
991
992                         obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
993                         if (obj == ERR_PTR(-ENODEV))
994                                 continue;
995
996                         if (IS_ERR(obj))
997                                 return PTR_ERR(obj);
998
999                         err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
1000                         if (err == 0)
1001                                 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
1002                         if (err == 0)
1003                                 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
1004
1005                         i915_gem_object_put(obj);
1006                         if (err)
1007                                 return err;
1008                 }
1009         }
1010
1011         return 0;
1012 }
1013
1014 static void igt_close_objects(struct drm_i915_private *i915,
1015                               struct list_head *objects)
1016 {
1017         struct drm_i915_gem_object *obj, *on;
1018
1019         list_for_each_entry_safe(obj, on, objects, st_link) {
1020                 i915_gem_object_lock(obj, NULL);
1021                 if (i915_gem_object_has_pinned_pages(obj))
1022                         i915_gem_object_unpin_pages(obj);
1023                 /* Don't pollute the memory region between tests */
1024                 __i915_gem_object_put_pages(obj);
1025                 i915_gem_object_unlock(obj);
1026                 list_del(&obj->st_link);
1027                 i915_gem_object_put(obj);
1028         }
1029
1030         cond_resched();
1031
1032         i915_gem_drain_freed_objects(i915);
1033 }
1034
1035 static void igt_make_evictable(struct list_head *objects)
1036 {
1037         struct drm_i915_gem_object *obj;
1038
1039         list_for_each_entry(obj, objects, st_link) {
1040                 i915_gem_object_lock(obj, NULL);
1041                 if (i915_gem_object_has_pinned_pages(obj))
1042                         i915_gem_object_unpin_pages(obj);
1043                 i915_gem_object_unlock(obj);
1044         }
1045
1046         cond_resched();
1047 }
1048
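/*
 * Consume the CPU-visible (io_size) portion of the region with pinned
 * objects, halving the allocation size on -ENXIO/-ENOMEM down to the region's
 * minimum page size. Later migration tests rely on the mappable portion being
 * full.
 */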
1049 static int igt_fill_mappable(struct intel_memory_region *mr,
1050                              struct list_head *objects)
1051 {
1052         u64 size, total;
1053         int err;
1054
1055         total = 0;
1056         size = mr->io_size;
1057         do {
1058                 struct drm_i915_gem_object *obj;
1059
1060                 obj = i915_gem_object_create_region(mr, size, 0, 0);
1061                 if (IS_ERR(obj)) {
1062                         err = PTR_ERR(obj);
1063                         goto err_close;
1064                 }
1065
1066                 list_add(&obj->st_link, objects);
1067
1068                 err = i915_gem_object_pin_pages_unlocked(obj);
1069                 if (err) {
1070                         if (err != -ENXIO && err != -ENOMEM)
1071                                 goto err_close;
1072
1073                         if (size == mr->min_page_size) {
1074                                 err = 0;
1075                                 break;
1076                         }
1077
1078                         size >>= 1;
1079                         continue;
1080                 }
1081
1082                 total += obj->base.size;
1083         } while (1);
1084
1085         pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
1086         return 0;
1087
1088 err_close:
1089         igt_close_objects(mr->i915, objects);
1090         return err;
1091 }
1092
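/*
 * Fault the user mapping at @addr and verify the poison pattern, much like
 * __igt_mmap(). With @unfaultable set the expectation flips: every access
 * must fail with -EFAULT because the pages cannot be made CPU-visible.
 */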
1093 static int ___igt_mmap_migrate(struct drm_i915_private *i915,
1094                                struct drm_i915_gem_object *obj,
1095                                unsigned long addr,
1096                                bool unfaultable)
1097 {
1098         struct vm_area_struct *area;
1099         int err = 0, i;
1100
1101         pr_info("igt_mmap(%s, %d) @ %lx\n",
1102                 obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
1103
1104         mmap_read_lock(current->mm);
1105         area = vma_lookup(current->mm, addr);
1106         mmap_read_unlock(current->mm);
1107         if (!area) {
1108                 pr_err("%s: Did not create a vm_area_struct for the mmap\n",
1109                        obj->mm.region->name);
1110                 err = -EINVAL;
1111                 goto out_unmap;
1112         }
1113
1114         for (i = 0; i < obj->base.size / sizeof(u32); i++) {
1115                 u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
1116                 u32 x;
1117
1118                 if (get_user(x, ux)) {
1119                         err = -EFAULT;
1120                         if (!unfaultable) {
1121                                 pr_err("%s: Unable to read from mmap, offset:%zd\n",
1122                                        obj->mm.region->name, i * sizeof(x));
1123                                 goto out_unmap;
1124                         }
1125
1126                         continue;
1127                 }
1128
1129                 if (unfaultable) {
1130                         pr_err("%s: Faulted unmappable memory\n",
1131                                obj->mm.region->name);
1132                         err = -EINVAL;
1133                         goto out_unmap;
1134                 }
1135
1136                 if (x != expand32(POISON_INUSE)) {
1137                         pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
1138                                obj->mm.region->name,
1139                                i * sizeof(x), x, expand32(POISON_INUSE));
1140                         err = -EINVAL;
1141                         goto out_unmap;
1142                 }
1143
1144                 x = expand32(POISON_FREE);
1145                 if (put_user(x, ux)) {
1146                         pr_err("%s: Unable to write to mmap, offset:%zd\n",
1147                                obj->mm.region->name, i * sizeof(x));
1148                         err = -EFAULT;
1149                         goto out_unmap;
1150                 }
1151         }
1152
1153         if (unfaultable) {
1154                 if (err == -EFAULT)
1155                         err = 0;
1156         } else {
1157                 obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
1158                 err = wc_check(obj);
1159         }
1160 out_unmap:
1161         vm_munmap(addr, obj->base.size);
1162         return err;
1163 }
1164
1165 #define IGT_MMAP_MIGRATE_TOPDOWN     (1 << 0)
1166 #define IGT_MMAP_MIGRATE_FILL        (1 << 1)
1167 #define IGT_MMAP_MIGRATE_EVICTABLE   (1 << 2)
1168 #define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
1169 #define IGT_MMAP_MIGRATE_FAIL_GPU    (1 << 4)
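/*
 * Create a one-page object with the given placement list, clear it to
 * POISON_INUSE using the GPU migrate context, then fault a FIXED mmap of it
 * and check that it ends up resident in @expected_mr. The flags above
 * optionally allocate it GPU-only (non-mappable), pre-fill the mappable
 * portion of the region, make those filler objects evictable, expect the
 * fault itself to fail, or inject a TTM move failure while faulting.
 */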
1170 static int __igt_mmap_migrate(struct intel_memory_region **placements,
1171                               int n_placements,
1172                               struct intel_memory_region *expected_mr,
1173                               unsigned int flags)
1174 {
1175         struct drm_i915_private *i915 = placements[0]->i915;
1176         struct drm_i915_gem_object *obj;
1177         struct i915_request *rq = NULL;
1178         unsigned long addr;
1179         LIST_HEAD(objects);
1180         u64 offset;
1181         int err;
1182
1183         obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
1184                                             placements,
1185                                             n_placements);
1186         if (IS_ERR(obj))
1187                 return PTR_ERR(obj);
1188
1189         if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
1190                 obj->flags |= I915_BO_ALLOC_GPU_ONLY;
1191
1192         err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
1193         if (err)
1194                 goto out_put;
1195
1196         /*
1197          * This will eventually create a GEM context, due to opening a dummy drm
1198          * file, which needs a tiny amount of mappable device memory for the top
1199          * level paging structures (and perhaps scratch), so make sure we
1200          * allocate early, to avoid tears.
1201          */
1202         addr = igt_mmap_offset(i915, offset, obj->base.size,
1203                                PROT_WRITE, MAP_SHARED);
1204         if (IS_ERR_VALUE(addr)) {
1205                 err = addr;
1206                 goto out_put;
1207         }
1208
1209         if (flags & IGT_MMAP_MIGRATE_FILL) {
1210                 err = igt_fill_mappable(placements[0], &objects);
1211                 if (err)
1212                         goto out_put;
1213         }
1214
1215         err = i915_gem_object_lock(obj, NULL);
1216         if (err)
1217                 goto out_put;
1218
1219         err = i915_gem_object_pin_pages(obj);
1220         if (err) {
1221                 i915_gem_object_unlock(obj);
1222                 goto out_put;
1223         }
1224
1225         err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
1226                                           obj->mm.pages->sgl, obj->cache_level,
1227                                           i915_gem_object_is_lmem(obj),
1228                                           expand32(POISON_INUSE), &rq);
1229         i915_gem_object_unpin_pages(obj);
1230         if (rq) {
1231                 err = dma_resv_reserve_fences(obj->base.resv, 1);
1232                 if (!err)
1233                         dma_resv_add_fence(obj->base.resv, &rq->fence,
1234                                            DMA_RESV_USAGE_KERNEL);
1235                 i915_request_put(rq);
1236         }
1237         i915_gem_object_unlock(obj);
1238         if (err)
1239                 goto out_put;
1240
1241         if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
1242                 igt_make_evictable(&objects);
1243
1244         if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1245                 err = i915_gem_object_lock(obj, NULL);
1246                 if (err)
1247                         goto out_put;
1248
1249                 /*
1250                  * Ensure we only simulate the gpu failure when faulting the
1251                  * pages.
1252                  */
1253                 err = i915_gem_object_wait_moving_fence(obj, true);
1254                 i915_gem_object_unlock(obj);
1255                 if (err)
1256                         goto out_put;
1257                 i915_ttm_migrate_set_failure_modes(true, false);
1258         }
1259
1260         err = ___igt_mmap_migrate(i915, obj, addr,
1261                                   flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
1262
1263         if (!err && obj->mm.region != expected_mr) {
1264                 pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
1265                 err = -EINVAL;
1266         }
1267
1268         if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1269                 struct intel_gt *gt;
1270                 unsigned int id;
1271
1272                 i915_ttm_migrate_set_failure_modes(false, false);
1273
1274                 for_each_gt(gt, i915, id) {
1275                         intel_wakeref_t wakeref;
1276                         bool wedged;
1277
1278                         mutex_lock(&gt->reset.mutex);
1279                         wedged = test_bit(I915_WEDGED, &gt->reset.flags);
1280                         mutex_unlock(&gt->reset.mutex);
1281                         if (!wedged) {
1282                                 pr_err("gt(%u) not wedged\n", id);
1283                                 err = -EINVAL;
1284                                 continue;
1285                         }
1286
1287                         wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1288                         igt_global_reset_lock(gt);
1289                         intel_gt_reset(gt, ALL_ENGINES, NULL);
1290                         igt_global_reset_unlock(gt);
1291                         intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1292                 }
1293
1294                 if (!i915_gem_object_has_unknown_state(obj)) {
1295                         pr_err("object missing unknown_state\n");
1296                         err = -EINVAL;
1297                 }
1298         }
1299
1300 out_put:
1301         i915_gem_object_put(obj);
1302         igt_close_objects(i915, &objects);
1303         return err;
1304 }
1305
1306 static int igt_mmap_migrate(void *arg)
1307 {
1308         struct drm_i915_private *i915 = arg;
1309         struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
1310         struct intel_memory_region *mr;
1311         enum intel_region_id id;
1312
1313         for_each_memory_region(mr, i915, id) {
1314                 struct intel_memory_region *mixed[] = { mr, system };
1315                 struct intel_memory_region *single[] = { mr };
1316                 struct ttm_resource_manager *man = mr->region_private;
1317                 resource_size_t saved_io_size;
1318                 int err;
1319
1320                 if (mr->private)
1321                         continue;
1322
1323                 if (!mr->io_size)
1324                         continue;
1325
1326                 /*
1327                  * For testing purposes let's force a small BAR, if not already
1328                  * present.
1329                  */
1330                 saved_io_size = mr->io_size;
1331                 if (mr->io_size == mr->total) {
1332                         resource_size_t io_size = mr->io_size;
1333
1334                         io_size = rounddown_pow_of_two(io_size >> 1);
1335                         if (io_size < PAGE_SIZE)
1336                                 continue;
1337
1338                         mr->io_size = io_size;
1339                         i915_ttm_buddy_man_force_visible_size(man,
1340                                                               io_size >> PAGE_SHIFT);
1341                 }
1342
1343                 /*
1344                  * Allocate in the mappable portion, should be no surprises here.
1345                  */
1346                 err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
1347                 if (err)
1348                         goto out_io_size;
1349
1350                 /*
1351                  * Allocate in the non-mappable portion, but force migrating to
1352                  * the mappable portion on fault (LMEM -> LMEM)
1353                  */
1354                 err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1355                                          IGT_MMAP_MIGRATE_TOPDOWN |
1356                                          IGT_MMAP_MIGRATE_FILL |
1357                                          IGT_MMAP_MIGRATE_EVICTABLE);
1358                 if (err)
1359                         goto out_io_size;
1360
1361                 /*
1362                  * Allocate in the non-mappable portion, but force spilling into
1363                  * system memory on fault (LMEM -> SMEM)
1364                  */
1365                 err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
1366                                          IGT_MMAP_MIGRATE_TOPDOWN |
1367                                          IGT_MMAP_MIGRATE_FILL);
1368                 if (err)
1369                         goto out_io_size;
1370
1371                 /*
1372                  * Allocate in the non-mappable portion, but since the mappable
1373                  * portion is already full, and we can't spill to system memory,
1374                  * then we should expect the fault to fail.
1375                  */
1376                 err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1377                                          IGT_MMAP_MIGRATE_TOPDOWN |
1378                                          IGT_MMAP_MIGRATE_FILL |
1379                                          IGT_MMAP_MIGRATE_UNFAULTABLE);
1380                 if (err)
1381                         goto out_io_size;
1382
1383                 /*
1384                  * Allocate in the non-mappable portion, but force migrating to
1385                  * the mappable portion on fault (LMEM -> LMEM). We then also
1386                  * simulate a gpu error when moving the pages when faulting the
1387                  * pages, which should result in wedging the gpu and returning
1388          * SIGBUS in the fault handler, since we can't fall back to
1389                  * memcpy.
1390                  */
1391                 err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1392                                          IGT_MMAP_MIGRATE_TOPDOWN |
1393                                          IGT_MMAP_MIGRATE_FILL |
1394                                          IGT_MMAP_MIGRATE_EVICTABLE |
1395                                          IGT_MMAP_MIGRATE_FAIL_GPU |
1396                                          IGT_MMAP_MIGRATE_UNFAULTABLE);
1397 out_io_size:
1398                 mr->io_size = saved_io_size;
1399                 i915_ttm_buddy_man_force_visible_size(man,
1400                                                       mr->io_size >> PAGE_SHIFT);
1401                 if (err)
1402                         return err;
1403         }
1404
1405         return 0;
1406 }
1407
1408 static const char *repr_mmap_type(enum i915_mmap_type type)
1409 {
1410         switch (type) {
1411         case I915_MMAP_TYPE_GTT: return "gtt";
1412         case I915_MMAP_TYPE_WB: return "wb";
1413         case I915_MMAP_TYPE_WC: return "wc";
1414         case I915_MMAP_TYPE_UC: return "uc";
1415         case I915_MMAP_TYPE_FIXED: return "fixed";
1416         default: return "unknown";
1417         }
1418 }
1419
1420 static bool can_access(struct drm_i915_gem_object *obj)
1421 {
1422         bool access;
1423
1424         i915_gem_object_lock(obj, NULL);
1425         access = i915_gem_object_has_struct_page(obj) ||
1426                 i915_gem_object_has_iomem(obj);
1427         i915_gem_object_unlock(obj);
1428
1429         return access;
1430 }
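/*
 * Check that the object can be peeked and poked via access_process_vm() (the
 * ptrace/debugger path) as well as through the mapping itself: write one
 * pattern through the user pointer and read it back with access_process_vm(),
 * then write a second pattern with access_process_vm() and read it back
 * through the user pointer.
 */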
1431
1432 static int __igt_mmap_access(struct drm_i915_private *i915,
1433                              struct drm_i915_gem_object *obj,
1434                              enum i915_mmap_type type)
1435 {
1436         unsigned long __user *ptr;
1437         unsigned long A, B;
1438         unsigned long x, y;
1439         unsigned long addr;
1440         int err;
1441         u64 offset;
1442
1443         memset(&A, 0xAA, sizeof(A));
1444         memset(&B, 0xBB, sizeof(B));
1445
1446         if (!can_mmap(obj, type) || !can_access(obj))
1447                 return 0;
1448
1449         err = __assign_mmap_offset(obj, type, &offset, NULL);
1450         if (err)
1451                 return err;
1452
1453         addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1454         if (IS_ERR_VALUE(addr))
1455                 return addr;
1456         ptr = (unsigned long __user *)addr;
1457
1458         err = __put_user(A, ptr);
1459         if (err) {
1460                 pr_err("%s(%s): failed to write into user mmap\n",
1461                        obj->mm.region->name, repr_mmap_type(type));
1462                 goto out_unmap;
1463         }
1464
1465         intel_gt_flush_ggtt_writes(to_gt(i915));
1466
1467         err = access_process_vm(current, addr, &x, sizeof(x), 0);
1468         if (err != sizeof(x)) {
1469                 pr_err("%s(%s): access_process_vm() read failed\n",
1470                        obj->mm.region->name, repr_mmap_type(type));
1471                 goto out_unmap;
1472         }
1473
1474         err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
1475         if (err != sizeof(B)) {
1476                 pr_err("%s(%s): access_process_vm() write failed\n",
1477                        obj->mm.region->name, repr_mmap_type(type));
1478                 goto out_unmap;
1479         }
1480
1481         intel_gt_flush_ggtt_writes(to_gt(i915));
1482
1483         err = __get_user(y, ptr);
1484         if (err) {
1485                 pr_err("%s(%s): failed to read from user mmap\n",
1486                        obj->mm.region->name, repr_mmap_type(type));
1487                 goto out_unmap;
1488         }
1489
1490         if (x != A || y != B) {
1491                 pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
1492                        obj->mm.region->name, repr_mmap_type(type),
1493                        x, y);
1494                 err = -EINVAL;
1495                 goto out_unmap;
1496         }
1497
1498 out_unmap:
1499         vm_munmap(addr, obj->base.size);
1500         return err;
1501 }
1502
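/*
 * Run the access check against every memory region: skip private regions,
 * create a single-page user object in each (regions that cannot back such
 * an object return -ENODEV and are skipped), and run the check for each
 * mmap type (combinations the object cannot support are skipped inside
 * the helper).
 */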
1503 static int igt_mmap_access(void *arg)
1504 {
1505         struct drm_i915_private *i915 = arg;
1506         struct intel_memory_region *mr;
1507         enum intel_region_id id;
1508
1509         for_each_memory_region(mr, i915, id) {
1510                 struct drm_i915_gem_object *obj;
1511                 int err;
1512
1513                 if (mr->private)
1514                         continue;
1515
1516                 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1517                 if (obj == ERR_PTR(-ENODEV))
1518                         continue;
1519
1520                 if (IS_ERR(obj))
1521                         return PTR_ERR(obj);
1522
1523                 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
1524                 if (err == 0)
1525                         err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
1526                 if (err == 0)
1527                         err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
1528                 if (err == 0)
1529                         err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
1530                 if (err == 0)
1531                         err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
1532
1533                 i915_gem_object_put(obj);
1534                 if (err)
1535                         return err;
1536         }
1537
1538         return 0;
1539 }
1540
1541 static int __igt_mmap_gpu(struct drm_i915_private *i915,
1542                           struct drm_i915_gem_object *obj,
1543                           enum i915_mmap_type type)
1544 {
1545         struct intel_engine_cs *engine;
1546         unsigned long addr;
1547         u32 __user *ux;
1548         u32 bbe;
1549         int err;
1550         u64 offset;
1551
1552         /*
1553          * Verify that CPU access through the mmap hits the same backing
1554          * store as the GPU, i.e. that a value written via the mmap is read
1555          * back from the same page by the GPU.
1556          */
1557
1558         if (!can_mmap(obj, type))
1559                 return 0;
1560
1561         err = wc_set(obj);
1562         if (err == -ENXIO)
1563                 err = gtt_set(obj);
1564         if (err)
1565                 return err;
1566
1567         err = __assign_mmap_offset(obj, type, &offset, NULL);
1568         if (err)
1569                 return err;
1570
1571         addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1572         if (IS_ERR_VALUE(addr))
1573                 return addr;
1574
1575         ux = u64_to_user_ptr((u64)addr);
1576         bbe = MI_BATCH_BUFFER_END;
1577         if (put_user(bbe, ux)) {
1578                 pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
1579                 err = -EFAULT;
1580                 goto out_unmap;
1581         }
1582
1583         if (type == I915_MMAP_TYPE_GTT)
1584                 intel_gt_flush_ggtt_writes(to_gt(i915));
1585
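        /*
         * For each user-visible engine, bind the object into the kernel
         * context's VM and execute it as a batch: the single
         * MI_BATCH_BUFFER_END written above through the mmap. A -EDEADLK
         * from the ww locking is handled by backing off and retrying.
         */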
1586         for_each_uabi_engine(engine, i915) {
1587                 struct i915_request *rq;
1588                 struct i915_vma *vma;
1589                 struct i915_gem_ww_ctx ww;
1590
1591                 vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
1592                 if (IS_ERR(vma)) {
1593                         err = PTR_ERR(vma);
1594                         goto out_unmap;
1595                 }
1596
1597                 i915_gem_ww_ctx_init(&ww, false);
1598 retry:
1599                 err = i915_gem_object_lock(obj, &ww);
1600                 if (!err)
1601                         err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
1602                 if (err)
1603                         goto out_ww;
1604
1605                 rq = i915_request_create(engine->kernel_context);
1606                 if (IS_ERR(rq)) {
1607                         err = PTR_ERR(rq);
1608                         goto out_unpin;
1609                 }
1610
1611                 err = i915_request_await_object(rq, vma->obj, false);
1612                 if (err == 0)
1613                         err = i915_vma_move_to_active(vma, rq, 0);
1614                 if (err == 0)
1615                         err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
1616                 i915_request_get(rq);
1617                 i915_request_add(rq);
1618
1619                 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1620                         struct drm_printer p =
1621                                 drm_info_printer(engine->i915->drm.dev);
1622
1623                         pr_err("%s(%s, %s): Failed to execute batch\n",
1624                                __func__, engine->name, obj->mm.region->name);
1625                         intel_engine_dump(engine, &p,
1626                                           "%s\n", engine->name);
1627
1628                         intel_gt_set_wedged(engine->gt);
1629                         err = -EIO;
1630                 }
1631                 i915_request_put(rq);
1632
1633 out_unpin:
1634                 i915_vma_unpin(vma);
1635 out_ww:
1636                 if (err == -EDEADLK) {
1637                         err = i915_gem_ww_ctx_backoff(&ww);
1638                         if (!err)
1639                                 goto retry;
1640                 }
1641                 i915_gem_ww_ctx_fini(&ww);
1642                 if (err)
1643                         goto out_unmap;
1644         }
1645
1646 out_unmap:
1647         vm_munmap(addr, obj->base.size);
1648         return err;
1649 }
1650
1651 static int igt_mmap_gpu(void *arg)
1652 {
1653         struct drm_i915_private *i915 = arg;
1654         struct intel_memory_region *mr;
1655         enum intel_region_id id;
1656
1657         for_each_memory_region(mr, i915, id) {
1658                 struct drm_i915_gem_object *obj;
1659                 int err;
1660
1661                 if (mr->private)
1662                         continue;
1663
1664                 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1665                 if (obj == ERR_PTR(-ENODEV))
1666                         continue;
1667
1668                 if (IS_ERR(obj))
1669                         return PTR_ERR(obj);
1670
1671                 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
1672                 if (err == 0)
1673                         err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
1674                 if (err == 0)
1675                         err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
1676
1677                 i915_gem_object_put(obj);
1678                 if (err)
1679                         return err;
1680         }
1681
1682         return 0;
1683 }
1684
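/*
 * apply_to_page_range() callbacks: after prefaulting, every PTE covering
 * the mapping must be present; after the mapping has been revoked, every
 * PTE must have been cleared again.
 */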
1685 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
1686 {
1687         if (!pte_present(*pte) || pte_none(*pte)) {
1688                 pr_err("missing PTE:%lx\n",
1689                        (addr - (unsigned long)data) >> PAGE_SHIFT);
1690                 return -EINVAL;
1691         }
1692
1693         return 0;
1694 }
1695
1696 static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
1697 {
1698         if (pte_present(*pte) && !pte_none(*pte)) {
1699                 pr_err("present PTE:%lx; expected to be revoked\n",
1700                        (addr - (unsigned long)data) >> PAGE_SHIFT);
1701                 return -EINVAL;
1702         }
1703
1704         return 0;
1705 }
1706
1707 static int check_present(unsigned long addr, unsigned long len)
1708 {
1709         return apply_to_page_range(current->mm, addr, len,
1710                                    check_present_pte, (void *)addr);
1711 }
1712
1713 static int check_absent(unsigned long addr, unsigned long len)
1714 {
1715         return apply_to_page_range(current->mm, addr, len,
1716                                    check_absent_pte, (void *)addr);
1717 }
1718
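/*
 * Fault in the whole user range by reading one byte from each page (plus
 * the final byte), so that the subsequent PTE checks see a fully populated
 * mapping.
 */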
1719 static int prefault_range(u64 start, u64 len)
1720 {
1721         const char __user *addr, *end;
1722         char __maybe_unused c;
1723         int err;
1724
1725         addr = u64_to_user_ptr(start);
1726         end = addr + len;
1727
1728         for (; addr < end; addr += PAGE_SIZE) {
1729                 err = __get_user(c, addr);
1730                 if (err)
1731                         return err;
1732         }
1733
1734         return __get_user(c, end - 1);
1735 }
1736
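/*
 * Map the object, prefault the entire range and verify the PTEs are
 * present; then unbind the object from the GGTT (and, for non-GTT mmaps,
 * release its backing pages) and verify that all userspace PTEs have been
 * revoked, so stale mappings can never reach memory that has been reused.
 */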
1737 static int __igt_mmap_revoke(struct drm_i915_private *i915,
1738                              struct drm_i915_gem_object *obj,
1739                              enum i915_mmap_type type)
1740 {
1741         unsigned long addr;
1742         int err;
1743         u64 offset;
1744
1745         if (!can_mmap(obj, type))
1746                 return 0;
1747
1748         err = __assign_mmap_offset(obj, type, &offset, NULL);
1749         if (err)
1750                 return err;
1751
1752         addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1753         if (IS_ERR_VALUE(addr))
1754                 return addr;
1755
1756         err = prefault_range(addr, obj->base.size);
1757         if (err)
1758                 goto out_unmap;
1759
1760         err = check_present(addr, obj->base.size);
1761         if (err) {
1762                 pr_err("%s: was not present\n", obj->mm.region->name);
1763                 goto out_unmap;
1764         }
1765
1766         /*
1767          * After unbinding the object from the GGTT, its address may be reused
1768          * for other objects. We therefore have to revoke the previous mmap
1769          * PTE access, as those PTEs no longer refer to the same object.
1770          */
1771         i915_gem_object_lock(obj, NULL);
1772         err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
1773         i915_gem_object_unlock(obj);
1774         if (err) {
1775                 pr_err("Failed to unbind object!\n");
1776                 goto out_unmap;
1777         }
1778
1779         if (type != I915_MMAP_TYPE_GTT) {
1780                 i915_gem_object_lock(obj, NULL);
1781                 __i915_gem_object_put_pages(obj);
1782                 i915_gem_object_unlock(obj);
1783                 if (i915_gem_object_has_pages(obj)) {
1784                         pr_err("Failed to put pages for object!\n");
1785                         err = -EINVAL;
1786                         goto out_unmap;
1787                 }
1788         }
1789
1790         err = check_absent(addr, obj->base.size);
1791         if (err) {
1792                 pr_err("%s: PTEs were not revoked\n", obj->mm.region->name);
1793                 goto out_unmap;
1794         }
1795
1796 out_unmap:
1797         vm_munmap(addr, obj->base.size);
1798         return err;
1799 }
1800
1801 static int igt_mmap_revoke(void *arg)
1802 {
1803         struct drm_i915_private *i915 = arg;
1804         struct intel_memory_region *mr;
1805         enum intel_region_id id;
1806
1807         for_each_memory_region(mr, i915, id) {
1808                 struct drm_i915_gem_object *obj;
1809                 int err;
1810
1811                 if (mr->private)
1812                         continue;
1813
1814                 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1815                 if (obj == ERR_PTR(-ENODEV))
1816                         continue;
1817
1818                 if (IS_ERR(obj))
1819                         return PTR_ERR(obj);
1820
1821                 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
1822                 if (err == 0)
1823                         err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
1824                 if (err == 0)
1825                         err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
1826
1827                 i915_gem_object_put(obj);
1828                 if (err)
1829                         return err;
1830         }
1831
1832         return 0;
1833 }
1834
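/*
 * Entry point for the mman live selftests. These are typically run by
 * building with CONFIG_DRM_I915_SELFTEST and loading i915 with the
 * live_selftests module parameter (e.g. i915.live_selftests=-1), or via
 * IGT's selftest wrappers; the exact invocation depends on the setup.
 */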
1835 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1836 {
1837         static const struct i915_subtest tests[] = {
1838                 SUBTEST(igt_partial_tiling),
1839                 SUBTEST(igt_smoke_tiling),
1840                 SUBTEST(igt_mmap_offset_exhaustion),
1841                 SUBTEST(igt_mmap),
1842                 SUBTEST(igt_mmap_migrate),
1843                 SUBTEST(igt_mmap_access),
1844                 SUBTEST(igt_mmap_revoke),
1845                 SUBTEST(igt_mmap_gpu),
1846         };
1847
1848         return i915_live_subtests(tests, i915);
1849 }