/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

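/*
 * swizzle_bit() and tiled_offset() below are a CPU-side model of the fence
 * tiling layout: given a linear offset into a tiled GGTT view, tiled_offset()
 * predicts where that byte lands in the object's backing store, including any
 * bit-6 address swizzling. The partial-mmap checks compare writes made through
 * the GTT against this prediction.
 */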
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

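/*
 * Pick one random page, pin a partial GGTT view containing it, write the page
 * index through the fenced iomap, then read it back via the CPU at the offset
 * predicted by tiled_offset() to confirm the mapping and swizzling line up.
 */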
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(to_gt(i915));

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	i915_gem_object_lock(obj, NULL);
	__i915_vma_put(vma);
	i915_gem_object_unlock(obj);
	return err;
}

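/*
 * Same check as above, but walked methodically: visit a prime-numbered
 * selection of the object's pages and verify each partial view in turn until
 * the allotted time runs out.
 */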
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(to_gt(i915));

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		i915_gem_object_lock(obj, NULL);
		__i915_vma_put(vma);
		i915_gem_object_unlock(obj);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

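/*
 * Fill in the per-generation tile geometry (width, height, log2 tile size)
 * and return the maximum pitch, in units of tile width, that a fence register
 * can describe on this platform.
 */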
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vma.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many 1s runs with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

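/*
 * Keep the object busy by submitting a write to it from every uabi engine,
 * then drop our reference so that the object stays alive only through those
 * in-flight requests (i.e. its active reference).
 */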
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
{
	if (HAS_LMEM(i915))
		return I915_MMAP_TYPE_FIXED;

	return I915_MMAP_TYPE_GTT;
}

static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915,
		       unsigned long size)
{
	if (HAS_LMEM(i915)) {
		struct intel_memory_region *sys_region =
			i915->mm.regions[INTEL_REGION_SMEM];

		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
	}

	return i915_gem_object_create_internal(i915, size);
}

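/*
 * Try to create an object of the given size and assign it an mmap offset,
 * returning true only if the outcome matches the expected error code
 * (0 for success).
 */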
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	u64 offset;
	int ret;

	obj = create_sys_or_internal(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);

	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	i915_gem_object_put(obj);

	return ret == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(to_gt(i915));
	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(to_gt(i915));
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

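/*
 * Trim the mmap-offset address space down to a single page by reserving every
 * other hole, then check that offset allocation succeeds and fails exactly
 * where expected, and that busy objects whose last reference has been dropped
 * can still be cleaned up afterwards.
 */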
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	int loop, err = 0;
	u64 offset;
	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!to_gt(i915)->awake);
	intel_gt_retire_requests(to_gt(i915));
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = create_sys_or_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
		goto out;
	}

	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(to_gt(i915)))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

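/*
 * gtt_set()/wc_set() poison the object's backing store with POISON_INUSE
 * before an mmap test, and gtt_check()/wc_check() verify afterwards that the
 * test's writes of POISON_FREE actually landed there, using either a fenced
 * GGTT iomap or a WC kernel mapping.
 */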
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

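/*
 * Decide whether this object/mmap-type pairing is expected to work: objects
 * providing their own mmap_offset only support the FIXED type, GTT mmaps need
 * a mappable aperture, and everything else needs either struct pages or iomem
 * to fault in.
 */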
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	bool no_map;

	if (obj->ops->mmap_offset)
		return type == I915_MMAP_TYPE_FIXED;
	else if (type == I915_MMAP_TYPE_FIXED)
		return false;

	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return false;

	i915_gem_object_lock(obj, NULL);
	no_map = (type != I915_MMAP_TYPE_GTT &&
		  !i915_gem_object_has_struct_page(obj) &&
		  !i915_gem_object_has_iomem(obj));
	i915_gem_object_unlock(obj);

	return !no_map;
}

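/*
 * Poison the backing store, mmap the object with the requested type and check
 * that userspace reads observe the poison and that userspace writes of the
 * complementary pattern land back in the backing store.
 */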
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	mmap_read_lock(current->mm);
	area = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static const char *repr_mmap_type(enum i915_mmap_type type)
{
	switch (type) {
	case I915_MMAP_TYPE_GTT: return "gtt";
	case I915_MMAP_TYPE_WB: return "wb";
	case I915_MMAP_TYPE_WC: return "wc";
	case I915_MMAP_TYPE_UC: return "uc";
	case I915_MMAP_TYPE_FIXED: return "fixed";
	default: return "unknown";
	}
}

static bool can_access(struct drm_i915_gem_object *obj)
{
	bool access;

	i915_gem_object_lock(obj, NULL);
	access = i915_gem_object_has_struct_page(obj) ||
		 i915_gem_object_has_iomem(obj);
	i915_gem_object_unlock(obj);

	return access;
}

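/*
 * Check that access_process_vm() (the ptrace/debugger path) can read and
 * write through the userspace mmap just like ordinary loads and stores.
 */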
static int __igt_mmap_access(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long __user *ptr;
	unsigned long A, B;
	unsigned long x, y;
	unsigned long addr;
	int err;
	u64 offset;

	memset(&A, 0xAA, sizeof(A));
	memset(&B, 0xBB, sizeof(B));

	if (!can_mmap(obj, type) || !can_access(obj))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;
	ptr = (unsigned long __user *)addr;

	err = __put_user(A, ptr);
	if (err) {
		pr_err("%s(%s): failed to write into user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = access_process_vm(current, addr, &x, sizeof(x), 0);
	if (err != sizeof(x)) {
		pr_err("%s(%s): access_process_vm() read failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
	if (err != sizeof(B)) {
		pr_err("%s(%s): access_process_vm() write failed\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	intel_gt_flush_ggtt_writes(to_gt(i915));

	err = __get_user(y, ptr);
	if (err) {
		pr_err("%s(%s): failed to read from user mmap\n",
		       obj->mm.region->name, repr_mmap_type(type));
		goto out_unmap;
	}

	if (x != A || y != B) {
		pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
		       obj->mm.region->name, repr_mmap_type(type),
		       x, y);
		err = -EINVAL;
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_access(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
		if (err == 0)
			err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	unsigned long addr;
	u32 __user *ux;
	u32 bbe;
	int err;
	u64 offset;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto out_ww;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_unpin;
		}

		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);

		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

out_unpin:
		i915_vma_unpin(vma);
out_ww:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

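/*
 * Helpers for walking the userspace mapping with apply_to_page_range() and
 * asserting that every PTE in the range is either present (after faulting in)
 * or has been revoked, plus prefault_range() to touch every page up front.
 */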
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}

static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;
	int err;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}

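/*
 * Fault in the whole mapping and confirm its PTEs are present, then unbind
 * the object (and, for non-GTT mmaps, drop its pages) and confirm every
 * userspace PTE has been revoked.
 */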
static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	unsigned long addr;
	int err;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)
		goto out_unmap;

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be reused
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
	}

	if (type != I915_MMAP_TYPE_GTT) {
		i915_gem_object_lock(obj, NULL);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");
			err = -EINVAL;
			goto out_unmap;
		}
	}

	err = check_absent(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not absent\n", obj->mm.region->name);
		goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;
		int err;

		obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
		if (obj == ERR_PTR(-ENODEV))
			continue;

		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_access),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}