drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_region.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gtt.h"

#include "i915_random.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "igt_flush_test.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
	i915_gem_drain_freed_objects(i915);
}

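/*
 * Fake backing store for the tests below: fake_get_pages() fills an sg_table
 * in which every segment points at the same bogus pfn (PFN_BIAS) rather than
 * at real allocated pages, so very large "DMA" objects can be constructed
 * without consuming physical memory. The backing pages are never meant to be
 * touched by the CPU; they exist purely to populate GTT entries.
 */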
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

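/*
 * Create a GEM object of the requested size backed by the fake sg_table
 * above. The object is marked volatile and its "backing storage" is
 * preallocated with a pin/unpin cycle before being returned to the caller.
 */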
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages_unlocked(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

1c42819a
CW
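/*
 * igt_ppgtt_alloc: exercise the ppGTT page-table allocator by allocating,
 * mapping and then clearing ever larger ranges, first from scratch and then
 * incrementally, bounded by the amount of physical memory in the system.
 */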
149static int igt_ppgtt_alloc(void *arg)
150{
151 struct drm_i915_private *dev_priv = arg;
ab53497b 152 struct i915_ppgtt *ppgtt;
480ae795 153 struct i915_gem_ww_ctx ww;
207b7000 154 u64 size, last, limit;
35ac40d8 155 int err = 0;
1c42819a
CW
156
157 /* Allocate a ppgtt and try to fill the entire range */
158
4bdafb9d 159 if (!HAS_PPGTT(dev_priv))
1c42819a
CW
160 return 0;
161
8c2699fa 162 ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
1f6f0023
CW
163 if (IS_ERR(ppgtt))
164 return PTR_ERR(ppgtt);
1c42819a 165
35ac40d8 166 if (!ppgtt->vm.allocate_va_range)
1c42819a
CW
167 goto err_ppgtt_cleanup;
168
207b7000
CW
169 /*
170 * While we only allocate the page tables here and so we could
171 * address a much larger GTT than we could actually fit into
172 * RAM, a practical limit is the number of physical pages in the system.
173 * This should ensure that we do not run into the oomkiller during
174 * the test and take down the machine wilfully.
175 */
ca79b0c2 176 limit = totalram_pages() << PAGE_SHIFT;
207b7000
CW
177 limit = min(ppgtt->vm.total, limit);
178
480ae795
ML
179 i915_gem_ww_ctx_init(&ww, false);
180retry:
181 err = i915_vm_lock_objects(&ppgtt->vm, &ww);
182 if (err)
183 goto err_ppgtt_cleanup;
184
1c42819a 185 /* Check we can allocate the entire range */
207b7000 186 for (size = 4096; size <= limit; size <<= 2) {
cd0452aa
CW
187 struct i915_vm_pt_stash stash = {};
188
189 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
190 if (err)
1c42819a 191 goto err_ppgtt_cleanup;
1c42819a 192
529b9ec8 193 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
89351925
CW
194 if (err) {
195 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
196 goto err_ppgtt_cleanup;
197 }
198
cd0452aa 199 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
1f6f0023
CW
200 cond_resched();
201
82ad6443 202 ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
cd0452aa
CW
203
204 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
1c42819a
CW
205 }
206
207 /* Check we can incrementally allocate the entire range */
207b7000 208 for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
cd0452aa
CW
209 struct i915_vm_pt_stash stash = {};
210
211 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
212 if (err)
1c42819a 213 goto err_ppgtt_cleanup;
1f6f0023 214
529b9ec8 215 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
89351925
CW
216 if (err) {
217 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
218 goto err_ppgtt_cleanup;
219 }
220
cd0452aa
CW
221 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
222 last, size - last);
1f6f0023 223 cond_resched();
cd0452aa
CW
224
225 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
1c42819a
CW
226 }
227
228err_ppgtt_cleanup:
480ae795
ML
229 if (err == -EDEADLK) {
230 err = i915_gem_ww_ctx_backoff(&ww);
231 if (!err)
232 goto retry;
233 }
234 i915_gem_ww_ctx_fini(&ww);
235
e568ac38 236 i915_vm_put(&ppgtt->vm);
1c42819a
CW
237 return err;
238}
239
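/*
 * lowlevel_hole: drive vm->insert_entries() and vm->clear_range() directly
 * through a mock vma resource, bypassing the higher-level vma binding code,
 * while filling the hole with fake objects at randomised offsets.
 */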
2c86e55d 240static int lowlevel_hole(struct i915_address_space *vm,
4a6f13fc
CW
241 u64 hole_start, u64 hole_end,
242 unsigned long end_time)
243{
87bd701e
MA
244 const unsigned int min_alignment =
245 i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
4a6f13fc 246 I915_RND_STATE(seed_prng);
39a2bd34 247 struct i915_vma_resource *mock_vma_res;
4a6f13fc 248 unsigned int size;
4a234c5f 249
39a2bd34
TH
250 mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
251 if (!mock_vma_res)
34f5fe12 252 return -ENOMEM;
4a6f13fc
CW
253
254 /* Keep creating larger objects until one cannot fit into the hole */
255 for (size = 12; (hole_end - hole_start) >> size; size++) {
256 I915_RND_SUBSTATE(prng, seed_prng);
257 struct drm_i915_gem_object *obj;
258 unsigned int *order, count, n;
87bd701e 259 u64 hole_size, aligned_size;
4a6f13fc 260
87bd701e
MA
261 aligned_size = max_t(u32, ilog2(min_alignment), size);
262 hole_size = (hole_end - hole_start) >> aligned_size;
4a6f13fc
CW
263 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
264 hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
223c73a3
CW
265 count = hole_size >> 1;
266 if (!count) {
267 pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
268 __func__, hole_start, hole_end, size, hole_size);
269 break;
270 }
271
4a6f13fc 272 do {
4a6f13fc 273 order = i915_random_order(count, &prng);
223c73a3
CW
274 if (order)
275 break;
276 } while (count >>= 1);
34f5fe12 277 if (!count) {
39a2bd34 278 kfree(mock_vma_res);
223c73a3 279 return -ENOMEM;
34f5fe12 280 }
223c73a3 281 GEM_BUG_ON(!order);
4a6f13fc 282
87bd701e
MA
283 GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
284 GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);
4a6f13fc
CW
285
286 /* Ignore allocation failures (i.e. don't report them as
287 * a test failure) as we are purposefully allocating very
288 * large objects without checking that we have sufficient
289 * memory. We expect to hit -ENOMEM.
290 */
291
2c86e55d 292 obj = fake_dma_object(vm->i915, BIT_ULL(size));
4a6f13fc
CW
293 if (IS_ERR(obj)) {
294 kfree(order);
295 break;
296 }
297
298 GEM_BUG_ON(obj->base.size != BIT_ULL(size));
299
480ae795 300 if (i915_gem_object_pin_pages_unlocked(obj)) {
4a6f13fc
CW
301 i915_gem_object_put(obj);
302 kfree(order);
303 break;
304 }
305
306 for (n = 0; n < count; n++) {
87bd701e 307 u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
c9d08cc3 308 intel_wakeref_t wakeref;
4a6f13fc 309
87bd701e 310 GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);
4a6f13fc 311
91e32157
CW
312 if (igt_timeout(end_time,
313 "%s timed out before %d/%d\n",
314 __func__, n, count)) {
315 hole_end = hole_start; /* quit */
316 break;
317 }
318
cd0452aa
CW
319 if (vm->allocate_va_range) {
320 struct i915_vm_pt_stash stash = {};
480ae795
ML
321 struct i915_gem_ww_ctx ww;
322 int err;
323
324 i915_gem_ww_ctx_init(&ww, false);
325retry:
326 err = i915_vm_lock_objects(vm, &ww);
327 if (err)
328 goto alloc_vm_end;
cd0452aa 329
480ae795 330 err = -ENOMEM;
cd0452aa
CW
331 if (i915_vm_alloc_pt_stash(vm, &stash,
332 BIT_ULL(size)))
480ae795 333 goto alloc_vm_end;
89351925 334
529b9ec8 335 err = i915_vm_map_pt_stash(vm, &stash);
480ae795
ML
336 if (!err)
337 vm->allocate_va_range(vm, &stash,
338 addr, BIT_ULL(size));
cd0452aa 339 i915_vm_free_pt_stash(vm, &stash);
480ae795
ML
340alloc_vm_end:
341 if (err == -EDEADLK) {
342 err = i915_gem_ww_ctx_backoff(&ww);
343 if (!err)
344 goto retry;
345 }
346 i915_gem_ww_ctx_fini(&ww);
347
348 if (err)
349 break;
cd0452aa 350 }
4a6f13fc 351
39a2bd34 352 mock_vma_res->bi.pages = obj->mm.pages;
87bd701e 353 mock_vma_res->node_size = BIT_ULL(aligned_size);
39a2bd34 354 mock_vma_res->start = addr;
4a234c5f 355
2c86e55d 356 with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
39a2bd34 357 vm->insert_entries(vm, mock_vma_res,
7c465310 358 I915_CACHE_NONE, 0);
4a6f13fc
CW
359 }
360 count = n;
361
362 i915_random_reorder(order, count, &prng);
363 for (n = 0; n < count; n++) {
87bd701e 364 u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
7c465310 365 intel_wakeref_t wakeref;
4a6f13fc
CW
366
367 GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
2c86e55d 368 with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
7c465310 369 vm->clear_range(vm, addr, BIT_ULL(size));
4a6f13fc
CW
370 }
371
372 i915_gem_object_unpin_pages(obj);
373 i915_gem_object_put(obj);
374
375 kfree(order);
38b7fb0b 376
2c86e55d 377 cleanup_freed_objects(vm->i915);
4a6f13fc
CW
378 }
379
39a2bd34 380 kfree(mock_vma_res);
4a6f13fc
CW
381 return 0;
382}
383
8d28ba45
CW
384static void close_object_list(struct list_head *objects,
385 struct i915_address_space *vm)
386{
387 struct drm_i915_gem_object *obj, *on;
aae4a3d8 388 int ignored;
8d28ba45
CW
389
390 list_for_each_entry_safe(obj, on, objects, st_link) {
391 struct i915_vma *vma;
392
393 vma = i915_vma_instance(obj, vm, NULL);
aae4a3d8 394 if (!IS_ERR(vma))
0f341974 395 ignored = i915_vma_unbind_unlocked(vma);
8d28ba45
CW
396
397 list_del(&obj->st_link);
398 i915_gem_object_put(obj);
399 }
400}
401
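/*
 * fill_hole: bind many vma of prime-stepped sizes at fixed offsets, working
 * inwards from both ends of the hole, verifying that every node lands where
 * requested and stays put until it is explicitly unbound.
 */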
2c86e55d 402static int fill_hole(struct i915_address_space *vm,
8d28ba45
CW
403 u64 hole_start, u64 hole_end,
404 unsigned long end_time)
405{
406 const u64 hole_size = hole_end - hole_start;
407 struct drm_i915_gem_object *obj;
87bd701e
MA
408 const unsigned int min_alignment =
409 i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
8d28ba45 410 const unsigned long max_pages =
87bd701e 411 min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment));
8d28ba45
CW
412 const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
413 unsigned long npages, prime, flags;
414 struct i915_vma *vma;
415 LIST_HEAD(objects);
416 int err;
417
418 /* Try binding many VMA working inwards from either edge */
419
420 flags = PIN_OFFSET_FIXED | PIN_USER;
421 if (i915_is_ggtt(vm))
422 flags |= PIN_GLOBAL;
423
424 for_each_prime_number_from(prime, 2, max_step) {
425 for (npages = 1; npages <= max_pages; npages *= prime) {
426 const u64 full_size = npages << PAGE_SHIFT;
427 const struct {
428 const char *name;
429 u64 offset;
430 int step;
431 } phases[] = {
432 { "top-down", hole_end, -1, },
433 { "bottom-up", hole_start, 1, },
434 { }
435 }, *p;
436
2c86e55d 437 obj = fake_dma_object(vm->i915, full_size);
8d28ba45
CW
438 if (IS_ERR(obj))
439 break;
440
441 list_add(&obj->st_link, &objects);
442
443 /* Align differing sized objects against the edges, and
444 * check we don't walk off into the void when binding
445 * them into the GTT.
446 */
447 for (p = phases; p->name; p++) {
448 u64 offset;
449
450 offset = p->offset;
451 list_for_each_entry(obj, &objects, st_link) {
87bd701e
MA
452 u64 aligned_size = round_up(obj->base.size,
453 min_alignment);
454
8d28ba45
CW
455 vma = i915_vma_instance(obj, vm, NULL);
456 if (IS_ERR(vma))
457 continue;
458
459 if (p->step < 0) {
87bd701e 460 if (offset < hole_start + aligned_size)
8d28ba45 461 break;
87bd701e 462 offset -= aligned_size;
8d28ba45
CW
463 }
464
465 err = i915_vma_pin(vma, 0, 0, offset | flags);
466 if (err) {
467 pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
468 __func__, p->name, err, npages, prime, offset);
469 goto err;
470 }
471
472 if (!drm_mm_node_allocated(&vma->node) ||
473 i915_vma_misplaced(vma, 0, 0, offset | flags)) {
474 pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
475 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
476 offset);
477 err = -EINVAL;
478 goto err;
479 }
480
481 i915_vma_unpin(vma);
482
483 if (p->step > 0) {
87bd701e 484 if (offset + aligned_size > hole_end)
8d28ba45 485 break;
87bd701e 486 offset += aligned_size;
8d28ba45
CW
487 }
488 }
489
490 offset = p->offset;
491 list_for_each_entry(obj, &objects, st_link) {
87bd701e
MA
492 u64 aligned_size = round_up(obj->base.size,
493 min_alignment);
494
8d28ba45
CW
495 vma = i915_vma_instance(obj, vm, NULL);
496 if (IS_ERR(vma))
497 continue;
498
499 if (p->step < 0) {
87bd701e 500 if (offset < hole_start + aligned_size)
8d28ba45 501 break;
87bd701e 502 offset -= aligned_size;
8d28ba45
CW
503 }
504
505 if (!drm_mm_node_allocated(&vma->node) ||
506 i915_vma_misplaced(vma, 0, 0, offset | flags)) {
507 pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
508 __func__, p->name, vma->node.start, vma->node.size,
509 offset);
510 err = -EINVAL;
511 goto err;
512 }
513
0f341974 514 err = i915_vma_unbind_unlocked(vma);
8d28ba45
CW
515 if (err) {
516 pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
517 __func__, p->name, vma->node.start, vma->node.size,
518 err);
519 goto err;
520 }
521
522 if (p->step > 0) {
87bd701e 523 if (offset + aligned_size > hole_end)
8d28ba45 524 break;
87bd701e 525 offset += aligned_size;
8d28ba45
CW
526 }
527 }
528
529 offset = p->offset;
530 list_for_each_entry_reverse(obj, &objects, st_link) {
87bd701e
MA
531 u64 aligned_size = round_up(obj->base.size,
532 min_alignment);
533
8d28ba45
CW
534 vma = i915_vma_instance(obj, vm, NULL);
535 if (IS_ERR(vma))
536 continue;
537
538 if (p->step < 0) {
87bd701e 539 if (offset < hole_start + aligned_size)
8d28ba45 540 break;
87bd701e 541 offset -= aligned_size;
8d28ba45
CW
542 }
543
544 err = i915_vma_pin(vma, 0, 0, offset | flags);
545 if (err) {
546 pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
547 __func__, p->name, err, npages, prime, offset);
548 goto err;
549 }
550
551 if (!drm_mm_node_allocated(&vma->node) ||
552 i915_vma_misplaced(vma, 0, 0, offset | flags)) {
553 pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
554 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
555 offset);
556 err = -EINVAL;
557 goto err;
558 }
559
560 i915_vma_unpin(vma);
561
562 if (p->step > 0) {
87bd701e 563 if (offset + aligned_size > hole_end)
8d28ba45 564 break;
87bd701e 565 offset += aligned_size;
8d28ba45
CW
566 }
567 }
568
569 offset = p->offset;
570 list_for_each_entry_reverse(obj, &objects, st_link) {
87bd701e
MA
571 u64 aligned_size = round_up(obj->base.size,
572 min_alignment);
573
8d28ba45
CW
574 vma = i915_vma_instance(obj, vm, NULL);
575 if (IS_ERR(vma))
576 continue;
577
578 if (p->step < 0) {
87bd701e 579 if (offset < hole_start + aligned_size)
8d28ba45 580 break;
87bd701e 581 offset -= aligned_size;
8d28ba45
CW
582 }
583
584 if (!drm_mm_node_allocated(&vma->node) ||
585 i915_vma_misplaced(vma, 0, 0, offset | flags)) {
586 pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
587 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
588 offset);
589 err = -EINVAL;
590 goto err;
591 }
592
0f341974 593 err = i915_vma_unbind_unlocked(vma);
8d28ba45
CW
594 if (err) {
595 pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
596 __func__, p->name, vma->node.start, vma->node.size,
597 err);
598 goto err;
599 }
600
601 if (p->step > 0) {
87bd701e 602 if (offset + aligned_size > hole_end)
8d28ba45 603 break;
87bd701e 604 offset += aligned_size;
8d28ba45
CW
605 }
606 }
607 }
608
609 if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
610 __func__, npages, prime)) {
611 err = -EINTR;
612 goto err;
613 }
614 }
615
616 close_object_list(&objects, vm);
2c86e55d 617 cleanup_freed_objects(vm->i915);
8d28ba45
CW
618 }
619
620 return 0;
621
622err:
623 close_object_list(&objects, vm);
624 return err;
625}
626
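/*
 * walk_hole: bind a single vma at each aligned offset across the hole in
 * turn, checking placement and unbinding it again after every step.
 */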
2c86e55d 627static int walk_hole(struct i915_address_space *vm,
6e32ab3d
CW
628 u64 hole_start, u64 hole_end,
629 unsigned long end_time)
630{
631 const u64 hole_size = hole_end - hole_start;
632 const unsigned long max_pages =
633 min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
87bd701e 634 unsigned long min_alignment;
6e32ab3d
CW
635 unsigned long flags;
636 u64 size;
637
638 /* Try binding a single VMA in different positions within the hole */
639
640 flags = PIN_OFFSET_FIXED | PIN_USER;
641 if (i915_is_ggtt(vm))
642 flags |= PIN_GLOBAL;
643
87bd701e
MA
644 min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
645
6e32ab3d
CW
646 for_each_prime_number_from(size, 1, max_pages) {
647 struct drm_i915_gem_object *obj;
648 struct i915_vma *vma;
649 u64 addr;
650 int err = 0;
651
2c86e55d 652 obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
6e32ab3d
CW
653 if (IS_ERR(obj))
654 break;
655
656 vma = i915_vma_instance(obj, vm, NULL);
657 if (IS_ERR(vma)) {
658 err = PTR_ERR(vma);
1257e0f8 659 goto err_put;
6e32ab3d
CW
660 }
661
662 for (addr = hole_start;
663 addr + obj->base.size < hole_end;
87bd701e 664 addr += round_up(obj->base.size, min_alignment)) {
6e32ab3d
CW
665 err = i915_vma_pin(vma, 0, 0, addr | flags);
666 if (err) {
667 pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
668 __func__, addr, vma->size,
669 hole_start, hole_end, err);
50689771 670 goto err_put;
6e32ab3d
CW
671 }
672 i915_vma_unpin(vma);
673
674 if (!drm_mm_node_allocated(&vma->node) ||
675 i915_vma_misplaced(vma, 0, 0, addr | flags)) {
676 pr_err("%s incorrect at %llx + %llx\n",
677 __func__, addr, vma->size);
678 err = -EINVAL;
50689771 679 goto err_put;
6e32ab3d
CW
680 }
681
0f341974 682 err = i915_vma_unbind_unlocked(vma);
6e32ab3d
CW
683 if (err) {
684 pr_err("%s unbind failed at %llx + %llx with err=%d\n",
685 __func__, addr, vma->size, err);
50689771 686 goto err_put;
6e32ab3d
CW
687 }
688
689 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
690
691 if (igt_timeout(end_time,
692 "%s timed out at %llx\n",
693 __func__, addr)) {
694 err = -EINTR;
50689771 695 goto err_put;
6e32ab3d
CW
696 }
697 }
698
1257e0f8 699err_put:
6e32ab3d
CW
700 i915_gem_object_put(obj);
701 if (err)
702 return err;
38b7fb0b 703
2c86e55d 704 cleanup_freed_objects(vm->i915);
6e32ab3d
CW
705 }
706
707 return 0;
708}
709
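/*
 * pot_hole: insert a two-page object straddling every power-of-two boundary
 * within the hole and verify it is placed exactly as requested.
 */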
2c86e55d 710static int pot_hole(struct i915_address_space *vm,
7db4dcea
CW
711 u64 hole_start, u64 hole_end,
712 unsigned long end_time)
713{
714 struct drm_i915_gem_object *obj;
715 struct i915_vma *vma;
87bd701e 716 unsigned int min_alignment;
7db4dcea
CW
717 unsigned long flags;
718 unsigned int pot;
72affdf9 719 int err = 0;
7db4dcea
CW
720
721 flags = PIN_OFFSET_FIXED | PIN_USER;
722 if (i915_is_ggtt(vm))
723 flags |= PIN_GLOBAL;
724
87bd701e
MA
725 min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
726
2c86e55d 727 obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
7db4dcea
CW
728 if (IS_ERR(obj))
729 return PTR_ERR(obj);
730
731 vma = i915_vma_instance(obj, vm, NULL);
732 if (IS_ERR(vma)) {
733 err = PTR_ERR(vma);
734 goto err_obj;
735 }
736
737 /* Insert a pair of pages across every pot boundary within the hole */
738 for (pot = fls64(hole_end - 1) - 1;
87bd701e 739 pot > ilog2(2 * min_alignment);
7db4dcea
CW
740 pot--) {
741 u64 step = BIT_ULL(pot);
742 u64 addr;
743
87bd701e 744 for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
333991c4 745 hole_end > addr && hole_end - addr >= 2 * min_alignment;
7db4dcea
CW
746 addr += step) {
747 err = i915_vma_pin(vma, 0, 0, addr | flags);
748 if (err) {
749 pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
750 __func__,
751 addr,
752 hole_start, hole_end,
753 err);
50689771 754 goto err_obj;
7db4dcea
CW
755 }
756
757 if (!drm_mm_node_allocated(&vma->node) ||
758 i915_vma_misplaced(vma, 0, 0, addr | flags)) {
759 pr_err("%s incorrect at %llx + %llx\n",
760 __func__, addr, vma->size);
761 i915_vma_unpin(vma);
0f341974 762 err = i915_vma_unbind_unlocked(vma);
7db4dcea 763 err = -EINVAL;
50689771 764 goto err_obj;
7db4dcea
CW
765 }
766
767 i915_vma_unpin(vma);
0f341974 768 err = i915_vma_unbind_unlocked(vma);
7db4dcea
CW
769 GEM_BUG_ON(err);
770 }
771
772 if (igt_timeout(end_time,
773 "%s timed out after %d/%d\n",
774 __func__, pot, fls64(hole_end - 1) - 1)) {
775 err = -EINTR;
50689771 776 goto err_obj;
7db4dcea
CW
777 }
778 }
779
7db4dcea
CW
780err_obj:
781 i915_gem_object_put(obj);
782 return err;
783}
784
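/*
 * drunk_hole: like walk_hole, but pin objects of increasing size at
 * randomly ordered fixed offsets throughout the hole.
 */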
2c86e55d 785static int drunk_hole(struct i915_address_space *vm,
5c3bff48
CW
786 u64 hole_start, u64 hole_end,
787 unsigned long end_time)
788{
789 I915_RND_STATE(prng);
87bd701e 790 unsigned int min_alignment;
5c3bff48
CW
791 unsigned int size;
792 unsigned long flags;
793
794 flags = PIN_OFFSET_FIXED | PIN_USER;
795 if (i915_is_ggtt(vm))
796 flags |= PIN_GLOBAL;
797
87bd701e
MA
798 min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
799
5c3bff48
CW
800 /* Keep creating larger objects until one cannot fit into the hole */
801 for (size = 12; (hole_end - hole_start) >> size; size++) {
802 struct drm_i915_gem_object *obj;
803 unsigned int *order, count, n;
804 struct i915_vma *vma;
87bd701e 805 u64 hole_size, aligned_size;
6e128141 806 int err = -ENODEV;
5c3bff48 807
87bd701e
MA
808 aligned_size = max_t(u32, ilog2(min_alignment), size);
809 hole_size = (hole_end - hole_start) >> aligned_size;
5c3bff48
CW
810 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
811 hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
223c73a3
CW
812 count = hole_size >> 1;
813 if (!count) {
814 pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
815 __func__, hole_start, hole_end, size, hole_size);
816 break;
817 }
818
5c3bff48 819 do {
5c3bff48 820 order = i915_random_order(count, &prng);
223c73a3
CW
821 if (order)
822 break;
823 } while (count >>= 1);
824 if (!count)
825 return -ENOMEM;
826 GEM_BUG_ON(!order);
5c3bff48
CW
827
828 /* Ignore allocation failures (i.e. don't report them as
829 * a test failure) as we are purposefully allocating very
830 * large objects without checking that we have sufficient
831 * memory. We expect to hit -ENOMEM.
832 */
833
2c86e55d 834 obj = fake_dma_object(vm->i915, BIT_ULL(size));
5c3bff48
CW
835 if (IS_ERR(obj)) {
836 kfree(order);
837 break;
838 }
839
840 vma = i915_vma_instance(obj, vm, NULL);
841 if (IS_ERR(vma)) {
842 err = PTR_ERR(vma);
843 goto err_obj;
844 }
845
846 GEM_BUG_ON(vma->size != BIT_ULL(size));
847
848 for (n = 0; n < count; n++) {
87bd701e 849 u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
5c3bff48
CW
850
851 err = i915_vma_pin(vma, 0, 0, addr | flags);
852 if (err) {
853 pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
854 __func__,
855 addr, BIT_ULL(size),
856 hole_start, hole_end,
857 err);
50689771 858 goto err_obj;
5c3bff48
CW
859 }
860
861 if (!drm_mm_node_allocated(&vma->node) ||
862 i915_vma_misplaced(vma, 0, 0, addr | flags)) {
863 pr_err("%s incorrect at %llx + %llx\n",
864 __func__, addr, BIT_ULL(size));
865 i915_vma_unpin(vma);
0f341974 866 err = i915_vma_unbind_unlocked(vma);
5c3bff48 867 err = -EINVAL;
50689771 868 goto err_obj;
5c3bff48
CW
869 }
870
871 i915_vma_unpin(vma);
0f341974 872 err = i915_vma_unbind_unlocked(vma);
5c3bff48
CW
873 GEM_BUG_ON(err);
874
875 if (igt_timeout(end_time,
876 "%s timed out after %d/%d\n",
877 __func__, n, count)) {
878 err = -EINTR;
50689771 879 goto err_obj;
5c3bff48
CW
880 }
881 }
882
5c3bff48
CW
883err_obj:
884 i915_gem_object_put(obj);
885 kfree(order);
886 if (err)
887 return err;
38b7fb0b 888
2c86e55d 889 cleanup_freed_objects(vm->i915);
5c3bff48
CW
890 }
891
892 return 0;
893}
894
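/*
 * __shrink_hole: fill the hole with progressively larger objects; the
 * shrink_hole() wrapper below runs this with fault injection enabled on the
 * vm so that page-table allocation failures are exercised and must be
 * handled cleanly.
 */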
2c86e55d 895static int __shrink_hole(struct i915_address_space *vm,
aae4a3d8
CW
896 u64 hole_start, u64 hole_end,
897 unsigned long end_time)
898{
899 struct drm_i915_gem_object *obj;
900 unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
87bd701e 901 unsigned int min_alignment;
aae4a3d8
CW
902 unsigned int order = 12;
903 LIST_HEAD(objects);
904 int err = 0;
905 u64 addr;
906
87bd701e
MA
907 min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
908
aae4a3d8
CW
909 /* Keep creating larger objects until one cannot fit into the hole */
910 for (addr = hole_start; addr < hole_end; ) {
911 struct i915_vma *vma;
912 u64 size = BIT_ULL(order++);
913
914 size = min(size, hole_end - addr);
2c86e55d 915 obj = fake_dma_object(vm->i915, size);
aae4a3d8
CW
916 if (IS_ERR(obj)) {
917 err = PTR_ERR(obj);
918 break;
919 }
920
921 list_add(&obj->st_link, &objects);
922
923 vma = i915_vma_instance(obj, vm, NULL);
924 if (IS_ERR(vma)) {
925 err = PTR_ERR(vma);
926 break;
927 }
928
929 GEM_BUG_ON(vma->size != size);
930
931 err = i915_vma_pin(vma, 0, 0, addr | flags);
932 if (err) {
933 pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
934 __func__, addr, size, hole_start, hole_end, err);
935 break;
936 }
937
938 if (!drm_mm_node_allocated(&vma->node) ||
939 i915_vma_misplaced(vma, 0, 0, addr | flags)) {
940 pr_err("%s incorrect at %llx + %llx\n",
941 __func__, addr, size);
942 i915_vma_unpin(vma);
0f341974 943 err = i915_vma_unbind_unlocked(vma);
aae4a3d8
CW
944 err = -EINVAL;
945 break;
946 }
947
948 i915_vma_unpin(vma);
87bd701e 949 addr += round_up(size, min_alignment);
aae4a3d8 950
2850748e
CW
951 /*
952 * Since we are injecting allocation faults at random intervals,
953 * wait for this allocation to complete before we change the
954 * faultinjection.
955 */
956 err = i915_vma_sync(vma);
957 if (err)
958 break;
959
aae4a3d8
CW
960 if (igt_timeout(end_time,
961 "%s timed out at ofset %llx [%llx - %llx]\n",
962 __func__, addr, hole_start, hole_end)) {
963 err = -EINTR;
964 break;
965 }
966 }
967
968 close_object_list(&objects, vm);
2c86e55d 969 cleanup_freed_objects(vm->i915);
aae4a3d8
CW
970 return err;
971}
972
2c86e55d 973static int shrink_hole(struct i915_address_space *vm,
aae4a3d8
CW
974 u64 hole_start, u64 hole_end,
975 unsigned long end_time)
976{
977 unsigned long prime;
978 int err;
979
8448661d
CW
980 vm->fault_attr.probability = 999;
981 atomic_set(&vm->fault_attr.times, -1);
aae4a3d8
CW
982
983 for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
8448661d 984 vm->fault_attr.interval = prime;
2c86e55d 985 err = __shrink_hole(vm, hole_start, hole_end, end_time);
aae4a3d8
CW
986 if (err)
987 break;
988 }
989
8448661d 990 memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
aae4a3d8
CW
991
992 return err;
993}
994
2c86e55d 995static int shrink_boom(struct i915_address_space *vm,
fe215c8b
MA
996 u64 hole_start, u64 hole_end,
997 unsigned long end_time)
998{
999 unsigned int sizes[] = { SZ_2M, SZ_1G };
1000 struct drm_i915_gem_object *purge;
1001 struct drm_i915_gem_object *explode;
1002 int err;
1003 int i;
1004
1005 /*
1006 * Catch the case which shrink_hole seems to miss. The setup here
1007 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
1008 * ensuring that all vma assiocated with the respective pd/pdp are
1009 * unpinned at the time.
1010 */
1011
1012 for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
1013 unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
1014 unsigned int size = sizes[i];
1015 struct i915_vma *vma;
1016
2c86e55d 1017 purge = fake_dma_object(vm->i915, size);
fe215c8b
MA
1018 if (IS_ERR(purge))
1019 return PTR_ERR(purge);
1020
1021 vma = i915_vma_instance(purge, vm, NULL);
1022 if (IS_ERR(vma)) {
1023 err = PTR_ERR(vma);
1024 goto err_purge;
1025 }
1026
1027 err = i915_vma_pin(vma, 0, 0, flags);
1028 if (err)
1029 goto err_purge;
1030
1031 /* Should now be ripe for purging */
1032 i915_vma_unpin(vma);
1033
2c86e55d 1034 explode = fake_dma_object(vm->i915, size);
fe215c8b 1035 if (IS_ERR(explode)) {
6e8c06d2 1036 err = PTR_ERR(explode);
fe215c8b
MA
1037 goto err_purge;
1038 }
1039
1040 vm->fault_attr.probability = 100;
1041 vm->fault_attr.interval = 1;
1042 atomic_set(&vm->fault_attr.times, -1);
1043
1044 vma = i915_vma_instance(explode, vm, NULL);
1045 if (IS_ERR(vma)) {
1046 err = PTR_ERR(vma);
1047 goto err_explode;
1048 }
1049
1050 err = i915_vma_pin(vma, 0, 0, flags | size);
1051 if (err)
1052 goto err_explode;
1053
1054 i915_vma_unpin(vma);
1055
1056 i915_gem_object_put(purge);
1057 i915_gem_object_put(explode);
1058
1059 memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
2c86e55d 1060 cleanup_freed_objects(vm->i915);
fe215c8b
MA
1061 }
1062
1063 return 0;
1064
1065err_explode:
1066 i915_gem_object_put(explode);
1067err_purge:
1068 i915_gem_object_put(purge);
1069 memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1070 return err;
1071}
1072
a413c99f
RB
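/*
 * misaligned_case: pin a single object from the given memory region at a
 * fixed address and check that the resulting vma and node sizes match the
 * minimum page-size expectations for that region.
 */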
1073static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
1074 u64 addr, u64 size, unsigned long flags)
1075{
1076 struct drm_i915_gem_object *obj;
1077 struct i915_vma *vma;
1078 int err = 0;
1079 u64 expected_vma_size, expected_node_size;
1080 bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
1081 mr->type == INTEL_MEMORY_STOLEN_LOCAL;
1082
1083 obj = i915_gem_object_create_region(mr, size, 0, 0);
1084 if (IS_ERR(obj)) {
1085 /* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
1086 if (PTR_ERR(obj) == -ENODEV && is_stolen)
1087 return 0;
1088 return PTR_ERR(obj);
1089 }
1090
1091 vma = i915_vma_instance(obj, vm, NULL);
1092 if (IS_ERR(vma)) {
1093 err = PTR_ERR(vma);
1094 goto err_put;
1095 }
1096
1097 err = i915_vma_pin(vma, 0, 0, addr | flags);
1098 if (err)
1099 goto err_put;
1100 i915_vma_unpin(vma);
1101
1102 if (!drm_mm_node_allocated(&vma->node)) {
1103 err = -EINVAL;
1104 goto err_put;
1105 }
1106
1107 if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
1108 err = -EINVAL;
1109 goto err_put;
1110 }
1111
1112 expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
1113 expected_node_size = expected_vma_size;
1114
0f9fc0c1
MA
1115 if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
1116 /*
1117 * The compact-pt should expand lmem node to 2MB for the ppGTT,
1118 * for all other cases we should only expect 64K.
1119 */
a413c99f 1120 expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
0f9fc0c1
MA
1121 if (NEEDS_COMPACT_PT(vm->i915) && !i915_is_ggtt(vm))
1122 expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_2M);
1123 else
1124 expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
a413c99f
RB
1125 }
1126
1127 if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
1128 err = i915_vma_unbind_unlocked(vma);
1129 err = -EBADSLT;
1130 goto err_put;
1131 }
1132
1133 err = i915_vma_unbind_unlocked(vma);
1134 if (err)
1135 goto err_put;
1136
1137 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1138
1139err_put:
1140 i915_gem_object_put(obj);
1141 cleanup_freed_objects(vm->i915);
1142 return err;
1143}
1144
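/*
 * misaligned_pin: for each memory region, check that a misaligned fixed
 * offset is rejected with -EINVAL and that an aligned pin expands vma->size
 * to the region's minimum page size as expected.
 */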
1145static int misaligned_pin(struct i915_address_space *vm,
1146 u64 hole_start, u64 hole_end,
1147 unsigned long end_time)
1148{
1149 struct intel_memory_region *mr;
1150 enum intel_region_id id;
1151 unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
1152 int err = 0;
1153 u64 hole_size = hole_end - hole_start;
1154
1155 if (i915_is_ggtt(vm))
1156 flags |= PIN_GLOBAL;
1157
1158 for_each_memory_region(mr, vm->i915, id) {
9707cc4b 1159 u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
a413c99f
RB
1160 u64 size = min_alignment;
1161 u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);
1162
1163 /* avoid -ENOSPC on very small hole setups */
1164 if (hole_size < 3 * min_alignment)
1165 continue;
1166
1167 /* we can't test < 4k alignment due to flags being encoded in lower bits */
1168 if (min_alignment != I915_GTT_PAGE_SIZE_4K) {
1169 err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
1170 /* misaligned should error with -EINVAL */
1171 if (!err)
1172 err = -EBADSLT;
1173 if (err != -EINVAL)
1174 return err;
1175 }
1176
1177 /* test for vma->size expansion to min page size */
1178 err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
1179 if (err)
1180 return err;
1181
1182 /* test for intermediate size not expanding vma->size for large alignments */
1183 err = misaligned_case(vm, mr, addr, size / 2, flags);
1184 if (err)
1185 return err;
1186 }
1187
1188 return 0;
1189}
1190
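/*
 * Run one of the hole-filling exercises above over a freshly created full
 * ppGTT, covering its entire address range.
 */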
8d28ba45 1191static int exercise_ppgtt(struct drm_i915_private *dev_priv,
2c86e55d 1192 int (*func)(struct i915_address_space *vm,
8d28ba45
CW
1193 u64 hole_start, u64 hole_end,
1194 unsigned long end_time))
1195{
ab53497b 1196 struct i915_ppgtt *ppgtt;
8d28ba45 1197 IGT_TIMEOUT(end_time);
a8c9a7f5 1198 struct file *file;
8d28ba45
CW
1199 int err;
1200
4bdafb9d 1201 if (!HAS_FULL_PPGTT(dev_priv))
8d28ba45
CW
1202 return 0;
1203
1204 file = mock_file(dev_priv);
1205 if (IS_ERR(file))
1206 return PTR_ERR(file);
1207
8c2699fa 1208 ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
8d28ba45
CW
1209 if (IS_ERR(ppgtt)) {
1210 err = PTR_ERR(ppgtt);
2850748e 1211 goto out_free;
8d28ba45 1212 }
82ad6443 1213 GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
e1a7ab4f 1214 assert_vm_alive(&ppgtt->vm);
8d28ba45 1215
2c86e55d 1216 err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
8d28ba45 1217
e568ac38 1218 i915_vm_put(&ppgtt->vm);
8d28ba45 1219
2850748e 1220out_free:
a8c9a7f5 1221 fput(file);
8d28ba45
CW
1222 return err;
1223}
1224
1225static int igt_ppgtt_fill(void *arg)
1226{
1227 return exercise_ppgtt(arg, fill_hole);
1228}
1229
6e32ab3d
CW
1230static int igt_ppgtt_walk(void *arg)
1231{
1232 return exercise_ppgtt(arg, walk_hole);
1233}
1234
7db4dcea
CW
1235static int igt_ppgtt_pot(void *arg)
1236{
1237 return exercise_ppgtt(arg, pot_hole);
1238}
1239
5c3bff48
CW
1240static int igt_ppgtt_drunk(void *arg)
1241{
1242 return exercise_ppgtt(arg, drunk_hole);
1243}
1244
4a6f13fc
CW
1245static int igt_ppgtt_lowlevel(void *arg)
1246{
1247 return exercise_ppgtt(arg, lowlevel_hole);
1248}
1249
aae4a3d8
CW
1250static int igt_ppgtt_shrink(void *arg)
1251{
1252 return exercise_ppgtt(arg, shrink_hole);
1253}
1254
fe215c8b
MA
1255static int igt_ppgtt_shrink_boom(void *arg)
1256{
1257 return exercise_ppgtt(arg, shrink_boom);
1258}
1259
a413c99f
RB
1260static int igt_ppgtt_misaligned_pin(void *arg)
1261{
1262 return exercise_ppgtt(arg, misaligned_pin);
1263}
1264
4f0f586b
ST
1265static int sort_holes(void *priv, const struct list_head *A,
1266 const struct list_head *B)
62c981cf
CW
1267{
1268 struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1269 struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1270
1271 if (a->start < b->start)
1272 return -1;
1273 else
1274 return 1;
1275}
1276
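/*
 * Run one of the hole-filling exercises over each hole currently present in
 * the global GTT, restarting the walk after every pass since the exercise
 * itself may have rearranged the drm_mm.
 */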
1277static int exercise_ggtt(struct drm_i915_private *i915,
2c86e55d 1278 int (*func)(struct i915_address_space *vm,
62c981cf
CW
1279 u64 hole_start, u64 hole_end,
1280 unsigned long end_time))
1281{
17190a34 1282 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
62c981cf
CW
1283 u64 hole_start, hole_end, last = 0;
1284 struct drm_mm_node *node;
1285 IGT_TIMEOUT(end_time);
4fe95b04 1286 int err = 0;
62c981cf 1287
62c981cf 1288restart:
82ad6443
CW
1289 list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1290 drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
62c981cf
CW
1291 if (hole_start < last)
1292 continue;
1293
82ad6443
CW
1294 if (ggtt->vm.mm.color_adjust)
1295 ggtt->vm.mm.color_adjust(node, 0,
1296 &hole_start, &hole_end);
62c981cf
CW
1297 if (hole_start >= hole_end)
1298 continue;
1299
2c86e55d 1300 err = func(&ggtt->vm, hole_start, hole_end, end_time);
62c981cf
CW
1301 if (err)
1302 break;
1303
1304 /* As we have manipulated the drm_mm, the list may be corrupt */
1305 last = hole_end;
1306 goto restart;
1307 }
62c981cf
CW
1308
1309 return err;
1310}
1311
1312static int igt_ggtt_fill(void *arg)
1313{
1314 return exercise_ggtt(arg, fill_hole);
1315}
1316
6e32ab3d
CW
1317static int igt_ggtt_walk(void *arg)
1318{
1319 return exercise_ggtt(arg, walk_hole);
1320}
1321
7db4dcea
CW
1322static int igt_ggtt_pot(void *arg)
1323{
1324 return exercise_ggtt(arg, pot_hole);
1325}
1326
5c3bff48
CW
1327static int igt_ggtt_drunk(void *arg)
1328{
1329 return exercise_ggtt(arg, drunk_hole);
1330}
1331
4a6f13fc
CW
1332static int igt_ggtt_lowlevel(void *arg)
1333{
1334 return exercise_ggtt(arg, lowlevel_hole);
1335}
1336
a413c99f
RB
1337static int igt_ggtt_misaligned_pin(void *arg)
1338{
1339 return exercise_ggtt(arg, misaligned_pin);
1340}
1341
af85f50d
CW
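/*
 * igt_ggtt_page: insert individual pages into the mappable aperture at
 * scattered offsets, write a distinct value through each GGTT mapping and
 * read it back in a different order to check insert_page() addressing.
 */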
1342static int igt_ggtt_page(void *arg)
1343{
1344 const unsigned int count = PAGE_SIZE/sizeof(u32);
1345 I915_RND_STATE(prng);
1346 struct drm_i915_private *i915 = arg;
17190a34 1347 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
af85f50d 1348 struct drm_i915_gem_object *obj;
c9d08cc3 1349 intel_wakeref_t wakeref;
af85f50d
CW
1350 struct drm_mm_node tmp;
1351 unsigned int *order, n;
1352 int err;
1353
e60f7bb7
MA
1354 if (!i915_ggtt_has_aperture(ggtt))
1355 return 0;
1356
af85f50d 1357 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
2850748e
CW
1358 if (IS_ERR(obj))
1359 return PTR_ERR(obj);
af85f50d 1360
480ae795 1361 err = i915_gem_object_pin_pages_unlocked(obj);
af85f50d
CW
1362 if (err)
1363 goto out_free;
1364
1365 memset(&tmp, 0, sizeof(tmp));
b006869c 1366 mutex_lock(&ggtt->vm.mutex);
82ad6443 1367 err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
85a9c0bc 1368 count * PAGE_SIZE, 0,
af85f50d
CW
1369 I915_COLOR_UNEVICTABLE,
1370 0, ggtt->mappable_end,
1371 DRM_MM_INSERT_LOW);
b006869c 1372 mutex_unlock(&ggtt->vm.mutex);
af85f50d
CW
1373 if (err)
1374 goto out_unpin;
1375
d858d569 1376 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
85a9c0bc
CW
1377
1378 for (n = 0; n < count; n++) {
1379 u64 offset = tmp.start + n * PAGE_SIZE;
1380
82ad6443
CW
1381 ggtt->vm.insert_page(&ggtt->vm,
1382 i915_gem_object_get_dma_address(obj, 0),
1383 offset, I915_CACHE_NONE, 0);
85a9c0bc
CW
1384 }
1385
af85f50d
CW
1386 order = i915_random_order(count, &prng);
1387 if (!order) {
1388 err = -ENOMEM;
1389 goto out_remove;
1390 }
1391
1392 for (n = 0; n < count; n++) {
1393 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1394 u32 __iomem *vaddr;
1395
73ebd503 1396 vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
af85f50d
CW
1397 iowrite32(n, vaddr + n);
1398 io_mapping_unmap_atomic(vaddr);
af85f50d 1399 }
a1c8a09e 1400 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
af85f50d
CW
1401
1402 i915_random_reorder(order, count, &prng);
1403 for (n = 0; n < count; n++) {
1404 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1405 u32 __iomem *vaddr;
1406 u32 val;
1407
73ebd503 1408 vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
af85f50d
CW
1409 val = ioread32(vaddr + n);
1410 io_mapping_unmap_atomic(vaddr);
1411
af85f50d
CW
1412 if (val != n) {
1413 pr_err("insert page failed: found %d, expected %d\n",
1414 val, n);
1415 err = -EINVAL;
1416 break;
1417 }
1418 }
1419
1420 kfree(order);
1421out_remove:
82ad6443 1422 ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
d858d569 1423 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
b006869c 1424 mutex_lock(&ggtt->vm.mutex);
af85f50d 1425 drm_mm_remove_node(&tmp);
b006869c 1426 mutex_unlock(&ggtt->vm.mutex);
af85f50d
CW
1427out_unpin:
1428 i915_gem_object_unpin_pages(obj);
1429out_free:
1430 i915_gem_object_put(obj);
af85f50d
CW
1431 return err;
1432}
1433
e619cd0d
CW
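/*
 * Fake enough of the vma binding state (pinned pages, pages_count and a
 * place on the vm's bound list) for the mock GTT tests, without performing
 * a real bind.
 */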
1434static void track_vma_bind(struct i915_vma *vma)
1435{
1436 struct drm_i915_gem_object *obj = vma->obj;
1437
e619cd0d
CW
1438 __i915_gem_object_pin_pages(obj);
1439
0b4d1f0e 1440 GEM_BUG_ON(atomic_read(&vma->pages_count));
2850748e
CW
1441 atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1442 __i915_gem_object_pin_pages(obj);
e619cd0d 1443 vma->pages = obj->mm.pages;
39a2bd34 1444 vma->resource->bi.pages = vma->pages;
09d7e46b
CW
1445
1446 mutex_lock(&vma->vm->mutex);
e1a7ab4f 1447 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
09d7e46b 1448 mutex_unlock(&vma->vm->mutex);
e619cd0d
CW
1449}
1450
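/*
 * Run a hole-filling exercise against the vm of a mock context, capped at
 * the amount of physical memory available.
 */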
210e8ac4 1451static int exercise_mock(struct drm_i915_private *i915,
2c86e55d 1452 int (*func)(struct i915_address_space *vm,
210e8ac4
CW
1453 u64 hole_start, u64 hole_end,
1454 unsigned long end_time))
1455{
ca79b0c2 1456 const u64 limit = totalram_pages() << PAGE_SHIFT;
a4e7ccda 1457 struct i915_address_space *vm;
210e8ac4 1458 struct i915_gem_context *ctx;
210e8ac4
CW
1459 IGT_TIMEOUT(end_time);
1460 int err;
1461
1462 ctx = mock_context(i915, "mock");
1463 if (!ctx)
1464 return -ENOMEM;
1465
c6d04e48 1466 vm = i915_gem_context_get_eb_vm(ctx);
2c86e55d 1467 err = func(vm, 0, min(vm->total, limit), end_time);
a4e7ccda 1468 i915_vm_put(vm);
210e8ac4
CW
1469
1470 mock_context_close(ctx);
1471 return err;
1472}
1473
1474static int igt_mock_fill(void *arg)
1475{
c95e7ce3
CW
1476 struct i915_ggtt *ggtt = arg;
1477
1478 return exercise_mock(ggtt->vm.i915, fill_hole);
210e8ac4
CW
1479}
1480
1481static int igt_mock_walk(void *arg)
1482{
c95e7ce3
CW
1483 struct i915_ggtt *ggtt = arg;
1484
1485 return exercise_mock(ggtt->vm.i915, walk_hole);
210e8ac4
CW
1486}
1487
7db4dcea
CW
1488static int igt_mock_pot(void *arg)
1489{
c95e7ce3
CW
1490 struct i915_ggtt *ggtt = arg;
1491
1492 return exercise_mock(ggtt->vm.i915, pot_hole);
7db4dcea
CW
1493}
1494
210e8ac4
CW
1495static int igt_mock_drunk(void *arg)
1496{
c95e7ce3
CW
1497 struct i915_ggtt *ggtt = arg;
1498
1499 return exercise_mock(ggtt->vm.i915, drunk_hole);
210e8ac4
CW
1500}
1501
e1a4bbb6
TH
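/*
 * Reserve an exact GTT range for the vma with i915_gem_gtt_reserve() and,
 * on success, give the vma a freshly initialised vma resource.
 */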
1502static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
1503{
1504 struct i915_address_space *vm = vma->vm;
1505 struct i915_vma_resource *vma_res;
1506 struct drm_i915_gem_object *obj = vma->obj;
1507 int err;
1508
1509 vma_res = i915_vma_resource_alloc();
1510 if (IS_ERR(vma_res))
1511 return PTR_ERR(vma_res);
1512
1513 mutex_lock(&vm->mutex);
7e00897b 1514 err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
e1a4bbb6
TH
1515 offset,
1516 obj->cache_level,
1517 0);
1518 if (!err) {
39a2bd34 1519 i915_vma_resource_init_from_vma(vma_res, vma);
e1a4bbb6
TH
1520 vma->resource = vma_res;
1521 } else {
1522 kfree(vma_res);
1523 }
1524 mutex_unlock(&vm->mutex);
1525
1526 return err;
1527}
1528
e619cd0d
CW
1529static int igt_gtt_reserve(void *arg)
1530{
c95e7ce3 1531 struct i915_ggtt *ggtt = arg;
e619cd0d 1532 struct drm_i915_gem_object *obj, *on;
dfe324f3 1533 I915_RND_STATE(prng);
e619cd0d
CW
1534 LIST_HEAD(objects);
1535 u64 total;
6e128141 1536 int err = -ENODEV;
e619cd0d
CW
1537
1538 /* i915_gem_gtt_reserve() tries to reserve the precise range
1539 * for the node, and evicts if it has to. So our test checks that
1540 * it can give us the requested space and prevent overlaps.
1541 */
1542
1543 /* Start by filling the GGTT */
1544 for (total = 0;
c95e7ce3
CW
1545 total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1546 total += 2 * I915_GTT_PAGE_SIZE) {
e619cd0d
CW
1547 struct i915_vma *vma;
1548
c95e7ce3
CW
1549 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1550 2 * PAGE_SIZE);
e619cd0d
CW
1551 if (IS_ERR(obj)) {
1552 err = PTR_ERR(obj);
1553 goto out;
1554 }
1555
480ae795 1556 err = i915_gem_object_pin_pages_unlocked(obj);
e619cd0d
CW
1557 if (err) {
1558 i915_gem_object_put(obj);
1559 goto out;
1560 }
1561
1562 list_add(&obj->st_link, &objects);
c95e7ce3 1563 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
e619cd0d
CW
1564 if (IS_ERR(vma)) {
1565 err = PTR_ERR(vma);
1566 goto out;
1567 }
1568
e1a4bbb6 1569 err = reserve_gtt_with_resource(vma, total);
e619cd0d
CW
1570 if (err) {
1571 pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
c95e7ce3 1572 total, ggtt->vm.total, err);
e619cd0d
CW
1573 goto out;
1574 }
1575 track_vma_bind(vma);
1576
1577 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1578 if (vma->node.start != total ||
1579 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
9125963a 1580 pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
e619cd0d
CW
1581 vma->node.start, vma->node.size,
1582 total, 2*I915_GTT_PAGE_SIZE);
1583 err = -EINVAL;
1584 goto out;
1585 }
1586 }
1587
1588 /* Now we start forcing evictions */
1589 for (total = I915_GTT_PAGE_SIZE;
c95e7ce3
CW
1590 total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1591 total += 2 * I915_GTT_PAGE_SIZE) {
e619cd0d
CW
1592 struct i915_vma *vma;
1593
c95e7ce3
CW
1594 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1595 2 * PAGE_SIZE);
e619cd0d
CW
1596 if (IS_ERR(obj)) {
1597 err = PTR_ERR(obj);
1598 goto out;
1599 }
1600
480ae795 1601 err = i915_gem_object_pin_pages_unlocked(obj);
e619cd0d
CW
1602 if (err) {
1603 i915_gem_object_put(obj);
1604 goto out;
1605 }
1606
1607 list_add(&obj->st_link, &objects);
1608
c95e7ce3 1609 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
e619cd0d
CW
1610 if (IS_ERR(vma)) {
1611 err = PTR_ERR(vma);
1612 goto out;
1613 }
1614
e1a4bbb6 1615 err = reserve_gtt_with_resource(vma, total);
e619cd0d
CW
1616 if (err) {
1617 pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
c95e7ce3 1618 total, ggtt->vm.total, err);
e619cd0d
CW
1619 goto out;
1620 }
1621 track_vma_bind(vma);
1622
1623 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1624 if (vma->node.start != total ||
1625 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
9125963a 1626 pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
e619cd0d
CW
1627 vma->node.start, vma->node.size,
1628 total, 2*I915_GTT_PAGE_SIZE);
1629 err = -EINVAL;
1630 goto out;
1631 }
1632 }
1633
1634 /* And then try at random */
1635 list_for_each_entry_safe(obj, on, &objects, st_link) {
1636 struct i915_vma *vma;
1637 u64 offset;
1638
c95e7ce3 1639 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
e619cd0d
CW
1640 if (IS_ERR(vma)) {
1641 err = PTR_ERR(vma);
1642 goto out;
1643 }
1644
0f341974 1645 err = i915_vma_unbind_unlocked(vma);
e619cd0d
CW
1646 if (err) {
1647 pr_err("i915_vma_unbind failed with err=%d!\n", err);
1648 goto out;
1649 }
1650
dfe324f3
CW
1651 offset = igt_random_offset(&prng,
1652 0, ggtt->vm.total,
1653 2 * I915_GTT_PAGE_SIZE,
1654 I915_GTT_MIN_ALIGNMENT);
e619cd0d 1655
e1a4bbb6 1656 err = reserve_gtt_with_resource(vma, offset);
e619cd0d
CW
1657 if (err) {
1658 pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
c95e7ce3 1659 total, ggtt->vm.total, err);
e619cd0d
CW
1660 goto out;
1661 }
1662 track_vma_bind(vma);
1663
1664 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1665 if (vma->node.start != offset ||
1666 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
9125963a 1667 pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
e619cd0d
CW
1668 vma->node.start, vma->node.size,
1669 offset, 2*I915_GTT_PAGE_SIZE);
1670 err = -EINVAL;
1671 goto out;
1672 }
1673 }
1674
1675out:
1676 list_for_each_entry_safe(obj, on, &objects, st_link) {
1677 i915_gem_object_unpin_pages(obj);
1678 i915_gem_object_put(obj);
1679 }
1680 return err;
1681}
1682
e1a4bbb6
TH
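/*
 * As reserve_gtt_with_resource(), but let i915_gem_gtt_insert() pick a
 * suitable free range anywhere in the vm.
 */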
1683static int insert_gtt_with_resource(struct i915_vma *vma)
1684{
1685 struct i915_address_space *vm = vma->vm;
1686 struct i915_vma_resource *vma_res;
1687 struct drm_i915_gem_object *obj = vma->obj;
1688 int err;
1689
1690 vma_res = i915_vma_resource_alloc();
1691 if (IS_ERR(vma_res))
1692 return PTR_ERR(vma_res);
1693
1694 mutex_lock(&vm->mutex);
7e00897b 1695 err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
e1a4bbb6
TH
1696 obj->cache_level, 0, vm->total, 0);
1697 if (!err) {
39a2bd34 1698 i915_vma_resource_init_from_vma(vma_res, vma);
e1a4bbb6
TH
1699 vma->resource = vma_res;
1700 } else {
1701 kfree(vma_res);
1702 }
1703 mutex_unlock(&vm->mutex);
1704
1705 return err;
1706}
1707
5f32616e
CW
1708static int igt_gtt_insert(void *arg)
1709{
c95e7ce3 1710 struct i915_ggtt *ggtt = arg;
5f32616e
CW
1711 struct drm_i915_gem_object *obj, *on;
1712 struct drm_mm_node tmp = {};
1713 const struct invalid_insert {
1714 u64 size;
1715 u64 alignment;
1716 u64 start, end;
1717 } invalid_insert[] = {
1718 {
c95e7ce3
CW
1719 ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1720 0, ggtt->vm.total,
5f32616e
CW
1721 },
1722 {
1723 2*I915_GTT_PAGE_SIZE, 0,
1724 0, I915_GTT_PAGE_SIZE,
1725 },
1726 {
1727 -(u64)I915_GTT_PAGE_SIZE, 0,
1728 0, 4*I915_GTT_PAGE_SIZE,
1729 },
1730 {
1731 -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1732 0, 4*I915_GTT_PAGE_SIZE,
1733 },
1734 {
1735 I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1736 I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1737 },
1738 {}
1739 }, *ii;
1740 LIST_HEAD(objects);
1741 u64 total;
6e128141 1742 int err = -ENODEV;
5f32616e
CW
1743
1744 /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1745 * to the node, evicting if required.
1746 */
1747
1748 /* Check a couple of obviously invalid requests */
1749 for (ii = invalid_insert; ii->size; ii++) {
2850748e 1750 mutex_lock(&ggtt->vm.mutex);
7e00897b 1751 err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
5f32616e
CW
1752 ii->size, ii->alignment,
1753 I915_COLOR_UNEVICTABLE,
1754 ii->start, ii->end,
1755 0);
2850748e 1756 mutex_unlock(&ggtt->vm.mutex);
5f32616e
CW
1757 if (err != -ENOSPC) {
1758 pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1759 ii->size, ii->alignment, ii->start, ii->end,
1760 err);
1761 return -EINVAL;
1762 }
1763 }
1764
1765 /* Start by filling the GGTT */
1766 for (total = 0;
c95e7ce3 1767 total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
5f32616e
CW
1768 total += I915_GTT_PAGE_SIZE) {
1769 struct i915_vma *vma;
1770
c95e7ce3
CW
1771 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1772 I915_GTT_PAGE_SIZE);
5f32616e
CW
1773 if (IS_ERR(obj)) {
1774 err = PTR_ERR(obj);
1775 goto out;
1776 }
1777
480ae795 1778 err = i915_gem_object_pin_pages_unlocked(obj);
5f32616e
CW
1779 if (err) {
1780 i915_gem_object_put(obj);
1781 goto out;
1782 }
1783
1784 list_add(&obj->st_link, &objects);
1785
c95e7ce3 1786 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
5f32616e
CW
1787 if (IS_ERR(vma)) {
1788 err = PTR_ERR(vma);
1789 goto out;
1790 }
1791
e1a4bbb6 1792 err = insert_gtt_with_resource(vma);
5f32616e
CW
1793 if (err == -ENOSPC) {
1794 /* maxed out the GGTT space */
1795 i915_gem_object_put(obj);
1796 break;
1797 }
1798 if (err) {
1799 pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
c95e7ce3 1800 total, ggtt->vm.total, err);
5f32616e
CW
1801 goto out;
1802 }
1803 track_vma_bind(vma);
1804 __i915_vma_pin(vma);
1805
1806 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1807 }
1808
1809 list_for_each_entry(obj, &objects, st_link) {
1810 struct i915_vma *vma;
1811
c95e7ce3 1812 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
5f32616e
CW
1813 if (IS_ERR(vma)) {
1814 err = PTR_ERR(vma);
1815 goto out;
1816 }
1817
1818 if (!drm_mm_node_allocated(&vma->node)) {
1819 pr_err("VMA was unexpectedly evicted!\n");
1820 err = -EINVAL;
1821 goto out;
1822 }
1823
1824 __i915_vma_unpin(vma);
1825 }
1826
1827 /* If we then reinsert, we should find the same hole */
1828 list_for_each_entry_safe(obj, on, &objects, st_link) {
1829 struct i915_vma *vma;
1830 u64 offset;
1831
c95e7ce3 1832 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
5f32616e
CW
1833 if (IS_ERR(vma)) {
1834 err = PTR_ERR(vma);
1835 goto out;
1836 }
1837
1838 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1839 offset = vma->node.start;
1840
0f341974 1841 err = i915_vma_unbind_unlocked(vma);
5f32616e
CW
1842 if (err) {
1843 pr_err("i915_vma_unbind failed with err=%d!\n", err);
1844 goto out;
1845 }
1846
e1a4bbb6 1847 err = insert_gtt_with_resource(vma);
5f32616e
CW
1848 if (err) {
1849 pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
c95e7ce3 1850 total, ggtt->vm.total, err);
5f32616e
CW
1851 goto out;
1852 }
1853 track_vma_bind(vma);
1854
1855 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1856 if (vma->node.start != offset) {
1857 pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1858 offset, vma->node.start);
1859 err = -EINVAL;
1860 goto out;
1861 }
1862 }
1863
1864 /* And then force evictions */
1865 for (total = 0;
c95e7ce3
CW
1866 total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1867 total += 2 * I915_GTT_PAGE_SIZE) {
5f32616e
CW
1868 struct i915_vma *vma;
1869
c95e7ce3
CW
1870 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1871 2 * I915_GTT_PAGE_SIZE);
5f32616e
CW
1872 if (IS_ERR(obj)) {
1873 err = PTR_ERR(obj);
1874 goto out;
1875 }
1876
480ae795 1877 err = i915_gem_object_pin_pages_unlocked(obj);
5f32616e
CW
1878 if (err) {
1879 i915_gem_object_put(obj);
1880 goto out;
1881 }
1882
1883 list_add(&obj->st_link, &objects);
1884
c95e7ce3 1885 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
5f32616e
CW
1886 if (IS_ERR(vma)) {
1887 err = PTR_ERR(vma);
1888 goto out;
1889 }
1890
e1a4bbb6 1891 err = insert_gtt_with_resource(vma);
5f32616e
CW
1892 if (err) {
1893 pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
c95e7ce3 1894 total, ggtt->vm.total, err);
5f32616e
CW
1895 goto out;
1896 }
1897 track_vma_bind(vma);
1898
1899 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1900 }
1901
1902out:
1903 list_for_each_entry_safe(obj, on, &objects, st_link) {
1904 i915_gem_object_unpin_pages(obj);
1905 i915_gem_object_put(obj);
1906 }
1907 return err;
1908}
1909
e619cd0d
CW
1910int i915_gem_gtt_mock_selftests(void)
1911{
1912 static const struct i915_subtest tests[] = {
210e8ac4
CW
1913 SUBTEST(igt_mock_drunk),
1914 SUBTEST(igt_mock_walk),
7db4dcea 1915 SUBTEST(igt_mock_pot),
210e8ac4 1916 SUBTEST(igt_mock_fill),
e619cd0d 1917 SUBTEST(igt_gtt_reserve),
5f32616e 1918 SUBTEST(igt_gtt_insert),
e619cd0d
CW
1919 };
1920 struct drm_i915_private *i915;
cdeea858 1921 struct intel_gt *gt;
e619cd0d
CW
1922 int err;
1923
1924 i915 = mock_gem_device();
1925 if (!i915)
1926 return -ENOMEM;
1927
cdeea858
AS
1928 /* allocate the ggtt */
1929 err = intel_gt_assign_ggtt(to_gt(i915));
1930 if (err)
83e3a215 1931 goto out_put;
c95e7ce3 1932
cdeea858
AS
1933 gt = to_gt(i915);
1934
1935 mock_init_ggtt(gt);
1936
1937 err = i915_subtests(tests, gt->ggtt);
2850748e 1938
c95e7ce3 1939 mock_device_flush(i915);
c95e7ce3 1940 i915_gem_drain_freed_objects(i915);
cdeea858
AS
1941 mock_fini_ggtt(gt->ggtt);
1942
83e3a215 1943out_put:
82be0d75 1944 mock_destroy_device(i915);
e619cd0d
CW
1945 return err;
1946}
1947
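/*
 * A minimal sketch of the mock-device bracketing used by
 * i915_gem_gtt_mock_selftests() above, factored into a hypothetical
 * helper (run_with_mock_ggtt is a made-up name). It mirrors the
 * setup/teardown order of that function: create the mock device, assign
 * and initialise the mock GGTT, run the caller's tests against it, then
 * flush, drain and tear everything down in reverse.
 */
static int run_with_mock_ggtt(int (*fn)(struct i915_ggtt *ggtt))
{
	struct drm_i915_private *i915;
	struct intel_gt *gt;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = intel_gt_assign_ggtt(to_gt(i915));
	if (err)
		goto out_put;

	gt = to_gt(i915);
	mock_init_ggtt(gt);

	err = fn(gt->ggtt);

	mock_device_flush(i915);
	i915_gem_drain_freed_objects(i915);
	mock_fini_ggtt(gt->ggtt);
out_put:
	mock_destroy_device(i915);
	return err;
}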
a47e788c
CW
1948static int context_sync(struct intel_context *ce)
1949{
1950 struct i915_request *rq;
1951 long timeout;
1952
1953 rq = intel_context_create_request(ce);
1954 if (IS_ERR(rq))
1955 return PTR_ERR(rq);
1956
1957 i915_request_get(rq);
1958 i915_request_add(rq);
1959
1960 timeout = i915_request_wait(rq, 0, HZ / 5);
1961 i915_request_put(rq);
1962
1963 return timeout < 0 ? -EIO : 0;
1964}
1965
1966static struct i915_request *
1967submit_batch(struct intel_context *ce, u64 addr)
1968{
1969 struct i915_request *rq;
1970 int err;
1971
1972 rq = intel_context_create_request(ce);
1973 if (IS_ERR(rq))
1974 return rq;
1975
1976 err = 0;
1977 if (rq->engine->emit_init_breadcrumb) /* detect a hang */
1978 err = rq->engine->emit_init_breadcrumb(rq);
1979 if (err == 0)
1980 err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1981
1982 if (err == 0)
1983 i915_request_get(rq);
1984 i915_request_add(rq);
1985
1986 return err ? ERR_PTR(err) : rq;
1987}
1988
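/*
 * Illustrative only: submit_batch() above is later paired with
 * context_sync() inside igt_cs_tlb(); a combined form would look roughly
 * like this (submit_and_sync is a made-up name). submit_batch() returns a
 * request reference on success, so it must be released before waiting for
 * the context to idle.
 */
static int submit_and_sync(struct intel_context *ce, u64 addr)
{
	struct i915_request *rq;

	rq = submit_batch(ce, addr);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_put(rq);
	return context_sync(ce);
}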
1989static u32 *spinner(u32 *batch, int i)
1990{
1991 return batch + i * 64 / sizeof(*batch) + 4;
1992}
1993
1994static void end_spin(u32 *batch, int i)
1995{
1996 *spinner(batch, i) = MI_BATCH_BUFFER_END;
1997 wmb();
1998}
1999
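/*
 * A sketch of the per-slot layout that spinner() and end_spin() above
 * assume. igt_cs_tlb() below packs one batch into each 64-byte slot; this
 * made-up helper shows the gen8+ form that the test builds inline: a
 * store-dword recording which slot ran, then an MI_BATCH_BUFFER_START
 * whose target is patched to point back at the slot itself, so the batch
 * spins until end_spin() flips dword 4 to MI_BATCH_BUFFER_END.
 */
static void emit_spin_slot(u32 *batch, int i, u64 record, u64 loop)
{
	u32 *cs = batch + i * 64 / sizeof(*cs);

	cs[0] = MI_STORE_DWORD_IMM_GEN4;
	cs[1] = lower_32_bits(record);	/* where the slot index is written */
	cs[2] = upper_32_bits(record);
	cs[3] = i;
	cs[4] = MI_NOOP;		/* end_spin() replaces this with MI_BATCH_BUFFER_END */
	cs[5] = MI_BATCH_BUFFER_START_GEN8;
	cs[6] = lower_32_bits(loop);	/* jump back to this slot: busy spin */
	cs[7] = upper_32_bits(loop);
}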
2000static int igt_cs_tlb(void *arg)
2001{
2002 const unsigned int count = PAGE_SIZE / 64;
2003 const unsigned int chunk_size = count * PAGE_SIZE;
2004 struct drm_i915_private *i915 = arg;
2005 struct drm_i915_gem_object *bbe, *act, *out;
2006 struct i915_gem_engines_iter it;
2007 struct i915_address_space *vm;
2008 struct i915_gem_context *ctx;
2009 struct intel_context *ce;
a47e788c 2010 struct i915_vma *vma;
dfe324f3 2011 I915_RND_STATE(prng);
a8c9a7f5 2012 struct file *file;
a47e788c
CW
2013 unsigned int i;
2014 u32 *result;
2015 u32 *batch;
2016 int err = 0;
2017
2018 /*
 2019 	 * Our mission here is to fool the hardware into executing from a
 2020 	 * stale translation, as it has not seen the batch move (due to the
 2021 	 * missing TLB invalidate).
2022 */
2023
2024 file = mock_file(i915);
2025 if (IS_ERR(file))
2026 return PTR_ERR(file);
2027
a47e788c
CW
2028 ctx = live_context(i915, file);
2029 if (IS_ERR(ctx)) {
2030 err = PTR_ERR(ctx);
2031 goto out_unlock;
2032 }
2033
c6d04e48 2034 vm = i915_gem_context_get_eb_vm(ctx);
a4e7ccda
CW
2035 if (i915_is_ggtt(vm))
2036 goto out_vm;
a47e788c
CW
2037
 2038 	/* Create two pages: the dummy we prefill the TLB with, and the intended target */
2039 bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
2040 if (IS_ERR(bbe)) {
2041 err = PTR_ERR(bbe);
a4e7ccda 2042 goto out_vm;
a47e788c
CW
2043 }
2044
480ae795 2045 batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
a47e788c
CW
2046 if (IS_ERR(batch)) {
2047 err = PTR_ERR(batch);
2048 goto out_put_bbe;
2049 }
2050 memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
2051 i915_gem_object_flush_map(bbe);
2052 i915_gem_object_unpin_map(bbe);
2053
2054 act = i915_gem_object_create_internal(i915, PAGE_SIZE);
2055 if (IS_ERR(act)) {
2056 err = PTR_ERR(act);
2057 goto out_put_bbe;
2058 }
2059
 2060 	/* Track the execution of each request by writing into a different slot */
480ae795 2061 batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
a47e788c
CW
2062 if (IS_ERR(batch)) {
2063 err = PTR_ERR(batch);
2064 goto out_put_act;
2065 }
2066 for (i = 0; i < count; i++) {
2067 u32 *cs = batch + i * 64 / sizeof(*cs);
2068 u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
2069
651e7d48 2070 GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
a47e788c 2071 cs[0] = MI_STORE_DWORD_IMM_GEN4;
651e7d48 2072 if (GRAPHICS_VER(i915) >= 8) {
a47e788c
CW
2073 cs[1] = lower_32_bits(addr);
2074 cs[2] = upper_32_bits(addr);
2075 cs[3] = i;
2076 cs[4] = MI_NOOP;
2077 cs[5] = MI_BATCH_BUFFER_START_GEN8;
2078 } else {
2079 cs[1] = 0;
2080 cs[2] = lower_32_bits(addr);
2081 cs[3] = i;
2082 cs[4] = MI_NOOP;
2083 cs[5] = MI_BATCH_BUFFER_START;
2084 }
2085 }
2086
2087 out = i915_gem_object_create_internal(i915, PAGE_SIZE);
2088 if (IS_ERR(out)) {
2089 err = PTR_ERR(out);
2090 goto out_put_batch;
2091 }
2092 i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
2093
2094 vma = i915_vma_instance(out, vm, NULL);
2095 if (IS_ERR(vma)) {
2096 err = PTR_ERR(vma);
3d480fe1 2097 goto out_put_out;
a47e788c
CW
2098 }
2099
2100 err = i915_vma_pin(vma, 0, 0,
2101 PIN_USER |
2102 PIN_OFFSET_FIXED |
2103 (vm->total - PAGE_SIZE));
2104 if (err)
2105 goto out_put_out;
2106 GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
2107
480ae795 2108 result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
a47e788c
CW
2109 if (IS_ERR(result)) {
2110 err = PTR_ERR(result);
2111 goto out_put_out;
2112 }
2113
2114 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2115 IGT_TIMEOUT(end_time);
2116 unsigned long pass = 0;
2117
2118 if (!intel_engine_can_store_dword(ce->engine))
2119 continue;
2120
2121 while (!__igt_timeout(end_time, NULL)) {
cd0452aa 2122 struct i915_vm_pt_stash stash = {};
a47e788c 2123 struct i915_request *rq;
480ae795 2124 struct i915_gem_ww_ctx ww;
39a2bd34 2125 struct i915_vma_resource *vma_res;
a47e788c
CW
2126 u64 offset;
2127
dfe324f3
CW
2128 offset = igt_random_offset(&prng,
2129 0, vm->total - PAGE_SIZE,
2130 chunk_size, PAGE_SIZE);
a47e788c 2131
a47e788c
CW
2132 memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
2133
2134 vma = i915_vma_instance(bbe, vm, NULL);
2135 if (IS_ERR(vma)) {
2136 err = PTR_ERR(vma);
2137 goto end;
2138 }
2139
0b4d1f0e
ML
2140 i915_gem_object_lock(bbe, NULL);
2141 err = i915_vma_get_pages(vma);
2142 i915_gem_object_unlock(bbe);
a47e788c
CW
2143 if (err)
2144 goto end;
2145
39a2bd34
TH
2146 vma_res = i915_vma_resource_alloc();
2147 if (IS_ERR(vma_res)) {
2148 i915_vma_put_pages(vma);
2149 err = PTR_ERR(vma_res);
2150 goto end;
2151 }
2152
480ae795
ML
2153 i915_gem_ww_ctx_init(&ww, false);
2154retry:
2155 err = i915_vm_lock_objects(vm, &ww);
2156 if (err)
2157 goto end_ww;
2158
cd0452aa
CW
2159 err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
2160 if (err)
480ae795 2161 goto end_ww;
cd0452aa 2162
529b9ec8 2163 err = i915_vm_map_pt_stash(vm, &stash);
480ae795
ML
2164 if (!err)
2165 vm->allocate_va_range(vm, &stash, offset, chunk_size);
cd0452aa 2166 i915_vm_free_pt_stash(vm, &stash);
480ae795
ML
2167end_ww:
2168 if (err == -EDEADLK) {
2169 err = i915_gem_ww_ctx_backoff(&ww);
2170 if (!err)
2171 goto retry;
2172 }
2173 i915_gem_ww_ctx_fini(&ww);
39a2bd34
TH
2174 if (err) {
2175 kfree(vma_res);
480ae795 2176 goto end;
39a2bd34 2177 }
cd0452aa 2178
39a2bd34 2179 i915_vma_resource_init_from_vma(vma_res, vma);
a47e788c
CW
2180 /* Prime the TLB with the dummy pages */
2181 for (i = 0; i < count; i++) {
39a2bd34
TH
2182 vma_res->start = offset + i * PAGE_SIZE;
2183 vm->insert_entries(vm, vma_res, I915_CACHE_NONE,
2184 0);
a47e788c 2185
39a2bd34 2186 rq = submit_batch(ce, vma_res->start);
a47e788c
CW
2187 if (IS_ERR(rq)) {
2188 err = PTR_ERR(rq);
39a2bd34
TH
2189 i915_vma_resource_fini(vma_res);
2190 kfree(vma_res);
a47e788c
CW
2191 goto end;
2192 }
2193 i915_request_put(rq);
2194 }
39a2bd34 2195 i915_vma_resource_fini(vma_res);
0b4d1f0e 2196 i915_vma_put_pages(vma);
a47e788c
CW
2197
2198 err = context_sync(ce);
2199 if (err) {
2200 pr_err("%s: dummy setup timed out\n",
2201 ce->engine->name);
39a2bd34 2202 kfree(vma_res);
a47e788c
CW
2203 goto end;
2204 }
2205
2206 vma = i915_vma_instance(act, vm, NULL);
2207 if (IS_ERR(vma)) {
39a2bd34 2208 kfree(vma_res);
a47e788c
CW
2209 err = PTR_ERR(vma);
2210 goto end;
2211 }
2212
0b4d1f0e
ML
2213 i915_gem_object_lock(act, NULL);
2214 err = i915_vma_get_pages(vma);
2215 i915_gem_object_unlock(act);
39a2bd34
TH
2216 if (err) {
2217 kfree(vma_res);
a47e788c 2218 goto end;
39a2bd34 2219 }
a47e788c 2220
39a2bd34 2221 i915_vma_resource_init_from_vma(vma_res, vma);
a47e788c
CW
 2222 		/* Replace the TLB-primed dummy pages with the target batches */
2223 for (i = 0; i < count; i++) {
2224 struct i915_request *rq;
2225 u32 *cs = batch + i * 64 / sizeof(*cs);
2226 u64 addr;
2227
39a2bd34
TH
2228 vma_res->start = offset + i * PAGE_SIZE;
2229 vm->insert_entries(vm, vma_res, I915_CACHE_NONE, 0);
a47e788c 2230
39a2bd34 2231 addr = vma_res->start + i * 64;
a47e788c
CW
2232 cs[4] = MI_NOOP;
2233 cs[6] = lower_32_bits(addr);
2234 cs[7] = upper_32_bits(addr);
2235 wmb();
2236
2237 rq = submit_batch(ce, addr);
2238 if (IS_ERR(rq)) {
2239 err = PTR_ERR(rq);
39a2bd34
TH
2240 i915_vma_resource_fini(vma_res);
2241 kfree(vma_res);
a47e788c
CW
2242 goto end;
2243 }
2244
2245 /* Wait until the context chain has started */
2246 if (i == 0) {
2247 while (READ_ONCE(result[i]) &&
2248 !i915_request_completed(rq))
2249 cond_resched();
2250 } else {
2251 end_spin(batch, i - 1);
2252 }
2253
2254 i915_request_put(rq);
2255 }
2256 end_spin(batch, count - 1);
2257
39a2bd34
TH
2258 i915_vma_resource_fini(vma_res);
2259 kfree(vma_res);
0b4d1f0e 2260 i915_vma_put_pages(vma);
a47e788c
CW
2261
2262 err = context_sync(ce);
2263 if (err) {
2264 pr_err("%s: writes timed out\n",
2265 ce->engine->name);
2266 goto end;
2267 }
2268
2269 for (i = 0; i < count; i++) {
2270 if (result[i] != i) {
2271 pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2272 ce->engine->name, pass,
2273 offset, i, result[i], i);
2274 err = -EINVAL;
2275 goto end;
2276 }
2277 }
2278
2279 vm->clear_range(vm, offset, chunk_size);
2280 pass++;
2281 }
2282 }
2283end:
7e805762 2284 if (igt_flush_test(i915))
a47e788c
CW
2285 err = -EIO;
2286 i915_gem_context_unlock_engines(ctx);
2287 i915_gem_object_unpin_map(out);
2288out_put_out:
2289 i915_gem_object_put(out);
2290out_put_batch:
2291 i915_gem_object_unpin_map(act);
2292out_put_act:
2293 i915_gem_object_put(act);
2294out_put_bbe:
2295 i915_gem_object_put(bbe);
a4e7ccda
CW
2296out_vm:
2297 i915_vm_put(vm);
a47e788c 2298out_unlock:
a8c9a7f5 2299 fput(file);
a47e788c
CW
2300 return err;
2301}
2302
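/*
 * The -EDEADLK handling inside igt_cs_tlb() is the usual i915_gem_ww_ctx
 * backoff-and-retry pattern. A minimal standalone sketch, using only the
 * calls seen above (lock_vm_for_update is a made-up name); real users do
 * their work between taking the locks and i915_gem_ww_ctx_fini():
 */
static int lock_vm_for_update(struct i915_address_space *vm)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_vm_lock_objects(vm, &ww);
	if (!err) {
		/* ... allocate and map the page-table stash here ... */
	}
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}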
1c42819a
CW
2303int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2304{
2305 static const struct i915_subtest tests[] = {
2306 SUBTEST(igt_ppgtt_alloc),
4a6f13fc 2307 SUBTEST(igt_ppgtt_lowlevel),
5c3bff48 2308 SUBTEST(igt_ppgtt_drunk),
6e32ab3d 2309 SUBTEST(igt_ppgtt_walk),
7db4dcea 2310 SUBTEST(igt_ppgtt_pot),
8d28ba45 2311 SUBTEST(igt_ppgtt_fill),
aae4a3d8 2312 SUBTEST(igt_ppgtt_shrink),
fe215c8b 2313 SUBTEST(igt_ppgtt_shrink_boom),
a413c99f 2314 SUBTEST(igt_ppgtt_misaligned_pin),
4a6f13fc 2315 SUBTEST(igt_ggtt_lowlevel),
5c3bff48 2316 SUBTEST(igt_ggtt_drunk),
6e32ab3d 2317 SUBTEST(igt_ggtt_walk),
7db4dcea 2318 SUBTEST(igt_ggtt_pot),
62c981cf 2319 SUBTEST(igt_ggtt_fill),
af85f50d 2320 SUBTEST(igt_ggtt_page),
a413c99f 2321 SUBTEST(igt_ggtt_misaligned_pin),
a47e788c 2322 SUBTEST(igt_cs_tlb),
1c42819a
CW
2323 };
2324
17190a34 2325 GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
62c981cf 2326
1c42819a
CW
2327 return i915_subtests(tests, i915);
2328}
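/*
 * Not part of this file: both entry points are expected to be hooked up
 * through the selftest() lists, roughly as below, and run by loading the
 * module with i915.mock_selftests=-1 or i915.live_selftests=-1
 * respectively (treat the exact header names and parameters as an
 * assumption here rather than a quotation).
 *
 *   selftests/i915_mock_selftests.h: selftest(gtt, i915_gem_gtt_mock_selftests)
 *   selftests/i915_live_selftests.h: selftest(gtt, i915_gem_gtt_live_selftests)
 */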