/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include <linux/prime_numbers.h>

#include "mock_drm.h"
#include "i915_random.h"

static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

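/*
 * Pick the largest page size supported by the device that still fits in the
 * remaining length, or 0 if even the smallest page size does not fit.
 */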
static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

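/*
 * Backing-store hook that allocates real pages for the object, greedily
 * filling it from the largest to the smallest page size in the page_mask.
 * The GFP flags (__GFP_NORETRY | __GFP_NOWARN) keep the allocator from
 * retrying hard or warning, so an oversized request fails quickly rather
 * than thrashing reclaim.
 */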
static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple: we want to greedily fill the object from
	 * largest to smallest page-size, while ensuring that we use *every*
	 * page-size as per the given page-mask.
	 */
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	obj->mm.madv = I915_MADV_DONTNEED;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

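/*
 * Create a GEM object backed by real huge pages, with the mix of page sizes
 * dictated by @page_mask.
 */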
static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	obj->mm.page_mask = page_mask;

	return obj;
}

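/*
 * The "fake" backing store fills the sg table without allocating any real
 * memory; the dma address of each chunk is simply set to its page size so
 * that it is suitably aligned. This lets the mock tests exercise very large
 * objects cheaply.
 */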
static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single);
	else
		i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

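/*
 * Verify that the page-size state tracked on the vma is self-consistent with
 * the object and only contains page sizes actually supported by the device.
 */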
static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err = 0;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_hw_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = 0;

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_close;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_hw_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err) {
			i915_vma_close(vma);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err) {
			i915_vma_close(vma);
			goto out_unpin;
		}

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err) {
				i915_vma_close(vma);
				goto out_unpin;
			}

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_vma_close(vma);

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

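/* Drop the vmas, pages and references for every object on the list. */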
static void close_object_list(struct list_head *objects,
			      struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (!IS_ERR(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}
}

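/*
 * Fill the mock ppgtt with objects covering a prime number of pages,
 * alternating between the multi-chunk and single-chunk fake backing stores,
 * and check that the GTT page sizes chosen for each binding are the ones we
 * expect.
 */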
static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_hw_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_hw_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table (2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- We only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_vma_close;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_vma_close(vma);

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	i915_vma_close(vma);
out_object_unpin:
	i915_gem_object_unpin_pages(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

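/*
 * Build a batch buffer that stores @val into one dword of every page covered
 * by @vma, starting at @offset, using the MI_STORE_DWORD_IMM variant
 * appropriate for the hardware generation.
 */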
static struct i915_vma *
gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	const int gen = INTEL_GEN(i915);
	unsigned int count = vma->size >> PAGE_SHIFT;
	struct drm_i915_gem_object *obj;
	struct i915_vma *batch;
	unsigned int size;
	u32 *cmd;
	int n;
	int err;

	size = (1 + 4 * count) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = val;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				 (gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = val;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = val;
		}

		offset += PAGE_SIZE;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	batch = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER);
	if (err)
		goto err;

	return batch;

err:
	i915_gem_object_put(obj);

	return ERR_PTR(err);
}

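/*
 * Submit a request on @engine that runs the batch built by gpu_write_dw(),
 * writing @value into the given dword of every page of @vma.
 */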
static int gpu_write(struct i915_vma *vma,
		     struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine,
		     u32 dword,
		     u32 value)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	int flags = 0;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	if (err)
		return err;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	batch = gpu_write_dw(vma, dword * sizeof(u32), value);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_request;
	}

	err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto err_request;

	i915_gem_object_set_active_reference(batch->obj);
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		i915_request_skip(rq, err);

err_request:
	i915_request_add(rq);

	return err;
}

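/*
 * Read the object back through the CPU, page by page, flushing caches as
 * required, and check that the chosen dword in every page holds @val.
 */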
static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_obj_finish_shmem_access(obj);

	return err;
}

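/*
 * Pin @obj at a fixed @offset, write to it from the GPU and verify the
 * result from the CPU. ENOSPC on the ggtt is tolerated since some of it may
 * already be reserved.
 */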
static int __igt_write_huge(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		goto out_vma_close;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(vm))
			err = 0;

		goto out_vma_close;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(vma, ctx, engine, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	i915_vma_destroy(vma);

	return err;
}

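/*
 * Exercise GPU writes to the object at ascending and descending offsets
 * across every engine capable of MI_STORE_DWORD_IMM, in a randomized order,
 * until the timeout expires.
 */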
static int igt_write_huge(struct i915_gem_context *ctx,
			  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int id;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64((vm->total - size), max_page_size);

	n = 0;
	for_each_engine(engine, i915, id) {
		if (!intel_engine_can_store_dword(engine)) {
			pr_info("store-dword-imm not supported on engine=%u\n", id);
			continue;
		}
		engines[n++] = engine;
	}

	if (!n)
		return 0;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, let's also make feeding the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
	order = i915_random_order(n * I915_NUM_ENGINES, &prng);
	if (!order)
		return -ENOMEM;

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * time out -- we want to avoid issues hidden by effectively always
	 * using offset = 0.
	 */
	i = 0;
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;

		engine = engines[order[i] % n];
		i = (i + 1) % (n * I915_NUM_ENGINES);

		err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, engine->id, offset_low, offset_high, max_page_size))
			break;
	}

	kfree(order);

	return err;
}

static int igt_ppgtt_exhaust_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	static unsigned int pages[ARRAY_SIZE(page_sizes)];
	struct drm_i915_gem_object *obj;
	unsigned int size_mask;
	unsigned int page_mask;
	int n, i;
	int err = -ENODEV;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check creating objects with a varying mix of page sizes --
	 * ensuring that our writes land in the right place.
	 */

	n = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
		pages[n++] = BIT(i);

	for (size_mask = 2; size_mask < BIT(n); size_mask++) {
		unsigned int size = 0;

		for (i = 0; i < n; i++) {
			if (size_mask & BIT(i))
				size |= pages[i];
		}

		/*
		 * For our page mask we want to enumerate all the page-size
		 * combinations which will fit into our chosen object size.
		 */
		for (page_mask = 2; page_mask <= size_mask; page_mask++) {
			unsigned int page_sizes = 0;

			for (i = 0; i < n; i++) {
				if (page_mask & BIT(i))
					page_sizes |= pages[i];
			}

			/*
			 * Ensure that we can actually fill the given object
			 * with our chosen page mask.
			 */
			if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
				continue;

			obj = huge_pages_object(i915, size, page_sizes);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			err = i915_gem_object_pin_pages(obj);
			if (err) {
				i915_gem_object_put(obj);

				if (err == -ENOMEM) {
					pr_info("unable to get pages, size=%u, pages=%u\n",
						size, page_sizes);
					err = 0;
					break;
				}

				pr_err("pin_pages failed, size=%u, pages=%u\n",
				       size_mask, page_mask);

				goto out_device;
			}

			/* Force the page-size for the gtt insertion */
			obj->mm.page_sizes.sg = page_sizes;

			err = igt_write_huge(ctx, obj);
			if (err) {
				pr_err("exhaust write-huge failed with size=%u\n",
				       size);
				goto out_unpin;
			}

			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
			i915_gem_object_put(obj);
		}
	}

	goto out_device;

out_unpin:
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = supported;

	return err;
}

static int igt_ppgtt_internal_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_64K,
		SZ_128K,
		SZ_256K,
		SZ_512K,
		SZ_1M,
		SZ_2M,
	};
	int i;
	int err;

	/*
	 * Sanity check that the HW uses huge pages correctly through internal
	 * -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];

		obj = i915_gem_object_create_internal(i915, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
			pr_info("internal unable to allocate huge-page(s) with size=%u\n",
				size);
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("internal write-huge failed with size=%u\n",
			       size);
			goto out_unpin;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static int igt_ppgtt_gemfs_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	static const unsigned int sizes[] = {
		SZ_2M,
		SZ_4M,
		SZ_8M,
		SZ_16M,
		SZ_32M,
	};
	int i;
	int err;

	/*
	 * Sanity check that the HW uses huge pages correctly through gemfs --
	 * ensure that our writes land in the right place.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int size = sizes[i];

		obj = i915_gem_object_create(i915, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
			pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
				size);
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("gemfs write-huge failed with size=%u\n",
			       size);
			goto out_unpin;
		}

		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_ppgtt_pin_update(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *dev_priv = ctx->i915;
	unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	int first, last;
	int err;

	/*
	 * Make sure there's no funny business when doing a PIN_UPDATE -- in the
	 * past we had a subtle issue with being able to incorrectly do multiple
	 * alloc va ranges on the same object when doing a PIN_UPDATE, which
	 * resulted in some pretty nasty bugs, though only when using
	 * huge-gtt-pages.
	 */

	if (!USES_FULL_48BIT_PPGTT(dev_priv)) {
		pr_info("48b PPGTT not supported, skipping\n");
		return 0;
	}

	first = ilog2(I915_GTT_PAGE_SIZE_64K);
	last = ilog2(I915_GTT_PAGE_SIZE_2M);

	for_each_set_bit_from(first, &supported, last + 1) {
		unsigned int page_size = BIT(first);

		obj = i915_gem_object_create_internal(dev_priv, page_size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_put;
		}

		err = i915_vma_pin(vma, SZ_2M, 0, flags);
		if (err)
			goto out_close;

		if (vma->page_sizes.sg < page_size) {
			pr_info("Unable to allocate page-size %x, finishing test early\n",
				page_size);
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);
		if (err)
			goto out_unpin;

		if (vma->page_sizes.gtt != page_size) {
			dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);

			/*
			 * The only valid reason for this to ever fail would be
			 * if the dma-mapper screwed us over when we did the
			 * dma_map_sg(), since it has the final say over the dma
			 * address.
			 */
			if (IS_ALIGNED(addr, page_size)) {
				pr_err("page_sizes.gtt=%u, expected=%u\n",
				       vma->page_sizes.gtt, page_size);
				err = -EINVAL;
			} else {
				pr_info("dma address misaligned, finishing test early\n");
			}

			goto out_unpin;
		}

		err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
		if (err)
			goto out_unpin;

		i915_vma_unpin(vma);
		i915_vma_close(vma);

		i915_gem_object_put(obj);
	}

	obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	/*
	 * Make sure we don't end up with something like where the pde is still
	 * pointing to the 2M page, and the pt we just filled-in is dangling --
	 * we can check this by writing to the first page where it would then
	 * land in the now stale 2M page.
	 */

	err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf);
	if (err)
		goto out_unpin;

	err = cpu_check(obj, 0, 0xdeadbeaf);

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
	 * to tmpfs, which we rely on if, on the off-chance, we encounter a
	 * failure when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_close;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	int err;

	/*
	 * Sanity check shrinking huge-paged object -- make sure nothing blows
	 * up.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	obj = i915_gem_object_create(i915, SZ_2M);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf);
	if (err)
		goto out_unpin;

	i915_vma_unpin(vma);

	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
	 * shmem to truncate our pages.
	 */
	i915_gem_shrink_all(i915);
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_close;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_close;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;

	err = cpu_check(obj, 0, 0xdeadbeaf);

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);

	return err;
}

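/*
 * Entry point for the mock selftests: create a mock device pretending to
 * support 48b PPGTT and run the igt_mock_* subtests against a fresh ppgtt.
 */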
int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	int saved_ppgtt = i915_modparams.enable_ppgtt;
	struct drm_i915_private *dev_priv;
	struct pci_dev *pdev;
	struct i915_hw_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	i915_modparams.enable_ppgtt = 3;

	pdev = dev_priv->drm.pdev;
	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_48bit(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_close;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_close;
	}

	err = i915_subtests(tests, ppgtt);

out_close:
	i915_ppgtt_close(&ppgtt->vm);
	i915_ppgtt_put(ppgtt);

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_modparams.enable_ppgtt = saved_ppgtt;

	drm_dev_put(&dev_priv->drm);

	return err;
}

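/*
 * Entry point for the live selftests: run the huge-page subtests against the
 * real device using a fresh GEM context and file.
 */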
int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_ppgtt_pin_update),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_exhaust_huge),
		SUBTEST(igt_ppgtt_gemfs_huge),
		SUBTEST(igt_ppgtt_internal_huge),
	};
	struct drm_file *file;
	struct i915_gem_context *ctx;
	int err;

	if (!USES_PPGTT(dev_priv)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_runtime_pm_get(dev_priv);

	ctx = live_context(dev_priv, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	if (ctx->ppgtt)
		ctx->ppgtt->vm.scrub_64K = true;

	err = i915_subtests(tests, ctx);

out_unlock:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);

	return err;
}