/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "mock_gem_device.h"
#include "huge_gem_object.h"

static int igt_gem_object(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        int err;

        /* Basic test to ensure we can create an object */

        obj = i915_gem_object_create(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                pr_err("i915_gem_object_create failed, err=%d\n", err);
                goto out;
        }

        err = 0;
        i915_gem_object_put(obj);
out:
        return err;
}

static int igt_phys_object(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        int err;

        /* Create an object and bind it to a contiguous set of physical pages,
         * i.e. exercise the i915_gem_object_phys API.
         */

        obj = i915_gem_object_create(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                pr_err("i915_gem_object_create failed, err=%d\n", err);
                goto out;
        }

        mutex_lock(&i915->drm.struct_mutex);
        err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
        mutex_unlock(&i915->drm.struct_mutex);
        if (err) {
                pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
                goto out_obj;
        }

        if (obj->ops != &i915_gem_phys_ops) {
                pr_err("i915_gem_object_attach_phys did not create a phys object\n");
                err = -EINVAL;
                goto out_obj;
        }

        if (!atomic_read(&obj->mm.pages_pin_count)) {
                pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
                err = -EINVAL;
                goto out_obj;
        }

        /* Make the object dirty so that put_pages must do copy back the data */
        mutex_lock(&i915->drm.struct_mutex);
        err = i915_gem_object_set_to_gtt_domain(obj, true);
        mutex_unlock(&i915->drm.struct_mutex);
        if (err) {
                pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
                       err);
                goto out_obj;
        }

out_obj:
        i915_gem_object_put(obj);
out:
        return err;
}

static int igt_gem_huge(void *arg)
{
        const unsigned int nreal = 509; /* just to be awkward */
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        unsigned int n;
        int err;

        /* Basic sanitycheck of our huge fake object allocation */

        obj = huge_gem_object(i915,
                              nreal * PAGE_SIZE,
                              i915->ggtt.base.total + PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages(obj);
        if (err) {
                pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
                       nreal, obj->base.size / PAGE_SIZE, err);
                goto out;
        }

        for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
                if (i915_gem_object_get_page(obj, n) !=
                    i915_gem_object_get_page(obj, n % nreal)) {
                        pr_err("Page lookup mismatch at index %u [%u]\n",
                               n, n % nreal);
                        err = -EINVAL;
                        goto out_unpin;
                }
        }

out_unpin:
        i915_gem_object_unpin_pages(obj);
out:
        i915_gem_object_put(obj);
        return err;
}

struct tile {
        unsigned int width;
        unsigned int height;
        unsigned int stride;
        unsigned int size;
        unsigned int tiling;
        unsigned int swizzle;
};

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
        return (offset & BIT_ULL(bit)) >> (bit - 6);
}
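
/*
 * Map a linear byte offset within the fenced GTT view back to the offset
 * inside the object's backing pages, undoing the fence detiling and applying
 * the bit-6 swizzle, so the GTT write can be verified from the CPU.
 */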
static u64 tiled_offset(const struct tile *tile, u64 v)
{
        u64 x, y;

        if (tile->tiling == I915_TILING_NONE)
                return v;

        y = div64_u64_rem(v, tile->stride, &x);
        v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

        if (tile->tiling == I915_TILING_X) {
                v += y * tile->width;
                v += div64_u64_rem(x, tile->width, &x) << tile->size;
                v += x;
        } else {
                const unsigned int ytile_span = 16;
                const unsigned int ytile_height = 32 * ytile_span;

                v += y * ytile_span;
                v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
                v += x;
        }

        switch (tile->swizzle) {
        case I915_BIT_6_SWIZZLE_9:
                v ^= swizzle_bit(9, v);
                break;
        case I915_BIT_6_SWIZZLE_9_10:
                v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
                break;
        case I915_BIT_6_SWIZZLE_9_11:
                v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
                break;
        case I915_BIT_6_SWIZZLE_9_10_11:
                v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
                break;
        }

        return v;
}
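
/*
 * For the given tiling and stride, write a tag through each partial GGTT
 * view of the object and then read it back via the CPU, using tiled_offset()
 * to undo the detiling, to confirm the write landed in the expected page.
 */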
static int check_partial_mapping(struct drm_i915_gem_object *obj,
                                 const struct tile *tile,
                                 unsigned long end_time)
{
        const unsigned int nreal = obj->scratch / PAGE_SIZE;
        const unsigned long npages = obj->base.size / PAGE_SIZE;
        struct i915_vma *vma;
        unsigned long page;
        int err;

        if (igt_timeout(end_time,
                        "%s: timed out before tiling=%d stride=%d\n",
                        __func__, tile->tiling, tile->stride))
                return -EINTR;

        err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
        if (err)
                return err;

        GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
        GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

        for_each_prime_number_from(page, 1, npages) {
                struct i915_ggtt_view view =
                        compute_partial_view(obj, page, MIN_CHUNK_PAGES);
                u32 __iomem *io;
                struct page *p;
                unsigned int n;
                u64 offset;
                u32 *cpu;

                GEM_BUG_ON(view.partial.size > nreal);

                err = i915_gem_object_set_to_gtt_domain(obj, true);
                if (err)
                        return err;

                vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
                if (IS_ERR(vma)) {
                        pr_err("Failed to pin partial view: offset=%lu\n",
                               page);
                        return PTR_ERR(vma);
                }

                n = page - view.partial.offset;
                GEM_BUG_ON(n >= view.partial.size);

                io = i915_vma_pin_iomap(vma);
                i915_vma_unpin(vma);
                if (IS_ERR(io)) {
                        pr_err("Failed to iomap partial view: offset=%lu\n",
                               page);
                        return PTR_ERR(io);
                }

                err = i915_vma_get_fence(vma);
                if (err) {
                        pr_err("Failed to get fence for partial view: offset=%lu\n",
                               page);
                        i915_vma_unpin_iomap(vma);
                        return err;
                }

                iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
                i915_vma_unpin_iomap(vma);

                offset = tiled_offset(tile, page << PAGE_SHIFT);
                if (offset >= obj->base.size)
                        continue;

                flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

                p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
                cpu = kmap(p) + offset_in_page(offset);
                drm_clflush_virt_range(cpu, sizeof(*cpu));
                if (*cpu != (u32)page) {
                        pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
                               page, n,
                               view.partial.offset,
                               view.partial.size,
                               vma->size >> PAGE_SHIFT,
                               tile->stride * tile->height / PAGE_SIZE,
                               vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
                               offset >> PAGE_SHIFT,
                               (unsigned int)offset_in_page(offset),
                               offset,
                               (u32)page, *cpu);
                        err = -EINVAL;
                }
                *cpu = 0;
                drm_clflush_virt_range(cpu, sizeof(*cpu));
                kunmap(p);
                if (err)
                        return err;
        }

        return 0;
}

static int igt_partial_tiling(void *arg)
{
        const unsigned int nreal = 1 << 12; /* largest tile row x2 */
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
        int tiling;
        int err;

        /* We want to check the page mapping and fencing of a large object
         * mmapped through the GTT. The object we create is larger than can
         * possibly be mmaped as a whole, and so we must use partial GGTT vma.
         * We then check that a write through each partial GGTT vma ends up
         * in the right set of pages within the object, and with the expected
         * tiling, which we verify by manual swizzling.
         */

        obj = huge_gem_object(i915,
                              nreal << PAGE_SHIFT,
                              (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        err = i915_gem_object_pin_pages(obj);
        if (err) {
                pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
                       nreal, obj->base.size / PAGE_SIZE, err);
                goto out;
        }

        mutex_lock(&i915->drm.struct_mutex);

        if (1) {
                IGT_TIMEOUT(end);
                struct tile tile;

                tile.height = 1;
                tile.width = 1;
                tile.size = 0;
                tile.stride = 0;
                tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
                tile.tiling = I915_TILING_NONE;

                err = check_partial_mapping(obj, &tile, end);
                if (err && err != -EINTR)
                        goto out_unlock;
        }

        for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
                IGT_TIMEOUT(end);
                unsigned int max_pitch;
                unsigned int pitch;
                struct tile tile;

                tile.tiling = tiling;
                switch (tiling) {
                case I915_TILING_X:
                        tile.swizzle = i915->mm.bit_6_swizzle_x;
                        break;
                case I915_TILING_Y:
                        tile.swizzle = i915->mm.bit_6_swizzle_y;
                        break;
                }

                if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
                    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
                        continue;

                if (INTEL_GEN(i915) <= 2) {
                        tile.height = 16;
                        tile.width = 128;
                        tile.size = 11;
                } else if (tile.tiling == I915_TILING_Y &&
                           HAS_128_BYTE_Y_TILING(i915)) {
                        tile.height = 32;
                        tile.width = 128;
                        tile.size = 12;
                } else {
                        tile.height = 8;
                        tile.width = 512;
                        tile.size = 12;
                }

                if (INTEL_GEN(i915) < 4)
                        max_pitch = 8192 / tile.width;
                else if (INTEL_GEN(i915) < 7)
                        max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
                else
                        max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;

                for (pitch = max_pitch; pitch; pitch >>= 1) {
                        tile.stride = tile.width * pitch;
                        err = check_partial_mapping(obj, &tile, end);
                        if (err == -EINTR)
                                goto next_tiling;
                        if (err)
                                goto out_unlock;

                        if (pitch > 2 && INTEL_GEN(i915) >= 4) {
                                tile.stride = tile.width * (pitch - 1);
                                err = check_partial_mapping(obj, &tile, end);
                                if (err == -EINTR)
                                        goto next_tiling;
                                if (err)
                                        goto out_unlock;
                        }

                        if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
                                tile.stride = tile.width * (pitch + 1);
                                err = check_partial_mapping(obj, &tile, end);
                                if (err == -EINTR)
                                        goto next_tiling;
                                if (err)
                                        goto out_unlock;
                        }
                }

                if (INTEL_GEN(i915) >= 4) {
                        for_each_prime_number(pitch, max_pitch) {
                                tile.stride = tile.width * pitch;
                                err = check_partial_mapping(obj, &tile, end);
                                if (err == -EINTR)
                                        goto next_tiling;
                                if (err)
                                        goto out_unlock;
                        }
                }

next_tiling: ;
        }

out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        i915_gem_object_unpin_pages(obj);
out:
        i915_gem_object_put(obj);
        return err;
}
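
/*
 * Submit a nop request against the object and hand our reference over to the
 * active-reference tracker, so the object stays busy until the request is
 * retired and only then becomes a dead object for the reaper to free.
 */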
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct drm_i915_gem_request *rq;
        struct i915_vma *vma;
        int err;

        vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                return err;

        rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
        if (IS_ERR(rq)) {
                i915_vma_unpin(vma);
                return PTR_ERR(rq);
        }

        i915_vma_move_to_active(vma, rq, 0);
        i915_add_request(rq);

        i915_gem_object_set_active_reference(obj);
        i915_vma_unpin(vma);
        return 0;
}
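
/*
 * Try to allocate a GTT mmap offset for a freshly created object of the
 * given size and report whether the attempt returned the error we expected.
 */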
static bool assert_mmap_offset(struct drm_i915_private *i915,
                               unsigned long size,
                               int expected)
{
        struct drm_i915_gem_object *obj;
        int err;

        obj = i915_gem_object_create_internal(i915, size);
        if (IS_ERR(obj))
                return false;

        err = i915_gem_object_create_mmap_offset(obj);
        i915_gem_object_put(obj);

        return err == expected;
}

static int igt_mmap_offset_exhaustion(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node resv, *hole;
        u64 hole_start, hole_end;
        int loop, err;

        /* Trim the device mmap space to only a page */
        memset(&resv, 0, sizeof(resv));
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
                resv.start = hole_start;
                resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
                err = drm_mm_reserve_node(mm, &resv);
                if (err) {
                        pr_err("Failed to trim VMA manager, err=%d\n", err);
                        return err;
                }
                break;
        }

        /* Just fits! */
        if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
                pr_err("Unable to insert object into single page hole\n");
                err = -EINVAL;
                goto out;
        }

        /* Too large */
        if (!assert_mmap_offset(i915, 2*PAGE_SIZE, -ENOSPC)) {
                pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
                err = -EINVAL;
                goto out;
        }

        /* Fill the hole, further allocation attempts should then fail */
        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out;
        }

        err = i915_gem_object_create_mmap_offset(obj);
        if (err) {
                pr_err("Unable to insert object into reclaimed hole\n");
                goto err_obj;
        }

        if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
                pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
                err = -EINVAL;
                goto err_obj;
        }

        i915_gem_object_put(obj);

        /* Now fill with busy dead objects that we expect to reap */
        for (loop = 0; loop < 3; loop++) {
                obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        goto out;
                }

                mutex_lock(&i915->drm.struct_mutex);
                intel_runtime_pm_get(i915);
                err = make_obj_busy(obj);
                intel_runtime_pm_put(i915);
                mutex_unlock(&i915->drm.struct_mutex);
                if (err) {
                        pr_err("[loop %d] Failed to busy the object\n", loop);
                        goto err_obj;
                }

                GEM_BUG_ON(!i915_gem_object_is_active(obj));
                err = i915_gem_object_create_mmap_offset(obj);
                if (err) {
                        pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n",
                               loop, err);
                        goto out;
                }
        }

out:
        drm_mm_remove_node(&resv);
        return err;
err_obj:
        i915_gem_object_put(obj);
        goto out;
}

int i915_gem_object_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_gem_object),
                SUBTEST(igt_phys_object),
        };
        struct drm_i915_private *i915;
        int err;

        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;

        err = i915_subtests(tests, i915);

        drm_dev_unref(&i915->drm);
        return err;
}

int i915_gem_object_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_gem_huge),
                SUBTEST(igt_partial_tiling),
                SUBTEST(igt_mmap_offset_exhaustion),
        };

        return i915_subtests(tests, i915);
}