drm/i915: Test partial mappings
drivers/gpu/drm/i915/selftests/i915_gem_object.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

#include "mock_gem_device.h"
#include "huge_gem_object.h"

static int igt_gem_object(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = -ENOMEM;

	/* Basic test to ensure we can create an object */

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("i915_gem_object_create failed, err=%d\n", err);
		goto out;
	}

	err = 0;
	i915_gem_object_put(obj);
out:
	return err;
}

static int igt_phys_object(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err;

	/* Create an object and bind it to a contiguous set of physical pages,
	 * i.e. exercise the i915_gem_object_phys API.
	 */

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("i915_gem_object_create failed, err=%d\n", err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err) {
		pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
		goto out_obj;
	}

	if (obj->ops != &i915_gem_phys_ops) {
		pr_err("i915_gem_object_attach_phys did not create a phys object\n");
		err = -EINVAL;
		goto out_obj;
	}

	if (!atomic_read(&obj->mm.pages_pin_count)) {
		pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
		err = -EINVAL;
		goto out_obj;
	}

	/* Make the object dirty so that put_pages must copy back the data */
	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	mutex_unlock(&i915->drm.struct_mutex);
	if (err) {
		pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
		       err);
		goto out_obj;
	}

out_obj:
	i915_gem_object_put(obj);
out:
	return err;
}

static int igt_gem_huge(void *arg)
{
	const unsigned int nreal = 509; /* just to be awkward */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	unsigned int n;
	int err;

	/* Basic sanitycheck of our huge fake object allocation */

	obj = huge_gem_object(i915,
			      nreal * PAGE_SIZE,
			      i915->ggtt.base.total + PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
		if (i915_gem_object_get_page(obj, n) !=
		    i915_gem_object_get_page(obj, n % nreal)) {
			pr_err("Page lookup mismatch at index %u [%u]\n",
			       n, n % nreal);
			err = -EINVAL;
			goto out_unpin;
		}
	}

out_unpin:
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

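/* Parameters for one tiling mode under test: the fence tile dimensions
 * (width in bytes, height in rows), the stride in bytes, log2 of the tile
 * size in bytes, and the tiling and bit-6 swizzle modes being exercised.
 */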
struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

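/* Extract the selected address bit and shift it down into bit 6, ready to be
 * XORed into a tiled offset to reproduce the hardware's bit-6 swizzling.
 */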
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

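/* Convert a linear offset into the object into the offset the hardware would
 * use for the same byte under the given tiling: locate the tile row via the
 * stride, then step through the tiles within that row (X-tiling walks
 * tile->width byte spans, Y-tiling walks 16-byte spans that are 32 rows
 * deep), and finally apply the platform's bit-6 swizzle.
 */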
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 32 * ytile_span;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

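/* Apply the requested tiling to the object, then for a sample of page
 * indices pin a partial GGTT view around each page, write the page index
 * through the fenced (and so tiled) GTT mapping, and read it back through
 * the CPU at the manually computed tiled offset to confirm the write landed
 * where the tiling says it should.
 */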
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;
	unsigned long page;
	int err;

	if (igt_timeout(end_time,
			"%s: timed out before tiling=%d stride=%d\n",
			__func__, tile->tiling, tile->stride))
		return -EINTR;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err)
		return err;

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

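	/* Sample the object at prime page indices rather than every page,
	 * spreading the checks across the whole object while keeping the
	 * runtime of the test bounded.
	 */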
	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return err;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu\n",
			       page);
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu\n",
			       page);
			return PTR_ERR(io);
		}

		err = i915_vma_get_fence(vma);
		if (err) {
			pr_err("Failed to get fence for partial view: offset=%lu\n",
			       page);
			i915_vma_unpin_iomap(vma);
			return err;
		}

		iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		i915_gem_object_flush_gtt_write_domain(obj);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile_row_pages(obj),
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;
	}

	return 0;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int tiling;
	int err;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	mutex_lock(&i915->drm.struct_mutex);

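	/* First check an untiled, unswizzled mapping as a control. */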
	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mapping(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->mm.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->mm.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (INTEL_GEN(i915) <= 2) {
			tile.height = 16;
			tile.width = 128;
			tile.size = 11;
		} else if (tile.tiling == I915_TILING_Y &&
			   HAS_128_BYTE_Y_TILING(i915)) {
			tile.height = 32;
			tile.width = 128;
			tile.size = 12;
		} else {
			tile.height = 8;
			tile.width = 512;
			tile.size = 12;
		}

		if (INTEL_GEN(i915) < 4)
			max_pitch = 8192 / tile.width;
		else if (INTEL_GEN(i915) < 7)
			max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
		else
			max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;

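		/* Walk the interesting strides: every power-of-two pitch down
		 * from the maximum, its neighbours at pitch +/- 1 on gen4+
		 * (where non power-of-two strides are allowed), and then
		 * every prime pitch up to the maximum on gen4+.
		 */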
		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mapping(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mapping(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

int i915_gem_object_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_object),
		SUBTEST(igt_phys_object),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_unref(&i915->drm);
	return err;
}

int i915_gem_object_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_huge),
		SUBTEST(igt_partial_tiling),
	};

	return i915_subtests(tests, i915);
}