/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

static void cleanup_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * As we may hold onto the struct_mutex for inordinate lengths of
	 * time, the NMI khungtaskd detector may fire for the free objects
	 * worker.
	 */
	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
}

static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

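/*
 * fake_get_pages() fabricates backing store for a test object without
 * allocating any real memory: every scatterlist entry points at the page
 * for PFN_BIAS and the "dma" address is simply that fake physical
 * address. Each chunk is capped at 2GiB (BIT(31)), so even very large
 * objects are described by only a handful of sg entries.
 */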
static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		GEM_BUG_ON(!len);
		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}
	GEM_BUG_ON(rem);

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

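/*
 * fake_dma_object() wraps the fake backing store in a GEM object so the
 * tests can bind arbitrarily large "objects" into an address space while
 * only paying for the page tables, never for the pages themselves.
 */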
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last, limit;
	int err = 0;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!HAS_PPGTT(dev_priv))
		return 0;

	ppgtt = __hw_ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (!ppgtt->vm.allocate_va_range)
		goto err_ppgtt_cleanup;

	/*
	 * While we only allocate the page tables here and so we could
	 * address a much larger GTT than we could actually fit into
	 * RAM, a practical limit is the amount of physical pages in the system.
	 * This should ensure that we do not run into the oomkiller during
	 * the test and take down the machine wilfully.
	 */
	limit = totalram_pages() << PAGE_SHIFT;
	limit = min(ppgtt->vm.total, limit);

	/* Check we can allocate the entire range */
	for (size = 4096; size <= limit; size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		cond_resched();

		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
						  last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		cond_resched();
	}

err_ppgtt_cleanup:
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_ppgtt_put(ppgtt);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}

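/*
 * lowlevel_hole() drives the raw vm hooks (allocate_va_range,
 * insert_entries, clear_range) directly, using a mock vma on the stack
 * instead of going through the full i915_vma_pin() machinery.
 */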
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	I915_RND_STATE(seed_prng);
	unsigned int size;
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		u64 hole_size;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);
			kfree(order);
			break;
		}

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);
			intel_wakeref_t wakeref;

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */
				break;
			}

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
				break;

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			wakeref = intel_runtime_pm_get(i915);
			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
			intel_runtime_pm_put(i915, wakeref);
		}
		count = n;

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));
		}

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		kfree(order);

		cleanup_freed_objects(i915);
	}

	return 0;
}

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
			const struct {
				const char *name;
				u64 offset;
				int step;
			} phases[] = {
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },
				{ }
			}, *p;

			obj = fake_dma_object(i915, full_size);
			if (IS_ERR(obj))
				break;

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {
				u64 offset;

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					err = i915_vma_pin(vma, 0, 0, offset | flags);
					if (err) {
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);
						goto err;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					i915_vma_unpin(vma);

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}

				offset = p->offset;
				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);
					if (IS_ERR(vma))
						continue;

					if (p->step < 0) {
						if (offset < hole_start + obj->base.size)
							break;
						offset -= obj->base.size;
					}

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
						       offset);
						err = -EINVAL;
						goto err;
					}

					err = i915_vma_unbind(vma);
					if (err) {
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,
						       err);
						goto err;
					}

					if (p->step > 0) {
						if (offset + obj->base.size > hole_end)
							break;
						offset += obj->base.size;
					}
				}
			}

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {
				err = -EINTR;
				goto err;
			}
		}

		close_object_list(&objects, vm);
		cleanup_freed_objects(i915);
	}

	return 0;

err:
	close_object_list(&objects, vm);
	return err;
}

static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
{
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
	unsigned long flags;
	u64 size;

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		u64 addr;
		int err = 0;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);
		if (IS_ERR(obj))
			break;

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_put;
		}

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);
				goto err_close;
			}
			i915_vma_unpin(vma);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = -EINVAL;
				goto err_close;
			}

			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);
				goto err_close;
			}

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",
					__func__, addr)) {
				err = -EINTR;
				goto err_close;
			}
		}

err_close:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_put:
		i915_gem_object_put(obj);
		if (err)
			return err;

		cleanup_freed_objects(i915);
	}

	return 0;
}

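/*
 * pot_hole() binds a two-page object straddling every power-of-two
 * boundary within the hole, checking that page-table structures are set
 * up and torn down correctly on both sides of each boundary.
 */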
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned long flags;
	unsigned int pot;
	int err = 0;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
	     pot--) {
		u64 step = BIT_ULL(pot);
		u64 addr;

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr += step) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr,
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);
		}

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {
			err = -EINTR;
			goto err;
		}
	}

err:
	if (!i915_vma_is_ggtt(vma))
		i915_vma_close(vma);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

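/*
 * drunk_hole() binds a single object at randomly ordered offsets within
 * the hole, one at a time, so that page-table setup and teardown are
 * exercised under a non-sequential access pattern.
 */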
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
{
	I915_RND_STATE(prng);
	unsigned int size;
	unsigned long flags;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))
		flags |= PIN_GLOBAL;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;
		u64 hole_size;
		int err = -ENODEV;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
		if (!count) {
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);
			break;
		}

		do {
			order = i915_random_order(count, &prng);
			if (order)
				break;
		} while (count >>= 1);
		if (!count)
			return -ENOMEM;
		GEM_BUG_ON(!order);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */

		obj = fake_dma_object(i915, BIT_ULL(size));
		if (IS_ERR(obj)) {
			kfree(order);
			break;
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
			if (err) {
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       __func__,
				       addr, BIT_ULL(size),
				       hole_start, hole_end,
				       err);
				goto err;
			}

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				i915_vma_unpin(vma);
				err = i915_vma_unbind(vma);
				err = -EINVAL;
				goto err;
			}

			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			GEM_BUG_ON(err);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {
				err = -EINTR;
				goto err;
			}
		}

err:
		if (!i915_vma_is_ggtt(vma))
			i915_vma_close(vma);
err_obj:
		i915_gem_object_put(obj);
		kfree(order);
		if (err)
			return err;

		cleanup_freed_objects(i915);
	}

	return 0;
}

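/*
 * __shrink_hole() packs the hole with ever larger objects; shrink_hole()
 * reruns it while injecting allocation faults via vm->fault_attr,
 * forcing the shrinker into the page-table allocation paths.
 */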
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;
	LIST_HEAD(objects);
	int err = 0;
	u64 addr;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
		if (err) {
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);
			break;
		}

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			err = -EINVAL;
			break;
		}

		i915_vma_unpin(vma);
		addr += size;

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {
			err = -EINTR;
			break;
		}
	}

	close_object_list(&objects, vm);
	cleanup_freed_objects(i915);
	return err;
}

static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

static int shrink_boom(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned int sizes[] = { SZ_2M, SZ_1G };
	struct drm_i915_gem_object *purge;
	struct drm_i915_gem_object *explode;
	int err;
	int i;

	/*
	 * Catch the case which shrink_hole seems to miss. The setup here
	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all vma associated with the respective pd/pdp are
	 * unpinned at the time.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int size = sizes[i];
		struct i915_vma *vma;

		purge = fake_dma_object(i915, size);
		if (IS_ERR(purge))
			return PTR_ERR(purge);

		vma = i915_vma_instance(purge, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_purge;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto err_purge;

		/* Should now be ripe for purging */
		i915_vma_unpin(vma);

		explode = fake_dma_object(i915, size);
		if (IS_ERR(explode)) {
			err = PTR_ERR(explode);
			goto err_purge;
		}

		vm->fault_attr.probability = 100;
		vm->fault_attr.interval = 1;
		atomic_set(&vm->fault_attr.times, -1);

		vma = i915_vma_instance(explode, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_explode;
		}

		err = i915_vma_pin(vma, 0, 0, flags | size);
		if (err)
			goto err_explode;

		i915_vma_unpin(vma);

		i915_gem_object_put(purge);
		i915_gem_object_put(explode);

		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
		cleanup_freed_objects(i915);
	}

	return 0;

err_explode:
	i915_gem_object_put(explode);
err_purge:
	i915_gem_object_put(purge);
	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
	return err;
}

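/*
 * exercise_ppgtt() is the harness for the ppgtt variants: it creates a
 * full ppgtt behind a mock file and hands the entire [0, vm.total) range
 * to the given hole exerciser.
 */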
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!HAS_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
	GEM_BUG_ON(ppgtt->vm.closed);

	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

	i915_ppgtt_close(&ppgtt->vm);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
	return exercise_ppgtt(arg, shrink_boom);
}

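/*
 * exercise_ggtt() runs the same exercisers over each hole left in the
 * global GTT. Since the exercisers perturb the drm_mm, the hole list is
 * re-sorted and the walk restarted after every hole.
 */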
static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->vm.mm.color_adjust)
			ggtt->vm.mm.color_adjust(node, 0,
						 &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

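/*
 * igt_ggtt_page() checks ggtt->vm.insert_page() by aliasing a single
 * physical page at many GGTT offsets, writing a distinct value through
 * each mapping and reading the values back in a different random order.
 */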
static int igt_ggtt_page(void *arg)
{
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	struct drm_mm_node tmp;
	unsigned int *order, n;
	int err;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_unlock;
	}

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_free;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (err)
		goto out_unpin;

	wakeref = intel_runtime_pm_get(i915);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + n * PAGE_SIZE;

		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, 0),
				     offset, I915_CACHE_NONE, 0);
	}

	order = i915_random_order(count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_remove;
	}

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);
	}
	i915_gem_flush_ggtt_writes(i915);

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;
		u32 __iomem *vaddr;
		u32 val;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

		if (val != n) {
			pr_err("insert page failed: found %d, expected %d\n",
			       val, n);
			err = -EINVAL;
			break;
		}
	}

	kfree(order);
out_remove:
	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
	intel_runtime_pm_put(i915, wakeref);
	drm_mm_remove_node(&tmp);
out_unpin:
	i915_gem_object_unpin_pages(obj);
out_free:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

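/*
 * track_vma_bind() mimics just enough of a real bind for the mock
 * selftests: it bumps the bind count, pins the backing pages and moves
 * the vma onto the bound list, without touching any hardware state.
 */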
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);
}

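/*
 * exercise_mock() runs a hole exerciser over the ppgtt of a mock
 * context, with the range clamped to the amount of physical memory so
 * that the fake objects cannot drive the system out of RAM.
 */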
static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	const u64 limit = totalram_pages() << PAGE_SHIFT;
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	struct i915_ggtt *ggtt = arg;

	return exercise_mock(ggtt->vm.i915, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   total,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		offset = random_offset(0, ggtt->vm.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
					   obj->base.size,
					   offset,
					   obj->cache_level,
					   0);
		if (err) {
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);
			err = -EINVAL;
			goto out;
		}
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

static int igt_gtt_insert(void *arg)
{
	struct i915_ggtt *ggtt = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
		u64 size;
		u64 alignment;
		u64 start, end;
	} invalid_insert[] = {
		{
			ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
			0, ggtt->vm.total,
		},
		{
			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,
		},
		{
			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
		},
		{}
	}, *ii;
	LIST_HEAD(objects);
	u64 total;
	int err = -ENODEV;

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * for the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
					  ii->start, ii->end,
					  0);
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,
			       err);
			return -EINVAL;
		}
	}

	/* Start by filling the GGTT */
	for (total = 0;
	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);
			break;
		}
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");
			err = -EINVAL;
			goto out;
		}

		__i915_vma_unpin(vma);
	}

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;
		u64 offset;

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
		if (err) {
			pr_err("i915_vma_unbind failed with err=%d!\n", err);
			goto out;
		}

		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);
			err = -EINVAL;
			goto out;
		}
	}

	/* And then force evictions */
	for (total = 0;
	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
	     total += 2 * I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      2 * I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			i915_gem_object_put(obj);
			goto out;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out;
		}

		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, ggtt->vm.total,
					  0);
		if (err) {
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, ggtt->vm.total, err);
			goto out;
		}
		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	}

out:
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);
	}
	return err;
}

int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	struct i915_ggtt *ggtt;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
	if (!ggtt) {
		err = -ENOMEM;
		goto out_put;
	}
	mock_init_ggtt(i915, ggtt);

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, ggtt);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);

	mock_fini_ggtt(ggtt);
	kfree(ggtt);
out_put:
	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ppgtt_shrink_boom),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));

	return i915_subtests(tests, i915);
}