/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

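/* The shrinker may run in direct reclaim from an allocation made while this
 * thread already holds dev->struct_mutex (e.g. from within get_pages), so a
 * recursive trylock is used to detect and cope with both cases.
 */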
static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;

	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
	}

	BUG();
}

static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
{
	if (!unlock)
		return;

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		/* Only GGTT vma may be permanently pinned, and they are
		 * always at the start of the list. We can stop hunting as
		 * soon as we see a ppGTT vma.
		 */
		if (!i915_vma_is_ggtt(vma))
			break;

		if (i915_vma_is_pinned(vma))
			return true;
	}

	return false;
}

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	if (!obj->mm.pages)
		return false;

	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	if (any_vma_pinned(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

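/* Attempt to unbind the object and release its pages. The final check of
 * obj->mm.pages is made without holding obj->mm.lock (hence the READ_ONCE
 * and the "unsafe" name); callers reconfirm the result under the lock
 * before acting on it.
 */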
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !READ_ONCE(obj->mm.pages);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
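 *
 * Example (an illustrative sketch; it mirrors the two-pass call in
 * i915_gem_shrinker_scan() below):
 *
 *	freed = i915_gem_shrink(dev_priv, nr_pages,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 *
 * This first pass targets only purgeable objects; dropping
 * I915_SHRINK_PURGEABLE widens a second pass to all shrinkable objects.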
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       global_link))) {
			list_move_tail(&obj->global_link, &still_in_list);
			if (!obj->mm.pages) {
				list_del_init(&obj->global_link);
				continue;
			}

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!obj->mm.pages) {
					__i915_gem_object_invalidate(obj);
					list_del_init(&obj->global_link);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
		}
		list_splice_tail(&still_in_list, phase->list);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	shrinker_unlock(dev_priv, unlock);

	return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
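 *
 * Example (illustrative; this is how the OOM notifier below invokes it):
 *
 *	freed_pages = i915_gem_shrink_all(dev_priv);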
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	intel_runtime_pm_get(dev_priv);
	freed = i915_gem_shrink(dev_priv, -1UL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put(dev_priv);

	return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	i915_gem_retire_requests(dev_priv);

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	shrinker_unlock(dev_priv, unlock);

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
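	/* If releasing only purgeable objects was not enough to satisfy
	 * the request, scan the remaining shrinkable caches as well.
	 */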
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);

	shrinker_unlock(dev_priv, unlock);

	return freed;
}

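/* Keep retrying to idle the GPU and take struct_mutex until we succeed,
 * receive a fatal signal, or exceed the timeout.
 */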
static bool
shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
			      int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
		    shrinker_lock(dev_priv, unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	return true;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;
	bool unlock;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	freed_pages = i915_gem_shrink_all(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
		if (!obj->mm.pages)
			continue;

		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	shrinker_unlock(dev_priv, unlock);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	bool unlock;
	int ret;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;

		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	shrinker_unlock(dev_priv, unlock);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker, the OOM notifier
 * and the vmap purge notifier.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker, the OOM notifier and the
 * vmap purge notifier.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}