/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

static int num_vma_bound(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int count = 0;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (drm_mm_node_allocated(&vma->node))
			count++;
		if (vma->pin_count)
			count++;
	}

	return count;
}

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Only shmemfs objects are backed by swap */
	if (!obj->base.filp)
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (obj->pages_pin_count != num_vma_bound(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->madv == I915_MADV_DONTNEED;
}
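
/*
 * Illustrative note (not part of the driver logic above): obj->madv becomes
 * I915_MADV_DONTNEED when userspace marks a buffer as purgeable via the
 * I915_GEM_MADVISE ioctl, which is why such objects can be released even on
 * systems without swap. A hypothetical userspace sketch using libdrm:
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = handle,		/* assumed GEM handle */
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 */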

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core might have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv->dev);

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target && !list_empty(phase->list)) {
			struct drm_i915_gem_object *obj;
			struct i915_vma *vma, *v;

			obj = list_first_entry(phase->list,
					       typeof(*obj), global_list);
			list_move_tail(&obj->global_list, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mapping))
				continue;

			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
				continue;

			if (!can_release_pages(obj))
				continue;

			drm_gem_object_reference(&obj->base);

			/* For the unbound phase, this should be a no-op! */
			list_for_each_entry_safe(vma, v,
						 &obj->vma_list, obj_link)
				if (i915_vma_unbind(vma))
					break;

			if (i915_gem_object_put_pages(obj) == 0)
				count += obj->base.size >> PAGE_SHIFT;

			drm_gem_object_unreference(&obj->base);
		}
		list_splice(&still_in_list, phase->list);
	}

	i915_gem_retire_requests(dev_priv->dev);

	return count;
}
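
/*
 * Usage sketch (illustrative only, not a call site in this file): a caller
 * that wants to free memory ahead of a large allocation could first target
 * purgeable objects and escalate only if needed; the target of 128 pages is
 * an arbitrary example value:
 *
 *	unsigned long freed;
 *
 *	freed = i915_gem_shrink(dev_priv, 128,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 *	if (freed < 128)
 *		i915_gem_shrink_all(dev_priv);
 *
 * i915_gem_shrinker_scan() below follows exactly this two-pass pattern.
 */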

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code that intentionally quiesces the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	return i915_gem_shrink(dev_priv, -1UL,
			       I915_SHRINK_BOUND |
			       I915_SHRINK_UNBOUND |
			       I915_SHRINK_ACTIVE);
}

static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
			return false;

		*unlock = false;
	} else
		*unlock = true;

	return true;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->active && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}

struct shrinker_lock_uninterruptible {
	bool was_interruptible;
	bool unlock;
};

static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
				       struct shrinker_lock_uninterruptible *slu,
				       int timeout_ms)
{
	unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;

	while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;
		if (--timeout == 0) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	}

	slu->was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;
	return true;
}

static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
					 struct shrinker_lock_uninterruptible *slu)
{
	dev_priv->mm.interruptible = slu->was_interruptible;
	if (slu->unlock)
		mutex_unlock(&dev_priv->dev->struct_mutex);
}
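
/*
 * Usage sketch (illustrative only; the notifier callbacks below follow
 * exactly this pattern): the two helpers above are meant to bracket a forced
 * shrink, with a lock-acquisition timeout in milliseconds:
 *
 *	struct shrinker_lock_uninterruptible slu;
 *
 *	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
 *		return NOTIFY_DONE;
 *	... shrink objects ...
 *	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
 */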

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct shrinker_lock_uninterruptible slu;
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	freed_pages = i915_gem_shrink_all(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct shrinker_lock_uninterruptible slu;
	unsigned long freed_pages;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	freed_pages = i915_gem_shrink(dev_priv, -1UL,
				      I915_SHRINK_BOUND |
				      I915_SHRINK_UNBOUND |
				      I915_SHRINK_ACTIVE |
				      I915_SHRINK_VMAPS);

	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker, the OOM notifier
 * and the vmap purge notifier.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}
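
/*
 * Background note (informal summary of the core-kernel contract, not
 * i915-specific code): after register_shrinker(), the mm core under memory
 * pressure first calls .count_objects() to estimate how many pages are
 * reclaimable and then calls .scan_objects() with sc->nr_to_scan set to the
 * batch it wants freed, roughly:
 *
 *	count = shrinker->count_objects(shrinker, sc);
 *	if (count != 0) {
 *		sc->nr_to_scan = batch;	/* chosen by the mm core */
 *		freed = shrinker->scan_objects(shrinker, sc);
 *	}
 *
 * i915_gem_shrinker_count() and i915_gem_shrinker_scan() above implement
 * these two callbacks.
 */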

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker, the OOM notifier and the
 * vmap purge notifier.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}