/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

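/*
 * Reclaim may recurse into the shrinker from a thread that already holds
 * struct_mutex (e.g. an allocation made while acquiring backing pages for
 * a GEM object). mutex_trylock_recursive() distinguishes the three cases
 * below: recursion (proceed, but do not unlock afterwards), a clean
 * trylock success (unlock when done), and contention, where we briefly
 * spin in the hope that the current holder drops the mutex before we are
 * asked to reschedule.
 */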
static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;

	case MUTEX_TRYLOCK_FAILED:
		*unlock = false;
		preempt_disable();
		do {
			cpu_relax();
			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
				*unlock = true;
				break;
			}
		} while (!need_resched());
		preempt_enable();
		return *unlock;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;
	}

	BUG();
}

static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
{
	if (!unlock)
		return;

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

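/*
 * Typical caller pattern for the pair above (illustrative sketch only,
 * mirroring the callers later in this file):
 *
 *	bool unlock;
 *
 *	if (!shrinker_lock(dev_priv, &unlock))
 *		return 0;
 *
 *	... do reclaim work under struct_mutex ...
 *
 *	shrinker_unlock(dev_priv, unlock);
 */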
static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	/* If any vma are "permanently" pinned, it will prevent us from
	 * reclaiming the obj->mm.pages. We only allow scanout objects to claim
	 * a permanent pin, along with a few others like the context objects.
	 * To simplify the scan, and to avoid walking the list of vma under the
	 * object, we just check whether the object holds a permanent
	 * (global) pin.
	 */
	if (READ_ONCE(obj->pin_global))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

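/*
 * Worked example of the pin-count heuristic above (illustrative numbers):
 * an object bound into two VMAs has bind_count == 2 and those bindings
 * contribute two pins to mm.pages_pin_count. An extra pin from, say, a
 * kernel vmap raises pages_pin_count to 3 > bind_count, so unbinding
 * alone cannot release the pages and can_release_pages() reports false.
 */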
/* "Unsafe" because the result is only advisory: the object may reacquire
 * pages concurrently, so callers must recheck under obj->mm.lock before
 * acting on it (see i915_gem_shrink() below).
 */
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !i915_gem_object_has_pages(obj);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	unsigned long scanned = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	/*
	 * When shrinking the active list, also consider active contexts.
	 * Active contexts are pinned until they are retired, and so can
	 * not be simply unbound to retire and unpin their pages. To shrink
	 * the contexts, we must wait until the gpu is idle.
	 *
	 * We don't care about errors here; if we cannot wait upon the GPU,
	 * we will free as much as we can and hope to get a second chance.
	 */
	if (flags & I915_SHRINK_ACTIVE)
		i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock(&dev_priv->mm.obj_lock);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			spin_unlock(&dev_priv->mm.obj_lock);

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!i915_gem_object_has_pages(obj)) {
					__i915_gem_object_invalidate(obj);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
			scanned += obj->base.size >> PAGE_SHIFT;

			spin_lock(&dev_priv->mm.obj_lock);
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock(&dev_priv->mm.obj_lock);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	shrinker_unlock(dev_priv, unlock);

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}

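/*
 * Example call (illustrative sketch only): reclaim up to 128 pages,
 * considering only idle, purgeable objects:
 *
 *	unsigned long nr_scanned = 0;
 *	unsigned long freed;
 *
 *	freed = i915_gem_shrink(dev_priv, 128, &nr_scanned,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 *
 * This mirrors the first pass of i915_gem_shrinker_scan() below.
 */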
/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the gpu or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	intel_runtime_pm_get(dev_priv);
	freed = i915_gem_shrink(dev_priv, -1UL, NULL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put(dev_priv);

	return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_i915_gem_object *obj;
	unsigned long num_objects = 0;
	unsigned long count = 0;

	spin_lock(&i915->mm.obj_lock);
	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
		if (can_release_pages(obj)) {
			count += obj->base.size >> PAGE_SHIFT;
			num_objects++;
		}

	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
			count += obj->base.size >> PAGE_SHIFT;
			num_objects++;
		}
	spin_unlock(&i915->mm.obj_lock);

	/* Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is roughly 2
	 * available GEM objects worth of pages. That is, we don't want
	 * the shrinker to fire until it is worth the cost of freeing an
	 * entire GEM object.
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		i915->mm.shrinker.batch =
			max((i915->mm.shrinker.batch + avg) >> 1,
			    128ul /* default SHRINK_BATCH */);
	}

	return count;
}

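/*
 * Worked example for the batch computation above (illustrative numbers):
 * with 100 releasable objects totalling 51200 pages, avg = 2 * 51200 / 100
 * = 1024 pages. If the previous batch was 4096, the new batch becomes
 * (4096 + 1024) >> 1 = 2560 pages, i.e. the batch decays towards twice
 * the mean object size, clamped below at the 128-page SHRINK_BATCH default.
 */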
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;
	bool unlock;

	sc->nr_scanned = 0;

	if (!shrinker_lock(dev_priv, &unlock))
		return SHRINK_STOP;

	/* First pass: only idle, purgeable objects. */
	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	/* Second pass: any idle object. */
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - sc->nr_scanned,
					 &sc->nr_scanned,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	/* Last resort, for kswapd only: wake the device and reclaim
	 * active objects as well.
	 */
	if (freed < sc->nr_to_scan && current_is_kswapd()) {
		intel_runtime_pm_get(dev_priv);
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - sc->nr_scanned,
					 &sc->nr_scanned,
					 I915_SHRINK_ACTIVE |
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
		intel_runtime_pm_put(dev_priv);
	}

	shrinker_unlock(dev_priv, unlock);

	/* Tell vmscan to stop calling us if we made no progress at all. */
	return sc->nr_scanned ? freed : SHRINK_STOP;
}

static bool
shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
			      int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
		    shrinker_lock(dev_priv, unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	return true;
}

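/*
 * Illustrative use, mirroring i915_gem_shrinker_vmap() below: bound the
 * wait for struct_mutex rather than blocking a reclaim notifier forever:
 *
 *	bool unlock;
 *
 *	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
 *		return NOTIFY_DONE;
 */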
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;

	freed_pages = i915_gem_shrink_all(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	bool unlock;
	int ret;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;
		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	shrinker_unlock(dev_priv, unlock);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker, the OOM notifier
 * and the vmap purge notifier.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	dev_priv->mm.shrinker.batch = 4096;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker, the OOM notifier and the
 * vmap purge notifier, in the reverse order of registration.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}