drivers/gpu/drm/i915/i915_gem_evict.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"
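
/* Candidate selection for the eviction scan: a vma is only a candidate if
 * nothing has pinned it into the address space and it is not already being
 * tracked on an unwind/eviction list.
 */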
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm_device
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space it only
 * ignores pinned vmas, and not objects whose backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 unsigned long start, unsigned long end,
			 unsigned flags)
{
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
	int pass = 0;

	trace_i915_gem_evict(dev, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
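
	/*
	 * Note on the drm_mm scan protocol (a summary of drm_mm.c behaviour,
	 * nothing specific to this file): candidates are fed to
	 * drm_mm_scan_add_block() until it reports that a large enough hole
	 * can be assembled, and every candidate must afterwards be handed
	 * back via drm_mm_scan_remove_block() in the reverse order of
	 * addition (hence the unwind list is built with list_add), which
	 * returns true only for the blocks that make up the hole and thus
	 * need to be unbound.
	 */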
	INIT_LIST_HEAD(&unwind_list);
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

search_again:
	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (flags & PIN_NONBLOCK)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, vm_link) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips?
	 */
	if (flags & PIN_NONBLOCK)
		return -ENOSPC;

	/* Only idle the GPU and repeat the search once */
	if (pass++ == 0) {
		ret = i915_gpu_idle(dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev);
		goto search_again;
	}

	/* If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store the to-be-evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;

		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}
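
/*
 * A minimal sketch of the assumed caller pattern (modelled on the vma
 * binding code): look for a free hole first and only evict on -ENOSPC,
 * retrying the insertion after a successful eviction pass.
 *
 *	search_free:
 *		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 *							  size, alignment,
 *							  obj->cache_level,
 *							  start, end,
 *							  DRM_MM_SEARCH_DEFAULT,
 *							  DRM_MM_CREATE_DEFAULT);
 *		if (ret == -ENOSPC) {
 *			ret = i915_gem_evict_something(dev, vm, size, alignment,
 *						       obj->cache_level,
 *						       start, end, flags);
 *			if (ret == 0)
 *				goto search_free;
 *		}
 */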

/**
 * i915_gem_evict_for_vma - Evict vmas overlapping the target's chosen range
 * @target: vma whose node (start and size) has already been selected
 *
 * Walks the address-sorted drm_mm node list and unbinds every vma that
 * overlaps the range claimed by @target, failing if an overlapping vma is
 * pinned.
 */
int
i915_gem_evict_for_vma(struct i915_vma *target)
{
	struct drm_mm_node *node, *next;

	list_for_each_entry_safe(node, next,
				 &target->vm->mm.head_node.node_list,
				 node_list) {
		struct i915_vma *vma;
		int ret;

		/* Nodes are sorted by start address: skip those wholly
		 * before the target and stop at the first one wholly
		 * after it. */
		if (node->start + node->size <= target->node.start)
			continue;
		if (node->start >= target->node.start + target->node.size)
			break;

		vma = container_of(node, typeof(*vma), node);

		if (vma->pin_count) {
			if (!vma->exec_entry || (vma->pin_count > 1))
				/* Object is pinned for some other use */
				return -EBUSY;

			/* We need to evict a buffer in the same batch */
			if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				/* Overlapping fixed objects in the same batch */
				return -EINVAL;

			return -ENOSPC;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;
	}

	return 0;
}
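
/*
 * Assumed usage (mirroring the soft-pinning path in the binding code,
 * where "entry" stands for the execbuffer exec-object entry): reserve the
 * exact range first and, if it is occupied, evict the overlap and retry
 * the reservation.
 *
 *	vma->node.start = entry->offset;
 *	vma->node.size = size;
 *	ret = drm_mm_reserve_node(&vm->mm, &vma->node);
 *	if (ret) {
 *		ret = i915_gem_evict_for_vma(vma);
 *		if (ret == 0)
 *			ret = drm_mm_reserve_node(&vm->mm, &vma->node);
 *	}
 */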

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);

		WARN_ON(!list_empty(&vm->active_list));
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (vma->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}
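
/*
 * A sketch of the assumed last-ditch call from the execbuf reservation
 * loop, once gentler eviction attempts have failed: idle the GPU, unbind
 * every unpinned vma, and then retry reserving the execbuffer objects.
 *
 *	ret = i915_gem_evict_vm(vm, true);
 *	if (ret)
 *		return ret;
 */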