/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/drm_pci.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gemfs.h"
#include "i915_globals.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_global; /* currently in use by HW, keep flushed */
}

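/*
 * Reserve a temporary node in the mappable range of the GGTT; used by the
 * pread/pwrite GGTT fallback paths below when the object cannot simply be
 * pinned into the aperture.
 */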
static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

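/*
 * __i915_gem_park() performs the actual power-down once the GPU has been
 * idle for a while: it runs from the deferred idle work scheduled by
 * i915_gem_park() and releases the GT wakeref taken in i915_gem_unpark().
 */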
static void __i915_gem_park(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	GEM_TRACE("\n");

	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(i915->gt.active_requests);
	GEM_BUG_ON(!list_empty(&i915->gt.active_rings));

	if (!i915->gt.awake)
		return;

	/*
	 * Be paranoid and flush a concurrent interrupt to make sure
	 * we don't reactivate any irq tasklets after parking.
	 *
	 * FIXME: Note that even though we have waited for execlists to be idle,
	 * there may still be an in-flight interrupt even though the CSB
	 * is now empty. synchronize_irq() makes sure that a residual interrupt
	 * is completed before we continue, but it doesn't prevent the HW from
	 * raising a spurious interrupt later. To complete the shield we should
	 * coordinate disabling the CS irq with flushing the interrupts.
	 */
	synchronize_irq(i915->drm.irq);

	intel_engines_park(i915);
	i915_timelines_park(i915);

	i915_pmu_gt_parked(i915);
	i915_vma_parked(i915);

	wakeref = fetch_and_zero(&i915->gt.awake);
	GEM_BUG_ON(!wakeref);

	if (INTEL_GEN(i915) >= 6)
		gen6_rps_idle(i915);

	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	i915_globals_park();
}

void i915_gem_park(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(i915->gt.active_requests);

	if (!i915->gt.awake)
		return;

	/* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
	mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
}

void i915_gem_unpark(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(!i915->gt.active_requests);
	assert_rpm_wakelock_held(i915);

	if (i915->gt.awake)
		return;

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!i915->gt.awake);

	i915_globals_unpark();

	intel_enable_gt_powersave(i915);
	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);
	i915_pmu_gt_unparked(i915);

	intel_engines_unpark(i915);

	i915_queue_hangcheck(i915);

	queue_delayed_work(i915->wq,
			   &i915->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	mutex_lock(&ggtt->vm.mutex);

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

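/*
 * Replace the object's shmem backing store with a single contiguous DMA
 * allocation, copying the current contents across page by page; such
 * objects are subsequently serviced through i915_gem_phys_ops below.
 */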
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	drm_dma_handle_t *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static const struct drm_i915_gem_object_ops i915_gem_object_ops;

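/*
 * Unbind every vma currently bound to this object, waiting for any
 * outstanding rendering to complete as required; the caller must hold
 * struct_mutex.
 */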
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	if (ret)
		return ret;

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout)
{
	struct i915_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_request_completed(rq))
		goto out;

	timeout = i915_request_wait(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
		i915_request_retire_upto(rq);

	return timeout;
}

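/*
 * Wait on the fences tracked in a reservation object: just the exclusive
 * fence, or every shared fence as well when I915_WAIT_ALL is set. If all
 * fences signal within the timeout, the fence array is opportunistically
 * pruned to drop the floating references.
 */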
static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout)
{
	unsigned int seq = __read_seqcount_begin(&resv->seq);
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);

		/*
		 * If both shared fences and an exclusive fence exist,
		 * then by construction the shared fences must be later
		 * than the exclusive fence. If we successfully wait for
		 * all the shared fences, we know that the exclusive fence
		 * must all be signaled. If all the shared fences are
		 * signaled, we can prune the array and recover the
		 * floating references on the fences/requests.
		 */
		prune_fences = count && timeout >= 0;
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout >= 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout);

	dma_fence_put(excl);

	/*
	 * Opportunistically prune the fences iff we know they have *all* been
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
		if (reservation_object_trylock(resv)) {
			if (!__read_seqcount_retry(&resv->seq, seq))
				reservation_object_add_excl_fence(resv, NULL);
			reservation_object_unlock(resv);
		}
	}

	return timeout;
}

static void __fence_set_priority(struct dma_fence *fence,
				 const struct i915_sched_attr *attr)
{
	struct i915_request *rq;
	struct intel_engine_cs *engine;

	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;

	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule)
		engine->schedule(rq, attr);
	rcu_read_unlock();
	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}

static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], attr);
	} else {
		__fence_set_priority(fence, attr);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      const struct i915_sched_attr *attr)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], attr);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, attr);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout)
{
	might_sleep();
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
	return timeout < 0 ? timeout : 0;
}

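/*
 * pwrite fast path for objects backed by a contiguous phys allocation:
 * copy the user data straight into the phys handle's vaddr and clflush,
 * no page-by-page shmem walk required.
 */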
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 size,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       args->size, &args->handle);
}

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       args->size, &args->handle);
}

static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
		return;

	i915_gem_chipset_flush(dev_priv);

	with_intel_runtime_pm(dev_priv, wakeref) {
		spin_lock_irq(&dev_priv->uncore.lock);

		POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));

		spin_unlock_irq(&dev_priv->uncore.lock);
	}
}

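/*
 * Flush any writes pending in the object's current write domain (GTT, WC,
 * CPU or RENDER) before the object is handed over to another domain, then
 * clear obj->write_domain.
 */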
static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;

	if (!(obj->write_domain & flush_domains))
		return;

	switch (obj->write_domain) {
	case I915_GEM_DOMAIN_GTT:
		i915_gem_flush_ggtt_writes(dev_priv);

		intel_fb_obj_flush(obj,
				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));

		for_each_ggtt_vma(vma, obj) {
			if (vma->iomap)
				continue;

			i915_vma_unset_ggtt_write(vma);
		}
		break;

	case I915_GEM_DOMAIN_WC:
		wmb();
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
	}

	obj->write_domain = 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!obj->cache_dirty &&
	    !(obj->read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = CLFLUSH_BEFORE;

out:
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (!obj->cache_dirty) {
		*needs_clflush |= CLFLUSH_AFTER;

		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
		if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
			*needs_clflush |= CLFLUSH_BEFORE;
	}

out:
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	wakeref = intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(i915);
		if (!wakeref) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(i915);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(i915, wakeref);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

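/*
 * "Bump the LRU": move the object's GGTT vmas to the tail of their vm's
 * bound list, and the object itself to the tail of the bound/unbound list,
 * marking it as the most recently used entry.
 */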
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct list_head *list;
	struct i915_vma *vma;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	mutex_lock(&i915->ggtt.vm.mutex);
	for_each_ggtt_vma(vma, obj) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	}
	mutex_unlock(&i915->ggtt.vm.mutex);

	spin_lock(&i915->mm.obj_lock);
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
	list_move_tail(&obj->mm.link, list);
	spin_unlock(&i915->mm.obj_lock);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	u32 read_domains = args->read_domains;
	u32 write_domain = args->write_domain;
	int err;

	/* Only handle setting domains to types used by the CPU. */
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_PRIORITY |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto out;

	/*
	 * Proxy objects do not control access to the backing storage, ergo
	 * they cannot be used as a means to manipulate the cache domain
	 * tracking for that backing storage. The proxy object is always
	 * considered to be outside of any cache domain.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		err = -ENXIO;
		goto out;
	}

	/*
	 * Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out_unpin;

	if (read_domains & I915_GEM_DOMAIN_WC)
		err = i915_gem_object_set_to_wc_domain(obj, write_domain);
	else if (read_domains & I915_GEM_DOMAIN_GTT)
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
	else
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);

	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);

	mutex_unlock(&dev->struct_mutex);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj,
					fb_write_origin(obj, write_domain));

out_unpin:
	i915_gem_object_unpin_pages(obj);
1555 | out: |
1556 | i915_gem_object_put(obj); | |
40e62d5d | 1557 | return err; |
673a394b EA |
1558 | } |
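/*
 * Illustrative sketch, not part of i915_gem.c: from userspace the ioctl
 * above is reached roughly as below, assuming an open DRM fd and an
 * existing GEM handle. Only the uapi names are real; fd and handle are
 * placeholders for the example.
 *
 *	struct drm_i915_gem_set_domain arg = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
 */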
1559 | ||
1560 | /** | |
1561 | * Called when user space has done writes to this buffer | |
14bb2c11 TU |
1562 | * @dev: drm device |
1563 | * @data: ioctl data blob | |
1564 | * @file: drm file | |
673a394b EA |
1565 | */ |
1566 | int | |
1567 | i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |
05394f39 | 1568 | struct drm_file *file) |
673a394b EA |
1569 | { |
1570 | struct drm_i915_gem_sw_finish *args = data; | |
05394f39 | 1571 | struct drm_i915_gem_object *obj; |
1d7cfea1 | 1572 | |
03ac0642 | 1573 | obj = i915_gem_object_lookup(file, args->handle); |
c21724cc CW |
1574 | if (!obj) |
1575 | return -ENOENT; | |
673a394b | 1576 | |
a03f395a TZ |
1577 | /* |
1578 | * Proxy objects are barred from CPU access, so there is no | |
1579 | * need to ban sw_finish as it is a nop. | |
1580 | */ | |
1581 | ||
673a394b | 1582 | /* Pinned buffers may be scanout, so flush the cache */ |
5a97bcc6 | 1583 | i915_gem_object_flush_if_display(obj); |
f0cd5182 | 1584 | i915_gem_object_put(obj); |
5a97bcc6 CW |
1585 | |
1586 | return 0; | |
673a394b EA |
1587 | } |
1588 | ||
5c4604e7 JL |
1589 | static inline bool |
1590 | __vma_matches(struct vm_area_struct *vma, struct file *filp, | |
1591 | unsigned long addr, unsigned long size) | |
1592 | { | |
1593 | if (vma->vm_file != filp) | |
1594 | return false; | |
1595 | ||
a90e1948 TU |
1596 | return vma->vm_start == addr && |
1597 | (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); | |
5c4604e7 JL |
1598 | } |
1599 | ||
673a394b | 1600 | /** |
14bb2c11 TU |
1601 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address |
1602 | * it is mapped to. | |
1603 | * @dev: drm device | |
1604 | * @data: ioctl data blob | |
1605 | * @file: drm file | |
673a394b EA |
1606 | * |
1607 | * While the mapping holds a reference on the contents of the object, it doesn't | |
1608 | * imply a ref on the object itself. | |
34367381 DV |
1609 | * |
1610 | * IMPORTANT: | |
1611 | * | |
1612 | * DRM driver writers who look at this function as an example for how to do GEM |
1613 | * mmap support, please don't implement mmap support like here. The modern way | |
1614 | * to implement DRM mmap support is with an mmap offset ioctl (like | |
1615 | * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. | |
1616 | * That way debug tooling like valgrind will understand what's going on, hiding | |
1617 | * the mmap call in a driver private ioctl will break that. The i915 driver only | |
1618 | * does cpu mmaps this way because we didn't know better. | |
673a394b EA |
1619 | */ |
1620 | int | |
1621 | i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |
05394f39 | 1622 | struct drm_file *file) |
673a394b EA |
1623 | { |
1624 | struct drm_i915_gem_mmap *args = data; | |
03ac0642 | 1625 | struct drm_i915_gem_object *obj; |
673a394b EA |
1626 | unsigned long addr; |
1627 | ||
1816f923 AG |
1628 | if (args->flags & ~(I915_MMAP_WC)) |
1629 | return -EINVAL; | |
1630 | ||
568a58e5 | 1631 | if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT)) |
1816f923 AG |
1632 | return -ENODEV; |
1633 | ||
03ac0642 CW |
1634 | obj = i915_gem_object_lookup(file, args->handle); |
1635 | if (!obj) | |
bf79cb91 | 1636 | return -ENOENT; |
673a394b | 1637 | |
1286ff73 DV |
1638 | /* prime objects have no backing filp to GEM mmap |
1639 | * pages from. | |
1640 | */ | |
03ac0642 | 1641 | if (!obj->base.filp) { |
794a11cb CW |
1642 | addr = -ENXIO; |
1643 | goto err; | |
1644 | } | |
1645 | ||
1646 | if (range_overflows(args->offset, args->size, (u64)obj->base.size)) { | |
1647 | addr = -EINVAL; | |
1648 | goto err; | |
1286ff73 DV |
1649 | } |
1650 | ||
03ac0642 | 1651 | addr = vm_mmap(obj->base.filp, 0, args->size, |
673a394b EA |
1652 | PROT_READ | PROT_WRITE, MAP_SHARED, |
1653 | args->offset); | |
ebfb6977 JL |
1654 | if (IS_ERR_VALUE(addr)) |
1655 | goto err; | |
1656 | ||
1816f923 AG |
1657 | if (args->flags & I915_MMAP_WC) { |
1658 | struct mm_struct *mm = current->mm; | |
1659 | struct vm_area_struct *vma; | |
1660 | ||
80a89a5e | 1661 | if (down_write_killable(&mm->mmap_sem)) { |
794a11cb CW |
1662 | addr = -EINTR; |
1663 | goto err; | |
80a89a5e | 1664 | } |
1816f923 | 1665 | vma = find_vma(mm, addr); |
5c4604e7 | 1666 | if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) |
1816f923 AG |
1667 | vma->vm_page_prot = |
1668 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); | |
1669 | else | |
1670 | addr = -ENOMEM; | |
1671 | up_write(&mm->mmap_sem); | |
ebfb6977 JL |
1672 | if (IS_ERR_VALUE(addr)) |
1673 | goto err; | |
aeecc969 CW |
1674 | |
1675 | /* This may race, but that's ok, it only gets set */ | |
50349247 | 1676 | WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU); |
1816f923 | 1677 | } |
f0cd5182 | 1678 | i915_gem_object_put(obj); |
673a394b | 1679 | |
739f3abd | 1680 | args->addr_ptr = (u64)addr; |
673a394b | 1681 | return 0; |
ebfb6977 JL |
1682 | |
1683 | err: | |
1684 | i915_gem_object_put(obj); | |
ebfb6977 | 1685 | return addr; |
673a394b EA |
1686 | } |
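/*
 * Illustrative sketch, not part of i915_gem.c: the legacy CPU mmap ioctl
 * above is driven from userspace roughly as below; handle and size are
 * assumed to come from an earlier GEM create call, and flags may instead
 * be I915_MMAP_WC on PAT-capable CPUs.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.size = size,
 *		.flags = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *	void *ptr = (void *)(uintptr_t)arg.addr_ptr;
 */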
1687 | ||
d899aceb | 1688 | static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) |
03af84fe | 1689 | { |
6649a0b6 | 1690 | return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; |
03af84fe CW |
1691 | } |
1692 | ||
4cc69075 CW |
1693 | /** |
1694 | * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps | |
1695 | * | |
1696 | * A history of the GTT mmap interface: | |
1697 | * | |
1698 | * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be |
1699 | * aligned and suitable for fencing, and still fit into the available |
1700 | * mappable space left by the pinned display objects. A classic problem | |
1701 | * we called the page-fault-of-doom where we would ping-pong between | |
1702 | * two objects that could not fit inside the GTT and so the memcpy | |
1703 | * would page one object in at the expense of the other between every | |
1704 | * single byte. | |
1705 | * | |
1706 | * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none |
1707 | * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the | |
1708 | * object is too large for the available space (or simply too large | |
1709 | * for the mappable aperture!), a view is created instead and faulted | |
1710 | * into userspace. (This view is aligned and sized appropriately for | |
1711 | * fenced access.) | |
1712 | * | |
e22d8e3c CW |
1713 | * 2 - Recognise WC as a separate cache domain so that we can flush the |
1714 | * delayed writes via GTT before performing direct access via WC. | |
1715 | * | |
4cc69075 CW |
1716 | * Restrictions: |
1717 | * | |
1718 | * * snoopable objects cannot be accessed via the GTT. It can cause machine | |
1719 | * hangs on some architectures, corruption on others. An attempt to service | |
1720 | * a GTT page fault from a snoopable object will generate a SIGBUS. | |
1721 | * | |
1722 | * * the object must be able to fit into RAM (physical memory, though not |
1723 | * limited to the mappable aperture). | |
1724 | * | |
1725 | * | |
1726 | * Caveats: | |
1727 | * | |
1728 | * * a new GTT page fault will synchronize rendering from the GPU and flush | |
1729 | * all data to system memory. Subsequent access will not be synchronized. | |
1730 | * | |
1731 | * * all mappings are revoked on runtime device suspend. | |
1732 | * | |
1733 | * * there are only 8, 16 or 32 fence registers to share between all users | |
1734 | * (older machines require a fence register for display and blitter access |
1735 | * as well). Contention of the fence registers will cause the previous users | |
1736 | * to be unmapped and any new access will generate new page faults. | |
1737 | * | |
1738 | * * running out of memory while servicing a fault may generate a SIGBUS, | |
1739 | * rather than the expected SIGSEGV. | |
1740 | */ | |
1741 | int i915_gem_mmap_gtt_version(void) | |
1742 | { | |
e22d8e3c | 1743 | return 2; |
4cc69075 CW |
1744 | } |
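/*
 * Illustrative sketch, not part of i915_gem.c: userspace reads the value
 * returned above through GETPARAM; "value" is just a local variable for
 * the example. Version >= 1 implies partial views, >= 2 the WC domain.
 *
 *	int value = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &value,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */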
1745 | ||
2d4281bb | 1746 | static inline struct i915_ggtt_view |
d899aceb | 1747 | compute_partial_view(const struct drm_i915_gem_object *obj, |
2d4281bb CW |
1748 | pgoff_t page_offset, |
1749 | unsigned int chunk) | |
1750 | { | |
1751 | struct i915_ggtt_view view; | |
1752 | ||
1753 | if (i915_gem_object_is_tiled(obj)) | |
1754 | chunk = roundup(chunk, tile_row_pages(obj)); | |
1755 | ||
2d4281bb | 1756 | view.type = I915_GGTT_VIEW_PARTIAL; |
8bab1193 CW |
1757 | view.partial.offset = rounddown(page_offset, chunk); |
1758 | view.partial.size = | |
2d4281bb | 1759 | min_t(unsigned int, chunk, |
8bab1193 | 1760 | (obj->base.size >> PAGE_SHIFT) - view.partial.offset); |
2d4281bb CW |
1761 | |
1762 | /* If the partial covers the entire object, just create a normal VMA. */ | |
1763 | if (chunk >= obj->base.size >> PAGE_SHIFT) | |
1764 | view.type = I915_GGTT_VIEW_NORMAL; | |
1765 | ||
1766 | return view; | |
1767 | } | |
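/*
 * Worked example, illustrative only: for an untiled 16MiB object
 * (4096 pages), a fault at page_offset 1000 with chunk = MIN_CHUNK_PAGES
 * (256 pages, i.e. 1MiB) yields partial.offset = rounddown(1000, 256) = 768
 * and partial.size = min(256, 4096 - 768) = 256. Since 256 < 4096, the
 * view stays I915_GGTT_VIEW_PARTIAL rather than being promoted to NORMAL.
 */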
1768 | ||
de151cf6 JB |
1769 | /** |
1770 | * i915_gem_fault - fault a page into the GTT | |
d9072a3e | 1771 | * @vmf: fault info |
de151cf6 JB |
1772 | * |
1773 | * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped |
1774 | * from userspace. The fault handler takes care of binding the object to | |
1775 | * the GTT (if needed), allocating and programming a fence register (again, | |
1776 | * only if needed based on whether the old reg is still valid or the object | |
1777 | * is tiled) and inserting a new PTE into the faulting process. | |
1778 | * | |
1779 | * Note that the faulting process may involve evicting existing objects | |
1780 | * from the GTT and/or fence registers to make room. So performance may | |
1781 | * suffer if the GTT working set is large or there are few fence registers | |
1782 | * left. | |
4cc69075 CW |
1783 | * |
1784 | * The current feature set supported by i915_gem_fault() and thus GTT mmaps | |
1785 | * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). | |
de151cf6 | 1786 | */ |
52137010 | 1787 | vm_fault_t i915_gem_fault(struct vm_fault *vmf) |
de151cf6 | 1788 | { |
420980ca | 1789 | #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) |
11bac800 | 1790 | struct vm_area_struct *area = vmf->vma; |
058d88c4 | 1791 | struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); |
05394f39 | 1792 | struct drm_device *dev = obj->base.dev; |
72e96d64 JL |
1793 | struct drm_i915_private *dev_priv = to_i915(dev); |
1794 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | |
aae7c06b | 1795 | bool write = area->vm_flags & VM_WRITE; |
538ef96b | 1796 | intel_wakeref_t wakeref; |
058d88c4 | 1797 | struct i915_vma *vma; |
de151cf6 | 1798 | pgoff_t page_offset; |
2caffbf1 | 1799 | int srcu; |
b8f9096d | 1800 | int ret; |
f65c9168 | 1801 | |
3e977ac6 CW |
1802 | /* Sanity check that we allow writing into this object */ |
1803 | if (i915_gem_object_is_readonly(obj) && write) | |
1804 | return VM_FAULT_SIGBUS; | |
1805 | ||
de151cf6 | 1806 | /* We don't use vmf->pgoff since that has the fake offset */ |
1a29d85e | 1807 | page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; |
de151cf6 | 1808 | |
db53a302 CW |
1809 | trace_i915_gem_object_fault(obj, page_offset, true, write); |
1810 | ||
6e4930f6 | 1811 | /* Try to flush the object off the GPU first without holding the lock. |
b8f9096d | 1812 | * Upon acquiring the lock, we will perform our sanity checks and then |
6e4930f6 CW |
1813 | * repeat the flush holding the lock in the normal manner to catch cases |
1814 | * where we are gazumped. | |
1815 | */ | |
e95433c7 CW |
1816 | ret = i915_gem_object_wait(obj, |
1817 | I915_WAIT_INTERRUPTIBLE, | |
62eb3c24 | 1818 | MAX_SCHEDULE_TIMEOUT); |
6e4930f6 | 1819 | if (ret) |
b8f9096d CW |
1820 | goto err; |
1821 | ||
40e62d5d CW |
1822 | ret = i915_gem_object_pin_pages(obj); |
1823 | if (ret) | |
1824 | goto err; | |
1825 | ||
538ef96b | 1826 | wakeref = intel_runtime_pm_get(dev_priv); |
b8f9096d | 1827 | |
43a8f684 CW |
1828 | srcu = i915_reset_trylock(dev_priv); |
1829 | if (srcu < 0) { | |
1830 | ret = srcu; | |
1831 | goto err_rpm; | |
1832 | } | |
1833 | ||
b8f9096d CW |
1834 | ret = i915_mutex_lock_interruptible(dev); |
1835 | if (ret) | |
43a8f684 | 1836 | goto err_reset; |
6e4930f6 | 1837 | |
eb119bd6 | 1838 | /* Access to snoopable pages through the GTT is incoherent. */ |
0031fb96 | 1839 | if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) { |
ddeff6ee | 1840 | ret = -EFAULT; |
b8f9096d | 1841 | goto err_unlock; |
eb119bd6 CW |
1842 | } |
1843 | ||
a61007a8 | 1844 | /* Now pin it into the GTT as needed */ |
7e7367d3 CW |
1845 | vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, |
1846 | PIN_MAPPABLE | | |
1847 | PIN_NONBLOCK | | |
1848 | PIN_NONFAULT); | |
a61007a8 | 1849 | if (IS_ERR(vma)) { |
a61007a8 | 1850 | /* Use a partial view if it is bigger than available space */ |
2d4281bb | 1851 | struct i915_ggtt_view view = |
8201c1fa | 1852 | compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); |
7e7367d3 | 1853 | unsigned int flags; |
aa136d9d | 1854 | |
7e7367d3 CW |
1855 | flags = PIN_MAPPABLE; |
1856 | if (view.type == I915_GGTT_VIEW_NORMAL) | |
1857 | flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ | |
1858 | ||
1859 | /* | |
1860 | * Userspace is now writing through an untracked VMA, abandon | |
50349247 CW |
1861 | * all hope that the hardware is able to track future writes. |
1862 | */ | |
1863 | obj->frontbuffer_ggtt_origin = ORIGIN_CPU; | |
1864 | ||
7e7367d3 CW |
1865 | vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); |
1866 | if (IS_ERR(vma) && !view.type) { | |
1867 | flags = PIN_MAPPABLE; | |
1868 | view.type = I915_GGTT_VIEW_PARTIAL; | |
1869 | vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); | |
1870 | } | |
a61007a8 | 1871 | } |
058d88c4 CW |
1872 | if (IS_ERR(vma)) { |
1873 | ret = PTR_ERR(vma); | |
b8f9096d | 1874 | goto err_unlock; |
058d88c4 | 1875 | } |
4a684a41 | 1876 | |
c9839303 CW |
1877 | ret = i915_gem_object_set_to_gtt_domain(obj, write); |
1878 | if (ret) | |
b8f9096d | 1879 | goto err_unpin; |
74898d7e | 1880 | |
aeaaa55c CW |
1881 | ret = i915_vma_pin_fence(vma); |
1882 | if (ret) | |
1883 | goto err_unpin; | |
1884 | ||
b90b91d8 | 1885 | /* Finally, remap it using the new GTT offset */ |
c58305af | 1886 | ret = remap_io_mapping(area, |
8bab1193 | 1887 | area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT), |
73ebd503 | 1888 | (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, |
c58305af | 1889 | min_t(u64, vma->size, area->vm_end - area->vm_start), |
73ebd503 | 1890 | &ggtt->iomap); |
a65adaf8 | 1891 | if (ret) |
43a8f684 | 1892 | goto err_fence; |
a61007a8 | 1893 | |
a65adaf8 CW |
1894 | /* Mark as being mmapped into userspace for later revocation */ |
1895 | assert_rpm_wakelock_held(dev_priv); | |
1896 | if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) | |
1897 | list_add(&obj->userfault_link, &dev_priv->mm.userfault_list); | |
1898 | GEM_BUG_ON(!obj->userfault_count); | |
1899 | ||
7125397b CW |
1900 | i915_vma_set_ggtt_write(vma); |
1901 | ||
aeaaa55c CW |
1902 | err_fence: |
1903 | i915_vma_unpin_fence(vma); | |
b8f9096d | 1904 | err_unpin: |
058d88c4 | 1905 | __i915_vma_unpin(vma); |
b8f9096d | 1906 | err_unlock: |
de151cf6 | 1907 | mutex_unlock(&dev->struct_mutex); |
43a8f684 CW |
1908 | err_reset: |
1909 | i915_reset_unlock(dev_priv, srcu); | |
b8f9096d | 1910 | err_rpm: |
538ef96b | 1911 | intel_runtime_pm_put(dev_priv, wakeref); |
40e62d5d | 1912 | i915_gem_object_unpin_pages(obj); |
b8f9096d | 1913 | err: |
de151cf6 | 1914 | switch (ret) { |
d9bc7e9f | 1915 | case -EIO: |
2232f031 DV |
1916 | /* |
1917 | * We eat errors when the gpu is terminally wedged to avoid | |
1918 | * userspace unduly crashing (gl has no provisions for mmaps to | |
1919 | * fail). But any other -EIO isn't ours (e.g. swap in failure) | |
1920 | * and so needs to be reported. | |
1921 | */ | |
c41166f9 | 1922 | if (!i915_terminally_wedged(dev_priv)) |
52137010 | 1923 | return VM_FAULT_SIGBUS; |
f0d759f0 | 1924 | /* else: fall through */ |
045e769a | 1925 | case -EAGAIN: |
571c608d DV |
1926 | /* |
1927 | * EAGAIN means the gpu is hung and we'll wait for the error | |
1928 | * handler to reset everything when re-faulting in | |
1929 | * i915_mutex_lock_interruptible. | |
d9bc7e9f | 1930 | */ |
c715089f CW |
1931 | case 0: |
1932 | case -ERESTARTSYS: | |
bed636ab | 1933 | case -EINTR: |
e79e0fe3 DR |
1934 | case -EBUSY: |
1935 | /* | |
1936 | * EBUSY is ok: this just means that another thread | |
1937 | * already did the job. | |
1938 | */ | |
52137010 | 1939 | return VM_FAULT_NOPAGE; |
de151cf6 | 1940 | case -ENOMEM: |
52137010 | 1941 | return VM_FAULT_OOM; |
a7c2e1aa | 1942 | case -ENOSPC: |
45d67817 | 1943 | case -EFAULT: |
52137010 | 1944 | return VM_FAULT_SIGBUS; |
de151cf6 | 1945 | default: |
a7c2e1aa | 1946 | WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); |
52137010 | 1947 | return VM_FAULT_SIGBUS; |
de151cf6 JB |
1948 | } |
1949 | } | |
1950 | ||
a65adaf8 CW |
1951 | static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) |
1952 | { | |
1953 | struct i915_vma *vma; | |
1954 | ||
1955 | GEM_BUG_ON(!obj->userfault_count); | |
1956 | ||
1957 | obj->userfault_count = 0; | |
1958 | list_del(&obj->userfault_link); | |
1959 | drm_vma_node_unmap(&obj->base.vma_node, | |
1960 | obj->base.dev->anon_inode->i_mapping); | |
1961 | ||
e2189dd0 | 1962 | for_each_ggtt_vma(vma, obj) |
a65adaf8 | 1963 | i915_vma_unset_userfault(vma); |
a65adaf8 CW |
1964 | } |
1965 | ||
901782b2 CW |
1966 | /** |
1967 | * i915_gem_release_mmap - remove physical page mappings | |
1968 | * @obj: obj in question | |
1969 | * | |
af901ca1 | 1970 | * Preserve the reservation of the mmapping with the DRM core code, but |
901782b2 CW |
1971 | * relinquish ownership of the pages back to the system. |
1972 | * | |
1973 | * It is vital that we remove the page mapping if we have mapped a tiled | |
1974 | * object through the GTT and then lose the fence register due to | |
1975 | * resource pressure. Similarly if the object has been moved out of the | |
1976 | * aperture, then pages mapped into userspace must be revoked. Removing the |
1977 | * mapping will then trigger a page fault on the next user access, allowing | |
1978 | * fixup by i915_gem_fault(). | |
1979 | */ | |
d05ca301 | 1980 | void |
05394f39 | 1981 | i915_gem_release_mmap(struct drm_i915_gem_object *obj) |
901782b2 | 1982 | { |
275f039d | 1983 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
538ef96b | 1984 | intel_wakeref_t wakeref; |
275f039d | 1985 | |
349f2ccf CW |
1986 | /* Serialisation between user GTT access and our code depends upon |
1987 | * revoking the CPU's PTE whilst the mutex is held. The next user | |
1988 | * pagefault then has to wait until we release the mutex. | |
9c870d03 CW |
1989 | * |
1990 | * Note that RPM complicates somewhat by adding an additional | |
1991 | * requirement that operations to the GGTT be made holding the RPM | |
1992 | * wakeref. | |
349f2ccf | 1993 | */ |
275f039d | 1994 | lockdep_assert_held(&i915->drm.struct_mutex); |
538ef96b | 1995 | wakeref = intel_runtime_pm_get(i915); |
349f2ccf | 1996 | |
a65adaf8 | 1997 | if (!obj->userfault_count) |
9c870d03 | 1998 | goto out; |
901782b2 | 1999 | |
a65adaf8 | 2000 | __i915_gem_object_release_mmap(obj); |
349f2ccf CW |
2001 | |
2002 | /* Ensure that the CPU's PTE are revoked and there are not outstanding | |
2003 | * memory transactions from userspace before we return. The TLB | |
2004 | * flushing implied above by changing the PTE above *should* be | |
2005 | * sufficient, an extra barrier here just provides us with a bit | |
2006 | * of paranoid documentation about our requirement to serialise | |
2007 | * memory writes before touching registers / GSM. | |
2008 | */ | |
2009 | wmb(); | |
9c870d03 CW |
2010 | |
2011 | out: | |
538ef96b | 2012 | intel_runtime_pm_put(i915, wakeref); |
901782b2 CW |
2013 | } |
2014 | ||
7c108fd8 | 2015 | void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) |
eedd10f4 | 2016 | { |
3594a3e2 | 2017 | struct drm_i915_gem_object *obj, *on; |
7c108fd8 | 2018 | int i; |
eedd10f4 | 2019 | |
3594a3e2 CW |
2020 | /* |
2021 | * Only called during RPM suspend. All users of the userfault_list | |
2022 | * must be holding an RPM wakeref to ensure that this can not | |
2023 | * run concurrently with themselves (and use the struct_mutex for | |
2024 | * protection between themselves). | |
2025 | */ | |
275f039d | 2026 | |
3594a3e2 | 2027 | list_for_each_entry_safe(obj, on, |
a65adaf8 CW |
2028 | &dev_priv->mm.userfault_list, userfault_link) |
2029 | __i915_gem_object_release_mmap(obj); | |
7c108fd8 CW |
2030 | |
2031 | /* The fence will be lost when the device powers down. If any were | |
2032 | * in use by hardware (i.e. they are pinned), we should not be powering | |
2033 | * down! All other fences will be reacquired by the user upon waking. | |
2034 | */ | |
2035 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | |
2036 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | |
2037 | ||
e0ec3ec6 CW |
2038 | /* Ideally we want to assert that the fence register is not |
2039 | * live at this point (i.e. that no piece of code will be | |
2040 | * trying to write through fence + GTT, as that both violates | |
2041 | * our tracking of activity and associated locking/barriers, | |
2042 | * but also is illegal given that the hw is powered down). | |
2043 | * | |
2044 | * Previously we used reg->pin_count as a "liveness" indicator. | |
2045 | * That is not sufficient, and we need a more fine-grained | |
2046 | * tool if we want to have a sanity check here. | |
2047 | */ | |
7c108fd8 CW |
2048 | |
2049 | if (!reg->vma) | |
2050 | continue; | |
2051 | ||
a65adaf8 | 2052 | GEM_BUG_ON(i915_vma_has_userfault(reg->vma)); |
7c108fd8 CW |
2053 | reg->dirty = true; |
2054 | } | |
eedd10f4 CW |
2055 | } |
2056 | ||
d8cb5086 CW |
2057 | static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) |
2058 | { | |
fac5e23e | 2059 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
f3f6184c | 2060 | int err; |
da494d7c | 2061 | |
f3f6184c | 2062 | err = drm_gem_create_mmap_offset(&obj->base); |
b42a13d9 | 2063 | if (likely(!err)) |
f3f6184c | 2064 | return 0; |
d8cb5086 | 2065 | |
b42a13d9 CW |
2066 | /* Attempt to reap some mmap space from dead objects */ |
2067 | do { | |
ec625fb9 CW |
2068 | err = i915_gem_wait_for_idle(dev_priv, |
2069 | I915_WAIT_INTERRUPTIBLE, | |
2070 | MAX_SCHEDULE_TIMEOUT); | |
b42a13d9 CW |
2071 | if (err) |
2072 | break; | |
f3f6184c | 2073 | |
b42a13d9 | 2074 | i915_gem_drain_freed_objects(dev_priv); |
f3f6184c | 2075 | err = drm_gem_create_mmap_offset(&obj->base); |
b42a13d9 CW |
2076 | if (!err) |
2077 | break; | |
2078 | ||
2079 | } while (flush_delayed_work(&dev_priv->gt.retire_work)); | |
da494d7c | 2080 | |
f3f6184c | 2081 | return err; |
d8cb5086 CW |
2082 | } |
2083 | ||
2084 | static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) | |
2085 | { | |
d8cb5086 CW |
2086 | drm_gem_free_mmap_offset(&obj->base); |
2087 | } | |
2088 | ||
da6b51d0 | 2089 | int |
ff72145b DA |
2090 | i915_gem_mmap_gtt(struct drm_file *file, |
2091 | struct drm_device *dev, | |
739f3abd JN |
2092 | u32 handle, |
2093 | u64 *offset) | |
de151cf6 | 2094 | { |
05394f39 | 2095 | struct drm_i915_gem_object *obj; |
de151cf6 JB |
2096 | int ret; |
2097 | ||
03ac0642 | 2098 | obj = i915_gem_object_lookup(file, handle); |
f3f6184c CW |
2099 | if (!obj) |
2100 | return -ENOENT; | |
ab18282d | 2101 | |
d8cb5086 | 2102 | ret = i915_gem_object_create_mmap_offset(obj); |
f3f6184c CW |
2103 | if (ret == 0) |
2104 | *offset = drm_vma_node_offset_addr(&obj->base.vma_node); | |
de151cf6 | 2105 | |
f0cd5182 | 2106 | i915_gem_object_put(obj); |
1d7cfea1 | 2107 | return ret; |
de151cf6 JB |
2108 | } |
2109 | ||
ff72145b DA |
2110 | /** |
2111 | * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing | |
2112 | * @dev: DRM device | |
2113 | * @data: GTT mapping ioctl data | |
2114 | * @file: GEM object info | |
2115 | * | |
2116 | * Simply returns the fake offset to userspace so it can mmap it. | |
2117 | * The mmap call will end up in drm_gem_mmap(), which will set things | |
2118 | * up so we can get faults in the handler above. | |
2119 | * | |
2120 | * The fault handler will take care of binding the object into the GTT | |
2121 | * (since it may have been evicted to make room for something), allocating | |
2122 | * a fence register, and mapping the appropriate aperture address into | |
2123 | * userspace. | |
2124 | */ | |
2125 | int | |
2126 | i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, | |
2127 | struct drm_file *file) | |
2128 | { | |
2129 | struct drm_i915_gem_mmap_gtt *args = data; | |
2130 | ||
da6b51d0 | 2131 | return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); |
ff72145b DA |
2132 | } |
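/*
 * Illustrative sketch, not part of i915_gem.c: the full GTT mmap flow from
 * userspace is the fake-offset ioctl above followed by a plain mmap() on
 * the DRM fd; handle and size are assumed from an earlier create call.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, arg.offset);
 */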
2133 | ||
225067ee DV |
2134 | /* Immediately discard the backing storage */ |
2135 | static void | |
2136 | i915_gem_object_truncate(struct drm_i915_gem_object *obj) | |
e5281ccd | 2137 | { |
4d6294bf | 2138 | i915_gem_object_free_mmap_offset(obj); |
1286ff73 | 2139 | |
4d6294bf CW |
2140 | if (obj->base.filp == NULL) |
2141 | return; | |
e5281ccd | 2142 | |
225067ee DV |
2143 | /* Our goal here is to return as much of the memory as |
2144 | * is possible back to the system as we are called from OOM. | |
2145 | * To do this we must instruct the shmfs to drop all of its | |
2146 | * backing pages, *now*. | |
2147 | */ | |
5537252b | 2148 | shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); |
a4f5ea64 | 2149 | obj->mm.madv = __I915_MADV_PURGED; |
4e5462ee | 2150 | obj->mm.pages = ERR_PTR(-EFAULT); |
225067ee | 2151 | } |
e5281ccd | 2152 | |
5537252b | 2153 | /* Try to discard unwanted pages */ |
03ac84f1 | 2154 | void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) |
225067ee | 2155 | { |
5537252b CW |
2156 | struct address_space *mapping; |
2157 | ||
1233e2db | 2158 | lockdep_assert_held(&obj->mm.lock); |
f1fa4f44 | 2159 | GEM_BUG_ON(i915_gem_object_has_pages(obj)); |
1233e2db | 2160 | |
a4f5ea64 | 2161 | switch (obj->mm.madv) { |
5537252b CW |
2162 | case I915_MADV_DONTNEED: |
2163 | i915_gem_object_truncate(obj); | |
2164 | case __I915_MADV_PURGED: | |
2165 | return; | |
2166 | } | |
2167 | ||
2168 | if (obj->base.filp == NULL) | |
2169 | return; | |
2170 | ||
93c76a3d | 2171 | mapping = obj->base.filp->f_mapping, |
5537252b | 2172 | invalidate_mapping_pages(mapping, 0, (loff_t)-1); |
e5281ccd CW |
2173 | } |
2174 | ||
64e3d12f KHY |
2175 | /* |
2176 | * Move pages to appropriate lru and release the pagevec, decrementing the | |
2177 | * ref count of those pages. | |
2178 | */ | |
2179 | static void check_release_pagevec(struct pagevec *pvec) | |
2180 | { | |
2181 | check_move_unevictable_pages(pvec); | |
2182 | __pagevec_release(pvec); | |
2183 | cond_resched(); | |
2184 | } | |
2185 | ||
5cdf5881 | 2186 | static void |
03ac84f1 CW |
2187 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, |
2188 | struct sg_table *pages) | |
673a394b | 2189 | { |
85d1225e | 2190 | struct sgt_iter sgt_iter; |
64e3d12f | 2191 | struct pagevec pvec; |
85d1225e | 2192 | struct page *page; |
1286ff73 | 2193 | |
e5facdf9 | 2194 | __i915_gem_object_release_shmem(obj, pages, true); |
673a394b | 2195 | |
03ac84f1 | 2196 | i915_gem_gtt_finish_pages(obj, pages); |
e2273302 | 2197 | |
6dacfd2f | 2198 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
03ac84f1 | 2199 | i915_gem_object_save_bit_17_swizzle(obj, pages); |
280b713b | 2200 | |
64e3d12f KHY |
2201 | mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping); |
2202 | ||
2203 | pagevec_init(&pvec); | |
03ac84f1 | 2204 | for_each_sgt_page(page, sgt_iter, pages) { |
a4f5ea64 | 2205 | if (obj->mm.dirty) |
9da3da66 | 2206 | set_page_dirty(page); |
3ef94daa | 2207 | |
a4f5ea64 | 2208 | if (obj->mm.madv == I915_MADV_WILLNEED) |
9da3da66 | 2209 | mark_page_accessed(page); |
3ef94daa | 2210 | |
64e3d12f KHY |
2211 | if (!pagevec_add(&pvec, page)) |
2212 | check_release_pagevec(&pvec); | |
3ef94daa | 2213 | } |
64e3d12f KHY |
2214 | if (pagevec_count(&pvec)) |
2215 | check_release_pagevec(&pvec); | |
a4f5ea64 | 2216 | obj->mm.dirty = false; |
673a394b | 2217 | |
03ac84f1 CW |
2218 | sg_free_table(pages); |
2219 | kfree(pages); | |
37e680a1 | 2220 | } |
6c085a72 | 2221 | |
96d77634 CW |
2222 | static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) |
2223 | { | |
2224 | struct radix_tree_iter iter; | |
c23aa71b | 2225 | void __rcu **slot; |
96d77634 | 2226 | |
bea6e987 | 2227 | rcu_read_lock(); |
a4f5ea64 CW |
2228 | radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) |
2229 | radix_tree_delete(&obj->mm.get_page.radix, iter.index); | |
bea6e987 | 2230 | rcu_read_unlock(); |
96d77634 CW |
2231 | } |
2232 | ||
acd1c1e6 CW |
2233 | static struct sg_table * |
2234 | __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) | |
37e680a1 | 2235 | { |
f2123818 | 2236 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
03ac84f1 | 2237 | struct sg_table *pages; |
37e680a1 | 2238 | |
03ac84f1 | 2239 | pages = fetch_and_zero(&obj->mm.pages); |
484d9a84 CW |
2240 | if (IS_ERR_OR_NULL(pages)) |
2241 | return pages; | |
a2165e31 | 2242 | |
f2123818 CW |
2243 | spin_lock(&i915->mm.obj_lock); |
2244 | list_del(&obj->mm.link); | |
2245 | spin_unlock(&i915->mm.obj_lock); | |
2246 | ||
a4f5ea64 | 2247 | if (obj->mm.mapping) { |
4b30cb23 CW |
2248 | void *ptr; |
2249 | ||
0ce81788 | 2250 | ptr = page_mask_bits(obj->mm.mapping); |
4b30cb23 CW |
2251 | if (is_vmalloc_addr(ptr)) |
2252 | vunmap(ptr); | |
fb8621d3 | 2253 | else |
4b30cb23 CW |
2254 | kunmap(kmap_to_page(ptr)); |
2255 | ||
a4f5ea64 | 2256 | obj->mm.mapping = NULL; |
0a798eb9 CW |
2257 | } |
2258 | ||
96d77634 | 2259 | __i915_gem_object_reset_page_iter(obj); |
acd1c1e6 CW |
2260 | obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; |
2261 | ||
2262 | return pages; | |
2263 | } | |
96d77634 | 2264 | |
484d9a84 CW |
2265 | int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, |
2266 | enum i915_mm_subclass subclass) | |
acd1c1e6 CW |
2267 | { |
2268 | struct sg_table *pages; | |
484d9a84 | 2269 | int ret; |
acd1c1e6 CW |
2270 | |
2271 | if (i915_gem_object_has_pinned_pages(obj)) | |
484d9a84 | 2272 | return -EBUSY; |
acd1c1e6 CW |
2273 | |
2274 | GEM_BUG_ON(obj->bind_count); | |
acd1c1e6 CW |
2275 | |
2276 | /* May be called by shrinker from within get_pages() (on another bo) */ | |
2277 | mutex_lock_nested(&obj->mm.lock, subclass); | |
484d9a84 CW |
2278 | if (unlikely(atomic_read(&obj->mm.pages_pin_count))) { |
2279 | ret = -EBUSY; | |
acd1c1e6 | 2280 | goto unlock; |
484d9a84 | 2281 | } |
acd1c1e6 CW |
2282 | |
2283 | /* | |
2284 | * ->put_pages might need to allocate memory for the bit17 swizzle | |
2285 | * array, hence protect them from being reaped by removing them from gtt | |
2286 | * lists early. | |
2287 | */ | |
2288 | pages = __i915_gem_object_unset_pages(obj); | |
484d9a84 CW |
2289 | |
2290 | /* | |
2291 | * XXX Temporary hijinx to avoid updating all backends to handle | |
2292 | * NULL pages. In the future, when we have more asynchronous | |
2293 | * get_pages backends we should be better able to handle the | |
2294 | * cancellation of the async task in a more uniform manner. | |
2295 | */ | |
2296 | if (!pages && !i915_gem_object_needs_async_cancel(obj)) | |
2297 | pages = ERR_PTR(-EINVAL); | |
2298 | ||
4e5462ee CW |
2299 | if (!IS_ERR(pages)) |
2300 | obj->ops->put_pages(obj, pages); | |
2301 | ||
484d9a84 | 2302 | ret = 0; |
1233e2db CW |
2303 | unlock: |
2304 | mutex_unlock(&obj->mm.lock); | |
484d9a84 CW |
2305 | |
2306 | return ret; | |
6c085a72 CW |
2307 | } |
2308 | ||
f8e57863 | 2309 | bool i915_sg_trim(struct sg_table *orig_st) |
0c40ce13 TU |
2310 | { |
2311 | struct sg_table new_st; | |
2312 | struct scatterlist *sg, *new_sg; | |
2313 | unsigned int i; | |
2314 | ||
2315 | if (orig_st->nents == orig_st->orig_nents) | |
935a2f77 | 2316 | return false; |
0c40ce13 | 2317 | |
8bfc478f | 2318 | if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) |
935a2f77 | 2319 | return false; |
0c40ce13 TU |
2320 | |
2321 | new_sg = new_st.sgl; | |
2322 | for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { | |
2323 | sg_set_page(new_sg, sg_page(sg), sg->length, 0); | |
c6d22ab6 MA |
2324 | sg_dma_address(new_sg) = sg_dma_address(sg); |
2325 | sg_dma_len(new_sg) = sg_dma_len(sg); | |
2326 | ||
0c40ce13 TU |
2327 | new_sg = sg_next(new_sg); |
2328 | } | |
c2dc6cc9 | 2329 | GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ |
0c40ce13 TU |
2330 | |
2331 | sg_free_table(orig_st); | |
2332 | ||
2333 | *orig_st = new_st; | |
935a2f77 | 2334 | return true; |
0c40ce13 TU |
2335 | } |
2336 | ||
b91b09ee | 2337 | static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) |
e5281ccd | 2338 | { |
fac5e23e | 2339 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
d766ef53 CW |
2340 | const unsigned long page_count = obj->base.size / PAGE_SIZE; |
2341 | unsigned long i; | |
e5281ccd | 2342 | struct address_space *mapping; |
9da3da66 CW |
2343 | struct sg_table *st; |
2344 | struct scatterlist *sg; | |
85d1225e | 2345 | struct sgt_iter sgt_iter; |
e5281ccd | 2346 | struct page *page; |
90797e6d | 2347 | unsigned long last_pfn = 0; /* suppress gcc warning */ |
5602452e | 2348 | unsigned int max_segment = i915_sg_segment_size(); |
84e8978e | 2349 | unsigned int sg_page_sizes; |
64e3d12f | 2350 | struct pagevec pvec; |
4846bf0c | 2351 | gfp_t noreclaim; |
e2273302 | 2352 | int ret; |
e5281ccd | 2353 | |
e0ff7a7c CW |
2354 | /* |
2355 | * Assert that the object is not currently in any GPU domain. As it | |
6c085a72 CW |
2356 | * wasn't in the GTT, there shouldn't be any way it could have been in |
2357 | * a GPU cache | |
2358 | */ | |
c0a51fd0 CK |
2359 | GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); |
2360 | GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); | |
6c085a72 | 2361 | |
e0ff7a7c CW |
2362 | /* |
2363 | * If there's no chance of allocating enough pages for the whole | |
2364 | * object, bail early. | |
2365 | */ | |
ca79b0c2 | 2366 | if (page_count > totalram_pages()) |
e0ff7a7c CW |
2367 | return -ENOMEM; |
2368 | ||
9da3da66 CW |
2369 | st = kmalloc(sizeof(*st), GFP_KERNEL); |
2370 | if (st == NULL) | |
b91b09ee | 2371 | return -ENOMEM; |
9da3da66 | 2372 | |
d766ef53 | 2373 | rebuild_st: |
9da3da66 | 2374 | if (sg_alloc_table(st, page_count, GFP_KERNEL)) { |
9da3da66 | 2375 | kfree(st); |
b91b09ee | 2376 | return -ENOMEM; |
9da3da66 | 2377 | } |
e5281ccd | 2378 | |
e0ff7a7c CW |
2379 | /* |
2380 | * Get the list of pages out of our struct file. They'll be pinned | |
9da3da66 CW |
2381 | * at this point until we release them. |
2382 | * | |
2383 | * Fail silently without starting the shrinker | |
2384 | */ | |
93c76a3d | 2385 | mapping = obj->base.filp->f_mapping; |
64e3d12f | 2386 | mapping_set_unevictable(mapping); |
0f6ab55d | 2387 | noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); |
4846bf0c CW |
2388 | noreclaim |= __GFP_NORETRY | __GFP_NOWARN; |
2389 | ||
90797e6d ID |
2390 | sg = st->sgl; |
2391 | st->nents = 0; | |
84e8978e | 2392 | sg_page_sizes = 0; |
90797e6d | 2393 | for (i = 0; i < page_count; i++) { |
4846bf0c CW |
2394 | const unsigned int shrink[] = { |
2395 | I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, | |
2396 | 0, | |
2397 | }, *s = shrink; | |
2398 | gfp_t gfp = noreclaim; | |
2399 | ||
2400 | do { | |
e6db7f4d | 2401 | cond_resched(); |
6c085a72 | 2402 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); |
772b5408 | 2403 | if (!IS_ERR(page)) |
4846bf0c CW |
2404 | break; |
2405 | ||
2406 | if (!*s) { | |
2407 | ret = PTR_ERR(page); | |
2408 | goto err_sg; | |
2409 | } | |
2410 | ||
912d572d | 2411 | i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); |
24f8e00a | 2412 | |
e0ff7a7c CW |
2413 | /* |
2414 | * We've tried hard to allocate the memory by reaping | |
6c085a72 CW |
2415 | * our own buffer, now let the real VM do its job and |
2416 | * go down in flames if truly OOM. | |
24f8e00a CW |
2417 | * |
2418 | * However, since graphics tend to be disposable, | |
2419 | * defer the oom here by reporting the ENOMEM back | |
2420 | * to userspace. | |
6c085a72 | 2421 | */ |
4846bf0c CW |
2422 | if (!*s) { |
2423 | /* reclaim and warn, but no oom */ | |
2424 | gfp = mapping_gfp_mask(mapping); | |
eaf41801 | 2425 | |
e0ff7a7c CW |
2426 | /* |
2427 | * Our bo are always dirty and so we require | |
eaf41801 CW |
2428 | * kswapd to reclaim our pages (direct reclaim |
2429 | * does not effectively begin pageout of our | |
2430 | * buffers on its own). However, direct reclaim | |
2431 | * only waits for kswapd when under allocation | |
2432 | * congestion. So as a result __GFP_RECLAIM is | |
2433 | * unreliable and fails to actually reclaim our | |
2434 | * dirty pages -- unless you try over and over | |
2435 | * again with !__GFP_NORETRY. However, we still | |
2436 | * want to fail this allocation rather than | |
2437 | * trigger the out-of-memory killer and for | |
dbb32956 | 2438 | * this we want __GFP_RETRY_MAYFAIL. |
eaf41801 | 2439 | */ |
dbb32956 | 2440 | gfp |= __GFP_RETRY_MAYFAIL; |
e2273302 | 2441 | } |
4846bf0c CW |
2442 | } while (1); |
2443 | ||
871dfbd6 CW |
2444 | if (!i || |
2445 | sg->length >= max_segment || | |
2446 | page_to_pfn(page) != last_pfn + 1) { | |
a5c08166 | 2447 | if (i) { |
84e8978e | 2448 | sg_page_sizes |= sg->length; |
90797e6d | 2449 | sg = sg_next(sg); |
a5c08166 | 2450 | } |
90797e6d ID |
2451 | st->nents++; |
2452 | sg_set_page(sg, page, PAGE_SIZE, 0); | |
2453 | } else { | |
2454 | sg->length += PAGE_SIZE; | |
2455 | } | |
2456 | last_pfn = page_to_pfn(page); | |
3bbbe706 DV |
2457 | |
2458 | /* Check that the i965g/gm workaround works. */ | |
2459 | WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); | |
e5281ccd | 2460 | } |
a5c08166 | 2461 | if (sg) { /* loop terminated early; short sg table */ |
84e8978e | 2462 | sg_page_sizes |= sg->length; |
426729dc | 2463 | sg_mark_end(sg); |
a5c08166 | 2464 | } |
74ce6b6c | 2465 | |
0c40ce13 TU |
2466 | /* Trim unused sg entries to avoid wasting memory. */ |
2467 | i915_sg_trim(st); | |
2468 | ||
03ac84f1 | 2469 | ret = i915_gem_gtt_prepare_pages(obj, st); |
d766ef53 | 2470 | if (ret) { |
e0ff7a7c CW |
2471 | /* |
2472 | * DMA remapping failed? One possible cause is that | |
d766ef53 CW |
2473 | * it could not reserve enough large entries, asking |
2474 | * for PAGE_SIZE chunks instead may be helpful. | |
2475 | */ | |
2476 | if (max_segment > PAGE_SIZE) { | |
2477 | for_each_sgt_page(page, sgt_iter, st) | |
2478 | put_page(page); | |
2479 | sg_free_table(st); | |
2480 | ||
2481 | max_segment = PAGE_SIZE; | |
2482 | goto rebuild_st; | |
2483 | } else { | |
2484 | dev_warn(&dev_priv->drm.pdev->dev, | |
2485 | "Failed to DMA remap %lu pages\n", | |
2486 | page_count); | |
2487 | goto err_pages; | |
2488 | } | |
2489 | } | |
e2273302 | 2490 | |
6dacfd2f | 2491 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
03ac84f1 | 2492 | i915_gem_object_do_bit_17_swizzle(obj, st); |
e5281ccd | 2493 | |
84e8978e | 2494 | __i915_gem_object_set_pages(obj, st, sg_page_sizes); |
b91b09ee MA |
2495 | |
2496 | return 0; | |
e5281ccd | 2497 | |
b17993b7 | 2498 | err_sg: |
90797e6d | 2499 | sg_mark_end(sg); |
b17993b7 | 2500 | err_pages: |
64e3d12f KHY |
2501 | mapping_clear_unevictable(mapping); |
2502 | pagevec_init(&pvec); | |
2503 | for_each_sgt_page(page, sgt_iter, st) { | |
2504 | if (!pagevec_add(&pvec, page)) | |
2505 | check_release_pagevec(&pvec); | |
2506 | } | |
2507 | if (pagevec_count(&pvec)) | |
2508 | check_release_pagevec(&pvec); | |
9da3da66 CW |
2509 | sg_free_table(st); |
2510 | kfree(st); | |
0820baf3 | 2511 | |
e0ff7a7c CW |
2512 | /* |
2513 | * shmemfs first checks if there is enough memory to allocate the page | |
0820baf3 CW |
2514 | * and reports ENOSPC should there be insufficient, along with the usual |
2515 | * ENOMEM for a genuine allocation failure. | |
2516 | * | |
2517 | * We use ENOSPC in our driver to mean that we have run out of aperture | |
2518 | * space and so want to translate the error from shmemfs back to our | |
2519 | * usual understanding of ENOMEM. | |
2520 | */ | |
e2273302 ID |
2521 | if (ret == -ENOSPC) |
2522 | ret = -ENOMEM; | |
2523 | ||
b91b09ee | 2524 | return ret; |
03ac84f1 CW |
2525 | } |
2526 | ||
2527 | void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, | |
a5c08166 | 2528 | struct sg_table *pages, |
84e8978e | 2529 | unsigned int sg_page_sizes) |
03ac84f1 | 2530 | { |
a5c08166 MA |
2531 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
2532 | unsigned long supported = INTEL_INFO(i915)->page_sizes; | |
2533 | int i; | |
2534 | ||
1233e2db | 2535 | lockdep_assert_held(&obj->mm.lock); |
03ac84f1 CW |
2536 | |
2537 | obj->mm.get_page.sg_pos = pages->sgl; | |
2538 | obj->mm.get_page.sg_idx = 0; | |
2539 | ||
2540 | obj->mm.pages = pages; | |
2c3a3f44 CW |
2541 | |
2542 | if (i915_gem_object_is_tiled(obj) && | |
f2123818 | 2543 | i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { |
2c3a3f44 CW |
2544 | GEM_BUG_ON(obj->mm.quirked); |
2545 | __i915_gem_object_pin_pages(obj); | |
2546 | obj->mm.quirked = true; | |
2547 | } | |
a5c08166 | 2548 | |
84e8978e MA |
2549 | GEM_BUG_ON(!sg_page_sizes); |
2550 | obj->mm.page_sizes.phys = sg_page_sizes; | |
a5c08166 MA |
2551 | |
2552 | /* | |
84e8978e MA |
2553 | * Calculate the supported page-sizes which fit into the given |
2554 | * sg_page_sizes. This will give us the page-sizes which we may be able | |
2555 | * to use opportunistically when later inserting into the GTT. For | |
2556 | * example if phys=2G, then in theory we should be able to use 1G, 2M, | |
2557 | * 64K or 4K pages, although in practice this will depend on a number of | |
2558 | * other factors. | |
a5c08166 MA |
2559 | */ |
2560 | obj->mm.page_sizes.sg = 0; | |
2561 | for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { | |
2562 | if (obj->mm.page_sizes.phys & ~0u << i) | |
2563 | obj->mm.page_sizes.sg |= BIT(i); | |
2564 | } | |
a5c08166 | 2565 | GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); |
f2123818 CW |
2566 | |
2567 | spin_lock(&i915->mm.obj_lock); | |
2568 | list_add(&obj->mm.link, &i915->mm.unbound_list); | |
2569 | spin_unlock(&i915->mm.obj_lock); | |
03ac84f1 CW |
2570 | } |
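/*
 * Worked example, illustrative only: if the sg list set above contains 2M
 * and 4K segments (sg_page_sizes = 2M | 4K) on a platform whose supported
 * mask is 2M | 64K | 4K, the loop computes page_sizes.sg = 2M | 64K | 4K,
 * because for every supported bit there exists a physical segment at least
 * that large.
 */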
2571 | ||
2572 | static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) | |
2573 | { | |
b91b09ee | 2574 | int err; |
03ac84f1 CW |
2575 | |
2576 | if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { | |
2577 | DRM_DEBUG("Attempting to obtain a purgeable object\n"); | |
2578 | return -EFAULT; | |
2579 | } | |
2580 | ||
b91b09ee | 2581 | err = obj->ops->get_pages(obj); |
b65a9b98 | 2582 | GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj)); |
03ac84f1 | 2583 | |
b91b09ee | 2584 | return err; |
673a394b EA |
2585 | } |
2586 | ||
37e680a1 | 2587 | /* Ensure that the associated pages are gathered from the backing storage |
1233e2db | 2588 | * and pinned into our object. i915_gem_object_pin_pages() may be called |
37e680a1 | 2589 | * multiple times before they are released by a single call to |
1233e2db | 2590 | * i915_gem_object_unpin_pages() - once the pages are no longer referenced |
37e680a1 CW |
2591 | * either as a result of memory pressure (reaping pages under the shrinker) |
2592 | * or as the object is itself released. | |
2593 | */ | |
a4f5ea64 | 2594 | int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) |
37e680a1 | 2595 | { |
03ac84f1 | 2596 | int err; |
37e680a1 | 2597 | |
1233e2db CW |
2598 | err = mutex_lock_interruptible(&obj->mm.lock); |
2599 | if (err) | |
2600 | return err; | |
4c7d62c6 | 2601 | |
f1fa4f44 | 2602 | if (unlikely(!i915_gem_object_has_pages(obj))) { |
88c880bb CW |
2603 | GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); |
2604 | ||
2c3a3f44 CW |
2605 | err = ____i915_gem_object_get_pages(obj); |
2606 | if (err) | |
2607 | goto unlock; | |
37e680a1 | 2608 | |
2c3a3f44 CW |
2609 | smp_mb__before_atomic(); |
2610 | } | |
2611 | atomic_inc(&obj->mm.pages_pin_count); | |
ee286370 | 2612 | |
1233e2db CW |
2613 | unlock: |
2614 | mutex_unlock(&obj->mm.lock); | |
03ac84f1 | 2615 | return err; |
673a394b EA |
2616 | } |
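/*
 * Illustrative sketch, not part of i915_gem.c: typical callers bracket use
 * of the backing store with a pin/unpin pair, as the pwrite and fault
 * paths earlier in this file do.
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... access obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);
 */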
2617 | ||
dd6034c6 | 2618 | /* The 'mapping' part of i915_gem_object_pin_map() below */ |
d31d7cb1 CW |
2619 | static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, |
2620 | enum i915_map_type type) | |
dd6034c6 DG |
2621 | { |
2622 | unsigned long n_pages = obj->base.size >> PAGE_SHIFT; | |
a4f5ea64 | 2623 | struct sg_table *sgt = obj->mm.pages; |
85d1225e DG |
2624 | struct sgt_iter sgt_iter; |
2625 | struct page *page; | |
b338fa47 DG |
2626 | struct page *stack_pages[32]; |
2627 | struct page **pages = stack_pages; | |
dd6034c6 | 2628 | unsigned long i = 0; |
d31d7cb1 | 2629 | pgprot_t pgprot; |
dd6034c6 DG |
2630 | void *addr; |
2631 | ||
2632 | /* A single page can always be kmapped */ | |
d31d7cb1 | 2633 | if (n_pages == 1 && type == I915_MAP_WB) |
dd6034c6 DG |
2634 | return kmap(sg_page(sgt->sgl)); |
2635 | ||
b338fa47 DG |
2636 | if (n_pages > ARRAY_SIZE(stack_pages)) { |
2637 | /* Too big for stack -- allocate temporary array instead */ | |
0ee931c4 | 2638 | pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); |
b338fa47 DG |
2639 | if (!pages) |
2640 | return NULL; | |
2641 | } | |
dd6034c6 | 2642 | |
85d1225e DG |
2643 | for_each_sgt_page(page, sgt_iter, sgt) |
2644 | pages[i++] = page; | |
dd6034c6 DG |
2645 | |
2646 | /* Check that we have the expected number of pages */ | |
2647 | GEM_BUG_ON(i != n_pages); | |
2648 | ||
d31d7cb1 | 2649 | switch (type) { |
a575c676 CW |
2650 | default: |
2651 | MISSING_CASE(type); | |
2652 | /* fallthrough to use PAGE_KERNEL anyway */ | |
d31d7cb1 CW |
2653 | case I915_MAP_WB: |
2654 | pgprot = PAGE_KERNEL; | |
2655 | break; | |
2656 | case I915_MAP_WC: | |
2657 | pgprot = pgprot_writecombine(PAGE_KERNEL_IO); | |
2658 | break; | |
2659 | } | |
2660 | addr = vmap(pages, n_pages, 0, pgprot); | |
dd6034c6 | 2661 | |
b338fa47 | 2662 | if (pages != stack_pages) |
2098105e | 2663 | kvfree(pages); |
dd6034c6 DG |
2664 | |
2665 | return addr; | |
2666 | } | |
2667 | ||
2668 | /* get, pin, and map the pages of the object into kernel space */ | |
d31d7cb1 CW |
2669 | void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, |
2670 | enum i915_map_type type) | |
0a798eb9 | 2671 | { |
d31d7cb1 CW |
2672 | enum i915_map_type has_type; |
2673 | bool pinned; | |
2674 | void *ptr; | |
0a798eb9 CW |
2675 | int ret; |
2676 | ||
a03f395a TZ |
2677 | if (unlikely(!i915_gem_object_has_struct_page(obj))) |
2678 | return ERR_PTR(-ENXIO); | |
0a798eb9 | 2679 | |
1233e2db | 2680 | ret = mutex_lock_interruptible(&obj->mm.lock); |
0a798eb9 CW |
2681 | if (ret) |
2682 | return ERR_PTR(ret); | |
2683 | ||
a575c676 CW |
2684 | pinned = !(type & I915_MAP_OVERRIDE); |
2685 | type &= ~I915_MAP_OVERRIDE; | |
2686 | ||
1233e2db | 2687 | if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { |
f1fa4f44 | 2688 | if (unlikely(!i915_gem_object_has_pages(obj))) { |
88c880bb CW |
2689 | GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); |
2690 | ||
2c3a3f44 CW |
2691 | ret = ____i915_gem_object_get_pages(obj); |
2692 | if (ret) | |
2693 | goto err_unlock; | |
1233e2db | 2694 | |
2c3a3f44 CW |
2695 | smp_mb__before_atomic(); |
2696 | } | |
2697 | atomic_inc(&obj->mm.pages_pin_count); | |
1233e2db CW |
2698 | pinned = false; |
2699 | } | |
f1fa4f44 | 2700 | GEM_BUG_ON(!i915_gem_object_has_pages(obj)); |
0a798eb9 | 2701 | |
0ce81788 | 2702 | ptr = page_unpack_bits(obj->mm.mapping, &has_type); |
d31d7cb1 CW |
2703 | if (ptr && has_type != type) { |
2704 | if (pinned) { | |
2705 | ret = -EBUSY; | |
1233e2db | 2706 | goto err_unpin; |
0a798eb9 | 2707 | } |
d31d7cb1 CW |
2708 | |
2709 | if (is_vmalloc_addr(ptr)) | |
2710 | vunmap(ptr); | |
2711 | else | |
2712 | kunmap(kmap_to_page(ptr)); | |
2713 | ||
a4f5ea64 | 2714 | ptr = obj->mm.mapping = NULL; |
0a798eb9 CW |
2715 | } |
2716 | ||
d31d7cb1 CW |
2717 | if (!ptr) { |
2718 | ptr = i915_gem_object_map(obj, type); | |
2719 | if (!ptr) { | |
2720 | ret = -ENOMEM; | |
1233e2db | 2721 | goto err_unpin; |
d31d7cb1 CW |
2722 | } |
2723 | ||
0ce81788 | 2724 | obj->mm.mapping = page_pack_bits(ptr, type); |
d31d7cb1 CW |
2725 | } |
2726 | ||
1233e2db CW |
2727 | out_unlock: |
2728 | mutex_unlock(&obj->mm.lock); | |
d31d7cb1 CW |
2729 | return ptr; |
2730 | ||
1233e2db CW |
2731 | err_unpin: |
2732 | atomic_dec(&obj->mm.pages_pin_count); | |
2733 | err_unlock: | |
2734 | ptr = ERR_PTR(ret); | |
2735 | goto out_unlock; | |
0a798eb9 CW |
2736 | } |
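/*
 * Illustrative sketch, not part of i915_gem.c: pin_map() pairs with
 * i915_gem_object_unpin_map(); a typical CPU-side mapping looks like
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... read or write through vaddr ...
 *	i915_gem_object_unpin_map(obj);
 */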
2737 | ||
7c55e2c5 CW |
2738 | static int |
2739 | i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, | |
2740 | const struct drm_i915_gem_pwrite *arg) | |
2741 | { | |
2742 | struct address_space *mapping = obj->base.filp->f_mapping; | |
2743 | char __user *user_data = u64_to_user_ptr(arg->data_ptr); | |
2744 | u64 remain, offset; | |
2745 | unsigned int pg; | |
2746 | ||
2747 | /* Before we instantiate/pin the backing store for our use, we | |
2748 | * can prepopulate the shmemfs filp efficiently using a write into | |
2749 | * the pagecache. We avoid the penalty of instantiating all the | |
2750 | * pages, important if the user is just writing to a few and never | |
2751 | * uses the object on the GPU, and using a direct write into shmemfs | |
2752 | * allows it to avoid the cost of retrieving a page (either swapin | |
2753 | * or clearing-before-use) before it is overwritten. | |
2754 | */ | |
f1fa4f44 | 2755 | if (i915_gem_object_has_pages(obj)) |
7c55e2c5 CW |
2756 | return -ENODEV; |
2757 | ||
a6d65e45 CW |
2758 | if (obj->mm.madv != I915_MADV_WILLNEED) |
2759 | return -EFAULT; | |
2760 | ||
7c55e2c5 CW |
2761 | /* Before the pages are instantiated the object is treated as being |
2762 | * in the CPU domain. The pages will be clflushed as required before | |
2763 | * use, and we can freely write into the pages directly. If userspace | |
2764 | * races pwrite with any other operation; corruption will ensue - | |
2765 | * that is userspace's prerogative! | |
2766 | */ | |
2767 | ||
2768 | remain = arg->size; | |
2769 | offset = arg->offset; | |
2770 | pg = offset_in_page(offset); | |
2771 | ||
2772 | do { | |
2773 | unsigned int len, unwritten; | |
2774 | struct page *page; | |
2775 | void *data, *vaddr; | |
2776 | int err; | |
2777 | ||
2778 | len = PAGE_SIZE - pg; | |
2779 | if (len > remain) | |
2780 | len = remain; | |
2781 | ||
2782 | err = pagecache_write_begin(obj->base.filp, mapping, | |
2783 | offset, len, 0, | |
2784 | &page, &data); | |
2785 | if (err < 0) | |
2786 | return err; | |
2787 | ||
2788 | vaddr = kmap(page); | |
2789 | unwritten = copy_from_user(vaddr + pg, user_data, len); | |
2790 | kunmap(page); | |
2791 | ||
2792 | err = pagecache_write_end(obj->base.filp, mapping, | |
2793 | offset, len, len - unwritten, | |
2794 | page, data); | |
2795 | if (err < 0) | |
2796 | return err; | |
2797 | ||
2798 | if (unwritten) | |
2799 | return -EFAULT; | |
2800 | ||
2801 | remain -= len; | |
2802 | user_data += len; | |
2803 | offset += len; | |
2804 | pg = 0; | |
2805 | } while (remain); | |
2806 | ||
2807 | return 0; | |
2808 | } | |
2809 | ||
75ef9da2 | 2810 | static void |
673a394b EA |
2811 | i915_gem_retire_work_handler(struct work_struct *work) |
2812 | { | |
b29c19b6 | 2813 | struct drm_i915_private *dev_priv = |
67d97da3 | 2814 | container_of(work, typeof(*dev_priv), gt.retire_work.work); |
91c8a326 | 2815 | struct drm_device *dev = &dev_priv->drm; |
673a394b | 2816 | |
891b48cf | 2817 | /* Come back later if the device is busy... */ |
b29c19b6 | 2818 | if (mutex_trylock(&dev->struct_mutex)) { |
e61e0f51 | 2819 | i915_retire_requests(dev_priv); |
b29c19b6 | 2820 | mutex_unlock(&dev->struct_mutex); |
673a394b | 2821 | } |
67d97da3 | 2822 | |
88923048 CW |
2823 | /* |
2824 | * Keep the retire handler running until we are finally idle. | |
67d97da3 CW |
2825 | * We do not need to do this test under locking as in the worst-case |
2826 | * we queue the retire worker once too often. | |
2827 | */ | |
88923048 | 2828 | if (READ_ONCE(dev_priv->gt.awake)) |
67d97da3 CW |
2829 | queue_delayed_work(dev_priv->wq, |
2830 | &dev_priv->gt.retire_work, | |
bcb45086 | 2831 | round_jiffies_up_relative(HZ)); |
b29c19b6 | 2832 | } |
0a58705b | 2833 | |
c6eeb479 CW |
2834 | static bool switch_to_kernel_context_sync(struct drm_i915_private *i915, |
2835 | unsigned long mask) | |
5861b013 CW |
2836 | { |
2837 | bool result = true; | |
2838 | ||
2839 | /* | |
2840 | * Even if we fail to switch, give whatever is running a small chance | |
2841 | * to save itself before we report the failure. Yes, this may be a | |
2842 | * false positive due to e.g. ENOMEM, caveat emptor! | |
2843 | */ | |
c6eeb479 | 2844 | if (i915_gem_switch_to_kernel_context(i915, mask)) |
5861b013 CW |
2845 | result = false; |
2846 | ||
2847 | if (i915_gem_wait_for_idle(i915, | |
2848 | I915_WAIT_LOCKED | | |
2849 | I915_WAIT_FOR_IDLE_BOOST, | |
2850 | I915_GEM_IDLE_TIMEOUT)) | |
2851 | result = false; | |
2852 | ||
7d6ce558 | 2853 | if (!result) { |
831ebf18 CW |
2854 | if (i915_modparams.reset) { /* XXX hide warning from gem_eio */ |
2855 | dev_err(i915->drm.dev, | |
2856 | "Failed to idle engines, declaring wedged!\n"); | |
2857 | GEM_TRACE_DUMP(); | |
2858 | } | |
2859 | ||
5861b013 | 2860 | /* Forcibly cancel outstanding work and leave the gpu quiet. */ |
5861b013 CW |
2861 | i915_gem_set_wedged(i915); |
2862 | } | |
2863 | ||
2864 | i915_retire_requests(i915); /* ensure we flush after wedging */ | |
2865 | return result; | |
2866 | } | |
2867 | ||
604c37d7 CW |
2868 | static bool load_power_context(struct drm_i915_private *i915) |
2869 | { | |
c6eeb479 CW |
2870 | /* Force loading the kernel context on all engines */ |
2871 | if (!switch_to_kernel_context_sync(i915, ALL_ENGINES)) | |
604c37d7 CW |
2872 | return false; |
2873 | ||
2874 | /* | |
2875 | * Immediately park the GPU so that we enable powersaving and | |
2876 | * treat it as idle. The next time we issue a request, we will | |
2877 | * unpark and start using the engine->pinned_default_state, otherwise | |
2878 | * it is in limbo and an early reset may fail. | |
2879 | */ | |
2880 | __i915_gem_park(i915); | |
2881 | ||
2882 | return true; | |
2883 | } | |
2884 | ||
b29c19b6 CW |
2885 | static void |
2886 | i915_gem_idle_work_handler(struct work_struct *work) | |
2887 | { | |
5861b013 CW |
2888 | struct drm_i915_private *i915 = |
2889 | container_of(work, typeof(*i915), gt.idle_work.work); | |
67d97da3 CW |
2890 | bool rearm_hangcheck; |
2891 | ||
5861b013 | 2892 | if (!READ_ONCE(i915->gt.awake)) |
67d97da3 CW |
2893 | return; |
2894 | ||
5861b013 | 2895 | if (READ_ONCE(i915->gt.active_requests)) |
4dfacb0b CW |
2896 | return; |
2897 | ||
67d97da3 | 2898 | rearm_hangcheck = |
5861b013 | 2899 | cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); |
67d97da3 | 2900 | |
5861b013 | 2901 | if (!mutex_trylock(&i915->drm.struct_mutex)) { |
67d97da3 | 2902 | /* Currently busy, come back later */ |
5861b013 CW |
2903 | mod_delayed_work(i915->wq, |
2904 | &i915->gt.idle_work, | |
67d97da3 CW |
2905 | msecs_to_jiffies(50)); |
2906 | goto out_rearm; | |
2907 | } | |
2908 | ||
93c97dc1 | 2909 | /* |
5861b013 CW |
2910 | * Flush out the last user context, leaving only the pinned |
2911 | * kernel context resident. Should anything unfortunate happen | |
2912 | * while we are idle (such as the GPU being power cycled), no users | |
2913 | * will be harmed. | |
93c97dc1 | 2914 | */ |
5861b013 CW |
2915 | if (!work_pending(&i915->gt.idle_work.work) && |
2916 | !i915->gt.active_requests) { | |
2917 | ++i915->gt.active_requests; /* don't requeue idle */ | |
b29c19b6 | 2918 | |
c6eeb479 | 2919 | switch_to_kernel_context_sync(i915, i915->gt.active_engines); |
35c94185 | 2920 | |
5861b013 CW |
2921 | if (!--i915->gt.active_requests) { |
2922 | __i915_gem_park(i915); | |
2923 | rearm_hangcheck = false; | |
2924 | } | |
2925 | } | |
1934f5de | 2926 | |
5861b013 | 2927 | mutex_unlock(&i915->drm.struct_mutex); |
b29c19b6 | 2928 | |
67d97da3 CW |
2929 | out_rearm: |
2930 | if (rearm_hangcheck) { | |
5861b013 CW |
2931 | GEM_BUG_ON(!i915->gt.awake); |
2932 | i915_queue_hangcheck(i915); | |
35c94185 | 2933 | } |
673a394b EA |
2934 | } |
2935 | ||
b1f788c6 CW |
2936 | void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) |
2937 | { | |
d1b48c1e | 2938 | struct drm_i915_private *i915 = to_i915(gem->dev); |
b1f788c6 CW |
2939 | struct drm_i915_gem_object *obj = to_intel_bo(gem); |
2940 | struct drm_i915_file_private *fpriv = file->driver_priv; | |
d1b48c1e | 2941 | struct i915_lut_handle *lut, *ln; |
b1f788c6 | 2942 | |
d1b48c1e CW |
2943 | mutex_lock(&i915->drm.struct_mutex); |
2944 | ||
2945 | list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) { | |
2946 | struct i915_gem_context *ctx = lut->ctx; | |
2947 | struct i915_vma *vma; | |
2948 | ||
432295d7 | 2949 | GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF)); |
d1b48c1e CW |
2950 | if (ctx->file_priv != fpriv) |
2951 | continue; | |
2952 | ||
2953 | vma = radix_tree_delete(&ctx->handles_vma, lut->handle); | |
3ffff017 CW |
2954 | GEM_BUG_ON(vma->obj != obj); |
2955 | ||
2956 | /* We allow the process to have multiple handles to the same | |
2957 | * vma, in the same fd namespace, by virtue of flink/open. | |
2958 | */ | |
2959 | GEM_BUG_ON(!vma->open_count); | |
2960 | if (!--vma->open_count && !i915_vma_is_ggtt(vma)) | |
b1f788c6 | 2961 | i915_vma_close(vma); |
f8a7fde4 | 2962 | |
d1b48c1e CW |
2963 | list_del(&lut->obj_link); |
2964 | list_del(&lut->ctx_link); | |
4ff4b44c | 2965 | |
13f1bfd3 | 2966 | i915_lut_handle_free(lut); |
d1b48c1e | 2967 | __i915_gem_object_release_unless_active(obj); |
f8a7fde4 | 2968 | } |
d1b48c1e CW |
2969 | |
2970 | mutex_unlock(&i915->drm.struct_mutex); | |
b1f788c6 CW |
2971 | } |
2972 | ||
e95433c7 CW |
2973 | static unsigned long to_wait_timeout(s64 timeout_ns) |
2974 | { | |
2975 | if (timeout_ns < 0) | |
2976 | return MAX_SCHEDULE_TIMEOUT; | |
2977 | ||
2978 | if (timeout_ns == 0) | |
2979 | return 0; | |
2980 | ||
2981 | return nsecs_to_jiffies_timeout(timeout_ns); | |
2982 | } | |
2983 | ||
23ba4fd0 BW |
2984 | /** |
2985 | * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT | |
14bb2c11 TU |
2986 | * @dev: drm device pointer |
2987 | * @data: ioctl data blob | |
2988 | * @file: drm file pointer | |
23ba4fd0 BW |
2989 | * |
2990 | * Returns 0 if successful, else an error is returned with the remaining time in | |
2991 | * the timeout parameter. | |
2992 | * -ETIME: object is still busy after timeout | |
2993 | * -ERESTARTSYS: signal interrupted the wait | |
2994 | * -ENOENT: object doesn't exist | |
2995 | * Also possible, but rare: | |
b8050148 | 2996 | * -EAGAIN: incomplete, restart syscall |
23ba4fd0 BW |
2997 | * -ENOMEM: out of memory |
2998 | * -ENODEV: Internal IRQ fail | |
2999 | * -E?: The add request failed | |
3000 | * | |
3001 | * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any | |
3002 | * non-zero timeout parameter the wait ioctl will wait for the given number of | |
3003 | * nanoseconds on an object becoming unbusy. Since the wait itself does so | |
3004 | * without holding struct_mutex the object may become re-busied before this | |
3005 | * function completes. A similar but shorter race condition exists in the busy | |
3006 | * ioctl. | |
3007 | */ | |
3008 | int | |
3009 | i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |
3010 | { | |
3011 | struct drm_i915_gem_wait *args = data; | |
3012 | struct drm_i915_gem_object *obj; | |
e95433c7 CW |
3013 | ktime_t start; |
3014 | long ret; | |
23ba4fd0 | 3015 | |
11b5d511 DV |
3016 | if (args->flags != 0) |
3017 | return -EINVAL; | |
3018 | ||
03ac0642 | 3019 | obj = i915_gem_object_lookup(file, args->bo_handle); |
033d549b | 3020 | if (!obj) |
23ba4fd0 | 3021 | return -ENOENT; |
23ba4fd0 | 3022 | |
e95433c7 CW |
3023 | start = ktime_get(); |
3024 | ||
3025 | ret = i915_gem_object_wait(obj, | |
e9eaf82d CW |
3026 | I915_WAIT_INTERRUPTIBLE | |
3027 | I915_WAIT_PRIORITY | | |
3028 | I915_WAIT_ALL, | |
62eb3c24 | 3029 | to_wait_timeout(args->timeout_ns)); |
e95433c7 CW |
3030 | |
3031 | if (args->timeout_ns > 0) { | |
3032 | args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); | |
3033 | if (args->timeout_ns < 0) | |
3034 | args->timeout_ns = 0; | |
c1d2061b CW |
3035 | |
3036 | /* | |
3037 | * Apparently ktime isn't accurate enough and occasionally has a | |
3038 | * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch | |
3039 | * things up to make the test happy. We allow up to 1 jiffy. | |
3040 | * | |
3041 | * This is a regression from the timespec->ktime conversion. | |
3042 | */ | |
3043 | if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) | |
3044 | args->timeout_ns = 0; | |
b8050148 CW |
3045 | |
3046 | /* Asked to wait beyond the jiffie/scheduler precision? */ | |
3047 | if (ret == -ETIME && args->timeout_ns) | |
3048 | ret = -EAGAIN; | |
b4716185 CW |
3049 | } |
3050 | ||
f0cd5182 | 3051 | i915_gem_object_put(obj); |
ff865885 | 3052 | return ret; |
23ba4fd0 BW |
3053 | } |
3054 | ||
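Seen from userspace, the timeout convention implemented by to_wait_timeout() and documented above (negative = wait forever, zero = poll, positive = budget in nanoseconds, with the unused budget written back) can be exercised with a small sketch like the one below. This is only an illustration: it assumes libdrm's drmIoctl() wrapper, an already-open DRM fd and a purely hypothetical buffer handle.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int wait_until_idle(int fd, uint32_t handle, int64_t timeout_ns)
{
        struct drm_i915_gem_wait wait = {
                .bo_handle = handle,
                .flags = 0,               /* must be zero, see the ioctl above */
                .timeout_ns = timeout_ns, /* <0: forever, 0: poll, >0: budget */
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
                return -errno;            /* ETIME: still busy after timeout */

        /* On success the unused budget is written back into timeout_ns. */
        printf("object idle, %lld ns of budget left\n",
               (long long)wait.timeout_ns);
        return 0;
}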
25112b64 CW |
3055 | static int wait_for_engines(struct drm_i915_private *i915) |
3056 | { | |
ee42c00e | 3057 | if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) { |
59e4b19d CW |
3058 | dev_err(i915->drm.dev, |
3059 | "Failed to idle engines, declaring wedged!\n"); | |
629820fc | 3060 | GEM_TRACE_DUMP(); |
cad9946c CW |
3061 | i915_gem_set_wedged(i915); |
3062 | return -EIO; | |
25112b64 CW |
3063 | } |
3064 | ||
3065 | return 0; | |
3066 | } | |
3067 | ||
1e345568 CW |
3068 | static long |
3069 | wait_for_timelines(struct drm_i915_private *i915, | |
3070 | unsigned int flags, long timeout) | |
3071 | { | |
3072 | struct i915_gt_timelines *gt = &i915->gt.timelines; | |
3073 | struct i915_timeline *tl; | |
3074 | ||
3075 | if (!READ_ONCE(i915->gt.active_requests)) | |
3076 | return timeout; | |
3077 | ||
3078 | mutex_lock(&gt->mutex); | |
9407d3bd | 3079 | list_for_each_entry(tl, &gt->active_list, link) { | |
1e345568 CW |
3080 | struct i915_request *rq; |
3081 | ||
21950ee7 | 3082 | rq = i915_active_request_get_unlocked(&tl->last_request); |
1e345568 CW |
3083 | if (!rq) |
3084 | continue; | |
3085 | ||
3086 | mutex_unlock(&gt->mutex); | |
3087 | ||
3088 | /* | |
3089 | * "Race-to-idle". | |
3090 | * | |
3091 | * Switching to the kernel context is often used as a synchronous | |
3092 | * step prior to idling, e.g. in suspend for flushing all | |
3093 | * current operations to memory before sleeping. These we | |
3094 | * want to complete as quickly as possible to avoid prolonged | |
3095 | * stalls, so allow the gpu to boost to maximum clocks. | |
3096 | */ | |
3097 | if (flags & I915_WAIT_FOR_IDLE_BOOST) | |
62eb3c24 | 3098 | gen6_rps_boost(rq); |
1e345568 CW |
3099 | |
3100 | timeout = i915_request_wait(rq, flags, timeout); | |
3101 | i915_request_put(rq); | |
3102 | if (timeout < 0) | |
3103 | return timeout; | |
3104 | ||
3105 | /* restart after reacquiring the lock */ | |
3106 | mutex_lock(&gt->mutex); | |
9407d3bd | 3107 | tl = list_entry(&gt->active_list, typeof(*tl), link); | |
1e345568 CW |
3108 | } |
3109 | mutex_unlock(&gt->mutex); | |
3110 | ||
3111 | return timeout; | |
3112 | } | |
3113 | ||
ec625fb9 CW |
3114 | int i915_gem_wait_for_idle(struct drm_i915_private *i915, |
3115 | unsigned int flags, long timeout) | |
73cb9701 | 3116 | { |
ec625fb9 CW |
3117 | GEM_TRACE("flags=%x (%s), timeout=%ld%s\n", |
3118 | flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", | |
3119 | timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : ""); | |
09a4c02e | 3120 | |
863e9fde CW |
3121 | /* If the device is asleep, we have no requests outstanding */ |
3122 | if (!READ_ONCE(i915->gt.awake)) | |
3123 | return 0; | |
3124 | ||
1e345568 CW |
3125 | timeout = wait_for_timelines(i915, flags, timeout); |
3126 | if (timeout < 0) | |
3127 | return timeout; | |
3128 | ||
9caa34aa | 3129 | if (flags & I915_WAIT_LOCKED) { |
a89d1f92 | 3130 | int err; |
9caa34aa CW |
3131 | |
3132 | lockdep_assert_held(&i915->drm.struct_mutex); | |
3133 | ||
a61b47f6 CW |
3134 | err = wait_for_engines(i915); |
3135 | if (err) | |
3136 | return err; | |
3137 | ||
e61e0f51 | 3138 | i915_retire_requests(i915); |
a89d1f92 | 3139 | } |
a61b47f6 CW |
3140 | |
3141 | return 0; | |
4df2faf4 DV |
3142 | } |
3143 | ||
5a97bcc6 CW |
3144 | static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) |
3145 | { | |
e27ab73d CW |
3146 | /* |
3147 | * We manually flush the CPU domain so that we can override and | |
3148 | * force the flush for the display, and perform it asynchronously. | |
3149 | */ | |
3150 | flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); | |
3151 | if (obj->cache_dirty) | |
3152 | i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); | |
c0a51fd0 | 3153 | obj->write_domain = 0; |
5a97bcc6 CW |
3154 | } |
3155 | ||
3156 | void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) | |
3157 | { | |
bd3d2252 | 3158 | if (!READ_ONCE(obj->pin_global)) |
5a97bcc6 CW |
3159 | return; |
3160 | ||
3161 | mutex_lock(&obj->base.dev->struct_mutex); | |
3162 | __i915_gem_object_flush_for_display(obj); | |
3163 | mutex_unlock(&obj->base.dev->struct_mutex); | |
3164 | } | |
3165 | ||
e22d8e3c CW |
3166 | /** |
3167 | * Moves a single object to the WC read, and possibly write domain. | |
3168 | * @obj: object to act on | |
3169 | * @write: ask for write access or read only | |
3170 | * | |
3171 | * This function returns when the move is complete, including waiting on | |
3172 | * flushes to occur. | |
3173 | */ | |
3174 | int | |
3175 | i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) | |
3176 | { | |
3177 | int ret; | |
3178 | ||
3179 | lockdep_assert_held(&obj->base.dev->struct_mutex); | |
3180 | ||
3181 | ret = i915_gem_object_wait(obj, | |
3182 | I915_WAIT_INTERRUPTIBLE | | |
3183 | I915_WAIT_LOCKED | | |
3184 | (write ? I915_WAIT_ALL : 0), | |
62eb3c24 | 3185 | MAX_SCHEDULE_TIMEOUT); |
e22d8e3c CW |
3186 | if (ret) |
3187 | return ret; | |
3188 | ||
c0a51fd0 | 3189 | if (obj->write_domain == I915_GEM_DOMAIN_WC) |
e22d8e3c CW |
3190 | return 0; |
3191 | ||
3192 | /* Flush and acquire obj->pages so that we are coherent through | |
3193 | * direct access in memory with previous cached writes through | |
3194 | * shmemfs and that our cache domain tracking remains valid. | |
3195 | * For example, if the obj->filp was moved to swap without us | |
3196 | * being notified and releasing the pages, we would mistakenly | |
3197 | * continue to assume that the obj remained out of the CPU cached | |
3198 | * domain. | |
3199 | */ | |
3200 | ret = i915_gem_object_pin_pages(obj); | |
3201 | if (ret) | |
3202 | return ret; | |
3203 | ||
3204 | flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); | |
3205 | ||
3206 | /* Serialise direct access to this object with the barriers for | |
3207 | * coherent writes from the GPU, by effectively invalidating the | |
3208 | * WC domain upon first access. | |
3209 | */ | |
c0a51fd0 | 3210 | if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0) |
e22d8e3c CW |
3211 | mb(); |
3212 | ||
3213 | /* It should now be out of any other write domains, and we can update | |
3214 | * the domain values for our changes. | |
3215 | */ | |
c0a51fd0 CK |
3216 | GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0); |
3217 | obj->read_domains |= I915_GEM_DOMAIN_WC; | |
e22d8e3c | 3218 | if (write) { |
c0a51fd0 CK |
3219 | obj->read_domains = I915_GEM_DOMAIN_WC; |
3220 | obj->write_domain = I915_GEM_DOMAIN_WC; | |
e22d8e3c CW |
3221 | obj->mm.dirty = true; |
3222 | } | |
3223 | ||
3224 | i915_gem_object_unpin_pages(obj); | |
3225 | return 0; | |
3226 | } | |
3227 | ||
2ef7eeaa EA |
3228 | /** |
3229 | * Moves a single object to the GTT read, and possibly write domain. | |
14bb2c11 TU |
3230 | * @obj: object to act on |
3231 | * @write: ask for write access or read only | |
2ef7eeaa EA |
3232 | * |
3233 | * This function returns when the move is complete, including waiting on | |
3234 | * flushes to occur. | |
3235 | */ | |
79e53945 | 3236 | int |
2021746e | 3237 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) |
2ef7eeaa | 3238 | { |
e47c68e9 | 3239 | int ret; |
2ef7eeaa | 3240 | |
e95433c7 | 3241 | lockdep_assert_held(&obj->base.dev->struct_mutex); |
4c7d62c6 | 3242 | |
e95433c7 CW |
3243 | ret = i915_gem_object_wait(obj, |
3244 | I915_WAIT_INTERRUPTIBLE | | |
3245 | I915_WAIT_LOCKED | | |
3246 | (write ? I915_WAIT_ALL : 0), | |
62eb3c24 | 3247 | MAX_SCHEDULE_TIMEOUT); |
88241785 CW |
3248 | if (ret) |
3249 | return ret; | |
3250 | ||
c0a51fd0 | 3251 | if (obj->write_domain == I915_GEM_DOMAIN_GTT) |
c13d87ea CW |
3252 | return 0; |
3253 | ||
43566ded CW |
3254 | /* Flush and acquire obj->pages so that we are coherent through |
3255 | * direct access in memory with previous cached writes through | |
3256 | * shmemfs and that our cache domain tracking remains valid. | |
3257 | * For example, if the obj->filp was moved to swap without us | |
3258 | * being notified and releasing the pages, we would mistakenly | |
3259 | * continue to assume that the obj remained out of the CPU cached | |
3260 | * domain. | |
3261 | */ | |
a4f5ea64 | 3262 | ret = i915_gem_object_pin_pages(obj); |
43566ded CW |
3263 | if (ret) |
3264 | return ret; | |
3265 | ||
ef74921b | 3266 | flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); |
1c5d22f7 | 3267 | |
d0a57789 CW |
3268 | /* Serialise direct access to this object with the barriers for |
3269 | * coherent writes from the GPU, by effectively invalidating the | |
3270 | * GTT domain upon first access. | |
3271 | */ | |
c0a51fd0 | 3272 | if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0) |
d0a57789 CW |
3273 | mb(); |
3274 | ||
e47c68e9 EA |
3275 | /* It should now be out of any other write domains, and we can update |
3276 | * the domain values for our changes. | |
3277 | */ | |
c0a51fd0 CK |
3278 | GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); |
3279 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | |
e47c68e9 | 3280 | if (write) { |
c0a51fd0 CK |
3281 | obj->read_domains = I915_GEM_DOMAIN_GTT; |
3282 | obj->write_domain = I915_GEM_DOMAIN_GTT; | |
a4f5ea64 | 3283 | obj->mm.dirty = true; |
2ef7eeaa EA |
3284 | } |
3285 | ||
a4f5ea64 | 3286 | i915_gem_object_unpin_pages(obj); |
e47c68e9 EA |
3287 | return 0; |
3288 | } | |
3289 | ||
ef55f92a CW |
3290 | /** |
3291 | * Changes the cache-level of an object across all VMA. | |
14bb2c11 TU |
3292 | * @obj: object to act on |
3293 | * @cache_level: new cache level to set for the object | |
ef55f92a CW |
3294 | * |
3295 | * After this function returns, the object will be in the new cache-level | |
3296 | * across all GTT and the contents of the backing storage will be coherent, | |
3297 | * with respect to the new cache-level. In order to keep the backing storage | |
3298 | * coherent for all users, we only allow a single cache level to be set | |
3299 | * globally on the object and prevent it from being changed whilst the | |
3300 | * hardware is reading from the object. That is, if the object is currently | |
3301 | * on the scanout it will be set to uncached (or equivalent display | |
3302 | * cache coherency) and all non-MOCS GPU access will also be uncached so | |
3303 | * that all direct access to the scanout remains coherent. | |
3304 | */ | |
e4ffd173 CW |
3305 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
3306 | enum i915_cache_level cache_level) | |
3307 | { | |
aa653a68 | 3308 | struct i915_vma *vma; |
a6a7cc4b | 3309 | int ret; |
e4ffd173 | 3310 | |
4c7d62c6 CW |
3311 | lockdep_assert_held(&obj->base.dev->struct_mutex); |
3312 | ||
e4ffd173 | 3313 | if (obj->cache_level == cache_level) |
a6a7cc4b | 3314 | return 0; |
e4ffd173 | 3315 | |
ef55f92a CW |
3316 | /* Inspect the list of currently bound VMA and unbind any that would |
3317 | * be invalid given the new cache-level. This is principally to | |
3318 | * catch the issue of the CS prefetch crossing page boundaries and | |
3319 | * reading an invalid PTE on older architectures. | |
3320 | */ | |
aa653a68 | 3321 | restart: |
528cbd17 | 3322 | list_for_each_entry(vma, &obj->vma.list, obj_link) { |
ef55f92a CW |
3323 | if (!drm_mm_node_allocated(&vma->node)) |
3324 | continue; | |
3325 | ||
20dfbde4 | 3326 | if (i915_vma_is_pinned(vma)) { |
ef55f92a CW |
3327 | DRM_DEBUG("can not change the cache level of pinned objects\n"); |
3328 | return -EBUSY; | |
3329 | } | |
3330 | ||
010e3e68 CW |
3331 | if (!i915_vma_is_closed(vma) && |
3332 | i915_gem_valid_gtt_space(vma, cache_level)) | |
aa653a68 CW |
3333 | continue; |
3334 | ||
3335 | ret = i915_vma_unbind(vma); | |
3336 | if (ret) | |
3337 | return ret; | |
3338 | ||
3339 | /* As unbinding may affect other elements in the | |
3340 | * obj->vma_list (due to side-effects from retiring | |
3341 | * an active vma), play safe and restart the iterator. | |
3342 | */ | |
3343 | goto restart; | |
42d6ab48 CW |
3344 | } |
3345 | ||
ef55f92a CW |
3346 | /* We can reuse the existing drm_mm nodes but need to change the |
3347 | * cache-level on the PTE. We could simply unbind them all and | |
3348 | * rebind with the correct cache-level on next use. However since | |
3349 | * we already have a valid slot, dma mapping, pages etc, we may as well | |
3350 | * rewrite the PTE in the belief that doing so tramples upon less | |
3351 | * state and so involves less work. | |
3352 | */ | |
15717de2 | 3353 | if (obj->bind_count) { |
ef55f92a CW |
3354 | /* Before we change the PTE, the GPU must not be accessing it. |
3355 | * If we wait upon the object, we know that all the bound | |
3356 | * VMA are no longer active. | |
3357 | */ | |
e95433c7 CW |
3358 | ret = i915_gem_object_wait(obj, |
3359 | I915_WAIT_INTERRUPTIBLE | | |
3360 | I915_WAIT_LOCKED | | |
3361 | I915_WAIT_ALL, | |
62eb3c24 | 3362 | MAX_SCHEDULE_TIMEOUT); |
e4ffd173 CW |
3363 | if (ret) |
3364 | return ret; | |
3365 | ||
0031fb96 TU |
3366 | if (!HAS_LLC(to_i915(obj->base.dev)) && |
3367 | cache_level != I915_CACHE_NONE) { | |
ef55f92a CW |
3368 | /* Access to snoopable pages through the GTT is |
3369 | * incoherent and on some machines causes a hard | |
3370 | * lockup. Relinquish the CPU mmapping to force | |
3371 | * userspace to refault in the pages and we can | |
3372 | * then double check if the GTT mapping is still | |
3373 | * valid for that pointer access. | |
3374 | */ | |
3375 | i915_gem_release_mmap(obj); | |
3376 | ||
3377 | /* As we no longer need a fence for GTT access, | |
3378 | * we can relinquish it now (and so prevent having | |
3379 | * to steal a fence from someone else on the next | |
3380 | * fence request). Note GPU activity would have | |
3381 | * dropped the fence as all snoopable access is | |
3382 | * supposed to be linear. | |
3383 | */ | |
e2189dd0 | 3384 | for_each_ggtt_vma(vma, obj) { |
49ef5294 CW |
3385 | ret = i915_vma_put_fence(vma); |
3386 | if (ret) | |
3387 | return ret; | |
3388 | } | |
ef55f92a CW |
3389 | } else { |
3390 | /* We either have incoherent backing store and | |
3391 | * so no GTT access or the architecture is fully | |
3392 | * coherent. In such cases, existing GTT mmaps | |
3393 | * ignore the cache bit in the PTE and we can | |
3394 | * rewrite it without confusing the GPU or having | |
3395 | * to force userspace to fault back in its mmaps. | |
3396 | */ | |
e4ffd173 CW |
3397 | } |
3398 | ||
528cbd17 | 3399 | list_for_each_entry(vma, &obj->vma.list, obj_link) { |
ef55f92a CW |
3400 | if (!drm_mm_node_allocated(&vma->node)) |
3401 | continue; | |
3402 | ||
3403 | ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); | |
3404 | if (ret) | |
3405 | return ret; | |
3406 | } | |
e4ffd173 CW |
3407 | } |
3408 | ||
528cbd17 | 3409 | list_for_each_entry(vma, &obj->vma.list, obj_link) |
2c22569b | 3410 | vma->node.color = cache_level; |
b8f55be6 | 3411 | i915_gem_object_set_cache_coherency(obj, cache_level); |
e27ab73d | 3412 | obj->cache_dirty = true; /* Always invalidate stale cachelines */ |
2c22569b | 3413 | |
e4ffd173 CW |
3414 | return 0; |
3415 | } | |
3416 | ||
199adf40 BW |
3417 | int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, |
3418 | struct drm_file *file) | |
e6994aee | 3419 | { |
199adf40 | 3420 | struct drm_i915_gem_caching *args = data; |
e6994aee | 3421 | struct drm_i915_gem_object *obj; |
fbbd37b3 | 3422 | int err = 0; |
e6994aee | 3423 | |
fbbd37b3 CW |
3424 | rcu_read_lock(); |
3425 | obj = i915_gem_object_lookup_rcu(file, args->handle); | |
3426 | if (!obj) { | |
3427 | err = -ENOENT; | |
3428 | goto out; | |
3429 | } | |
e6994aee | 3430 | |
651d794f CW |
3431 | switch (obj->cache_level) { |
3432 | case I915_CACHE_LLC: | |
3433 | case I915_CACHE_L3_LLC: | |
3434 | args->caching = I915_CACHING_CACHED; | |
3435 | break; | |
3436 | ||
4257d3ba CW |
3437 | case I915_CACHE_WT: |
3438 | args->caching = I915_CACHING_DISPLAY; | |
3439 | break; | |
3440 | ||
651d794f CW |
3441 | default: |
3442 | args->caching = I915_CACHING_NONE; | |
3443 | break; | |
3444 | } | |
fbbd37b3 CW |
3445 | out: |
3446 | rcu_read_unlock(); | |
3447 | return err; | |
e6994aee CW |
3448 | } |
3449 | ||
199adf40 BW |
3450 | int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, |
3451 | struct drm_file *file) | |
e6994aee | 3452 | { |
9c870d03 | 3453 | struct drm_i915_private *i915 = to_i915(dev); |
199adf40 | 3454 | struct drm_i915_gem_caching *args = data; |
e6994aee CW |
3455 | struct drm_i915_gem_object *obj; |
3456 | enum i915_cache_level level; | |
d65415df | 3457 | int ret = 0; |
e6994aee | 3458 | |
199adf40 BW |
3459 | switch (args->caching) { |
3460 | case I915_CACHING_NONE: | |
e6994aee CW |
3461 | level = I915_CACHE_NONE; |
3462 | break; | |
199adf40 | 3463 | case I915_CACHING_CACHED: |
e5756c10 ID |
3464 | /* |
3465 | * Due to a HW issue on BXT A stepping, GPU stores via a | |
3466 | * snooped mapping may leave stale data in a corresponding CPU | |
3467 | * cacheline, whereas normally such cachelines would get | |
3468 | * invalidated. | |
3469 | */ | |
9c870d03 | 3470 | if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) |
e5756c10 ID |
3471 | return -ENODEV; |
3472 | ||
e6994aee CW |
3473 | level = I915_CACHE_LLC; |
3474 | break; | |
4257d3ba | 3475 | case I915_CACHING_DISPLAY: |
9c870d03 | 3476 | level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE; |
4257d3ba | 3477 | break; |
e6994aee CW |
3478 | default: |
3479 | return -EINVAL; | |
3480 | } | |
3481 | ||
d65415df CW |
3482 | obj = i915_gem_object_lookup(file, args->handle); |
3483 | if (!obj) | |
3484 | return -ENOENT; | |
3485 | ||
a03f395a TZ |
3486 | /* |
3487 | * The caching mode of proxy object is handled by its generator, and | |
3488 | * not allowed to be changed by userspace. | |
3489 | */ | |
3490 | if (i915_gem_object_is_proxy(obj)) { | |
3491 | ret = -ENXIO; | |
3492 | goto out; | |
3493 | } | |
3494 | ||
d65415df CW |
3495 | if (obj->cache_level == level) |
3496 | goto out; | |
3497 | ||
3498 | ret = i915_gem_object_wait(obj, | |
3499 | I915_WAIT_INTERRUPTIBLE, | |
62eb3c24 | 3500 | MAX_SCHEDULE_TIMEOUT); |
3bc2913e | 3501 | if (ret) |
d65415df | 3502 | goto out; |
3bc2913e | 3503 | |
d65415df CW |
3504 | ret = i915_mutex_lock_interruptible(dev); |
3505 | if (ret) | |
3506 | goto out; | |
e6994aee CW |
3507 | |
3508 | ret = i915_gem_object_set_cache_level(obj, level); | |
e6994aee | 3509 | mutex_unlock(&dev->struct_mutex); |
d65415df CW |
3510 | |
3511 | out: | |
3512 | i915_gem_object_put(obj); | |
e6994aee CW |
3513 | return ret; |
3514 | } | |
3515 | ||
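A sketch of the userspace side of the two ioctls above, switching a buffer to LLC-cached (I915_CACHING_CACHED) as handled by i915_gem_set_caching_ioctl(); it assumes libdrm's drmIoctl() wrapper and an illustrative handle, and is not the only valid way to drive the interface.

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int set_cached(int fd, uint32_t handle)
{
        struct drm_i915_gem_caching arg = {
                .handle = handle,
                .caching = I915_CACHING_CACHED, /* snooped/LLC, see above */
        };

        /* ENODEV here means the platform has neither LLC nor snooping. */
        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
                return -errno;
        return 0;
}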
b9241ea3 | 3516 | /* |
07bcd99b DP |
3517 | * Prepare buffer for display plane (scanout, cursors, etc). Can be called from |
3518 | * an uninterruptible phase (modesetting) and allows any flushes to be pipelined | |
3519 | * (for pageflips). We only flush the caches while preparing the buffer for | |
3521 | * display; the callers are responsible for the frontbuffer flush. | |
b9241ea3 | 3521 | */ |
058d88c4 | 3522 | struct i915_vma * |
2da3b9b9 CW |
3523 | i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
3524 | u32 alignment, | |
5935485f CW |
3525 | const struct i915_ggtt_view *view, |
3526 | unsigned int flags) | |
b9241ea3 | 3527 | { |
058d88c4 | 3528 | struct i915_vma *vma; |
b9241ea3 ZW |
3529 | int ret; |
3530 | ||
4c7d62c6 CW |
3531 | lockdep_assert_held(&obj->base.dev->struct_mutex); |
3532 | ||
bd3d2252 | 3533 | /* Mark the global pin early so that we account for the |
cc98b413 CW |
3534 | * display coherency whilst setting up the cache domains. |
3535 | */ | |
bd3d2252 | 3536 | obj->pin_global++; |
cc98b413 | 3537 | |
a7ef0640 EA |
3538 | /* The display engine is not coherent with the LLC cache on gen6. As |
3539 | * a result, we make sure that the pinning that is about to occur is | |
3540 | * done with uncached PTEs. This is the lowest common denominator for all | |
3541 | * chipsets. | |
3542 | * | |
3543 | * However for gen6+, we could do better by using the GFDT bit instead | |
3544 | * of uncaching, which would allow us to flush all the LLC-cached data | |
3545 | * with that bit in the PTE to main memory with just one PIPE_CONTROL. | |
3546 | */ | |
651d794f | 3547 | ret = i915_gem_object_set_cache_level(obj, |
8652744b TU |
3548 | HAS_WT(to_i915(obj->base.dev)) ? |
3549 | I915_CACHE_WT : I915_CACHE_NONE); | |
058d88c4 CW |
3550 | if (ret) { |
3551 | vma = ERR_PTR(ret); | |
bd3d2252 | 3552 | goto err_unpin_global; |
058d88c4 | 3553 | } |
a7ef0640 | 3554 | |
2da3b9b9 CW |
3555 | /* As the user may map the buffer once pinned in the display plane |
3556 | * (e.g. libkms for the bootup splash), we have to ensure that we | |
2efb813d CW |
3557 | * always use map_and_fenceable for all scanout buffers. However, |
3558 | * it may simply be too big to fit into mappable, in which case | |
3559 | * put it anyway and hope that userspace can cope (but always first | |
3560 | * try to preserve the existing ABI). | |
2da3b9b9 | 3561 | */ |
2efb813d | 3562 | vma = ERR_PTR(-ENOSPC); |
5935485f CW |
3563 | if ((flags & PIN_MAPPABLE) == 0 && |
3564 | (!view || view->type == I915_GGTT_VIEW_NORMAL)) | |
2efb813d | 3565 | vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, |
5935485f CW |
3566 | flags | |
3567 | PIN_MAPPABLE | | |
3568 | PIN_NONBLOCK); | |
3569 | if (IS_ERR(vma)) | |
767a222e | 3570 | vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); |
058d88c4 | 3571 | if (IS_ERR(vma)) |
bd3d2252 | 3572 | goto err_unpin_global; |
2da3b9b9 | 3573 | |
d8923dcf CW |
3574 | vma->display_alignment = max_t(u64, vma->display_alignment, alignment); |
3575 | ||
5a97bcc6 | 3576 | __i915_gem_object_flush_for_display(obj); |
b118c1e3 | 3577 | |
2da3b9b9 CW |
3578 | /* It should now be out of any other write domains, and we can update |
3579 | * the domain values for our changes. | |
3580 | */ | |
c0a51fd0 | 3581 | obj->read_domains |= I915_GEM_DOMAIN_GTT; |
b9241ea3 | 3582 | |
058d88c4 | 3583 | return vma; |
cc98b413 | 3584 | |
bd3d2252 CW |
3585 | err_unpin_global: |
3586 | obj->pin_global--; | |
058d88c4 | 3587 | return vma; |
cc98b413 CW |
3588 | } |
3589 | ||
3590 | void | |
058d88c4 | 3591 | i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) |
cc98b413 | 3592 | { |
49d73912 | 3593 | lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); |
4c7d62c6 | 3594 | |
bd3d2252 | 3595 | if (WARN_ON(vma->obj->pin_global == 0)) |
8a0c39b1 TU |
3596 | return; |
3597 | ||
bd3d2252 | 3598 | if (--vma->obj->pin_global == 0) |
f51455d4 | 3599 | vma->display_alignment = I915_GTT_MIN_ALIGNMENT; |
e6617330 | 3600 | |
383d5823 | 3601 | /* Bump the LRU to try and avoid premature eviction whilst flipping */ |
befedbb7 | 3602 | i915_gem_object_bump_inactive_ggtt(vma->obj); |
383d5823 | 3603 | |
058d88c4 | 3604 | i915_vma_unpin(vma); |
b9241ea3 ZW |
3605 | } |
3606 | ||
e47c68e9 EA |
3607 | /** |
3608 | * Moves a single object to the CPU read, and possibly write domain. | |
14bb2c11 TU |
3609 | * @obj: object to act on |
3610 | * @write: requesting write or read-only access | |
e47c68e9 EA |
3611 | * |
3612 | * This function returns when the move is complete, including waiting on | |
3613 | * flushes to occur. | |
3614 | */ | |
dabdfe02 | 3615 | int |
919926ae | 3616 | i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) |
e47c68e9 | 3617 | { |
e47c68e9 EA |
3618 | int ret; |
3619 | ||
e95433c7 | 3620 | lockdep_assert_held(&obj->base.dev->struct_mutex); |
4c7d62c6 | 3621 | |
e95433c7 CW |
3622 | ret = i915_gem_object_wait(obj, |
3623 | I915_WAIT_INTERRUPTIBLE | | |
3624 | I915_WAIT_LOCKED | | |
3625 | (write ? I915_WAIT_ALL : 0), | |
62eb3c24 | 3626 | MAX_SCHEDULE_TIMEOUT); |
88241785 CW |
3627 | if (ret) |
3628 | return ret; | |
3629 | ||
ef74921b | 3630 | flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); |
2ef7eeaa | 3631 | |
e47c68e9 | 3632 | /* Flush the CPU cache if it's still invalid. */ |
c0a51fd0 | 3633 | if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { |
57822dc6 | 3634 | i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); |
c0a51fd0 | 3635 | obj->read_domains |= I915_GEM_DOMAIN_CPU; |
2ef7eeaa EA |
3636 | } |
3637 | ||
3638 | /* It should now be out of any other write domains, and we can update | |
3639 | * the domain values for our changes. | |
3640 | */ | |
c0a51fd0 | 3641 | GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU); |
e47c68e9 EA |
3642 | |
3643 | /* If we're writing through the CPU, then the GPU read domains will | |
3644 | * need to be invalidated at next use. | |
3645 | */ | |
e27ab73d CW |
3646 | if (write) |
3647 | __start_cpu_write(obj); | |
2ef7eeaa EA |
3648 | |
3649 | return 0; | |
3650 | } | |
3651 | ||
673a394b EA |
3652 | /* Throttle our rendering by waiting until the ring has completed our requests |
3653 | * emitted over 20 msec ago. | |
3654 | * | |
b962442e EA |
3655 | * Note that if we were to use the current jiffies each time around the loop, |
3656 | * we wouldn't escape the function with any frames outstanding if the time to | |
3657 | * render a frame was over 20ms. | |
3658 | * | |
673a394b EA |
3659 | * This should get us reasonable parallelism between CPU and GPU but also |
3660 | * relatively low latency when blocking on a particular request to finish. | |
3661 | */ | |
40a5f0de | 3662 | static int |
f787a5f5 | 3663 | i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) |
40a5f0de | 3664 | { |
fac5e23e | 3665 | struct drm_i915_private *dev_priv = to_i915(dev); |
f787a5f5 | 3666 | struct drm_i915_file_private *file_priv = file->driver_priv; |
d0bc54f2 | 3667 | unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; |
e61e0f51 | 3668 | struct i915_request *request, *target = NULL; |
e95433c7 | 3669 | long ret; |
93533c29 | 3670 | |
f4457ae7 | 3671 | /* ABI: return -EIO if already wedged */ |
c41166f9 CW |
3672 | ret = i915_terminally_wedged(dev_priv); |
3673 | if (ret) | |
3674 | return ret; | |
e110e8d6 | 3675 | |
1c25595f | 3676 | spin_lock(&file_priv->mm.lock); |
c8659efa | 3677 | list_for_each_entry(request, &file_priv->mm.request_list, client_link) { |
b962442e EA |
3678 | if (time_after_eq(request->emitted_jiffies, recent_enough)) |
3679 | break; | |
40a5f0de | 3680 | |
c8659efa CW |
3681 | if (target) { |
3682 | list_del(&target->client_link); | |
3683 | target->file_priv = NULL; | |
3684 | } | |
fcfa423c | 3685 | |
54fb2411 | 3686 | target = request; |
b962442e | 3687 | } |
ff865885 | 3688 | if (target) |
e61e0f51 | 3689 | i915_request_get(target); |
1c25595f | 3690 | spin_unlock(&file_priv->mm.lock); |
40a5f0de | 3691 | |
54fb2411 | 3692 | if (target == NULL) |
f787a5f5 | 3693 | return 0; |
2bc43b5c | 3694 | |
e61e0f51 | 3695 | ret = i915_request_wait(target, |
e95433c7 CW |
3696 | I915_WAIT_INTERRUPTIBLE, |
3697 | MAX_SCHEDULE_TIMEOUT); | |
e61e0f51 | 3698 | i915_request_put(target); |
ff865885 | 3699 | |
e95433c7 | 3700 | return ret < 0 ? ret : 0; |
40a5f0de EA |
3701 | } |
3702 | ||
058d88c4 | 3703 | struct i915_vma * |
ec7adb6e JL |
3704 | i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, |
3705 | const struct i915_ggtt_view *view, | |
91b2db6f | 3706 | u64 size, |
2ffffd0f CW |
3707 | u64 alignment, |
3708 | u64 flags) | |
ec7adb6e | 3709 | { |
ad16d2ed | 3710 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
82ad6443 | 3711 | struct i915_address_space *vm = &dev_priv->ggtt.vm; |
59bfa124 CW |
3712 | struct i915_vma *vma; |
3713 | int ret; | |
72e96d64 | 3714 | |
4c7d62c6 CW |
3715 | lockdep_assert_held(&obj->base.dev->struct_mutex); |
3716 | ||
ac87a6fd CW |
3717 | if (flags & PIN_MAPPABLE && |
3718 | (!view || view->type == I915_GGTT_VIEW_NORMAL)) { | |
43ae70d9 CW |
3719 | /* If the required space is larger than the available |
3721 | * aperture, we will not be able to find a slot for the | |
3721 | * object and unbinding the object now will be in | |
3722 | * vain. Worse, doing so may cause us to ping-pong | |
3723 | * the object in and out of the Global GTT and | |
3724 | * waste a lot of cycles under the mutex. | |
3725 | */ | |
3726 | if (obj->base.size > dev_priv->ggtt.mappable_end) | |
3727 | return ERR_PTR(-E2BIG); | |
3728 | ||
3729 | /* If NONBLOCK is set the caller is optimistically | |
3730 | * trying to cache the full object within the mappable | |
3731 | * aperture, and *must* have a fallback in place for | |
3732 | * situations where we cannot bind the object. We | |
3733 | * can be a little more lax here and use the fallback | |
3734 | * more often to avoid costly migrations of ourselves | |
3735 | * and other objects within the aperture. | |
3736 | * | |
3737 | * Half-the-aperture is used as a simple heuristic. | |
3739 | * More interesting would be to do a search for a free | |
3739 | * block prior to making the commitment to unbind. | |
3740 | * That caters for the self-harm case, and with a | |
3741 | * little more heuristics (e.g. NOFAULT, NOEVICT) | |
3742 | * we could try to minimise harm to others. | |
3743 | */ | |
3744 | if (flags & PIN_NONBLOCK && | |
3745 | obj->base.size > dev_priv->ggtt.mappable_end / 2) | |
3746 | return ERR_PTR(-ENOSPC); | |
3747 | } | |
3748 | ||
718659a6 | 3749 | vma = i915_vma_instance(obj, vm, view); |
772b5408 | 3750 | if (IS_ERR(vma)) |
058d88c4 | 3751 | return vma; |
59bfa124 CW |
3752 | |
3753 | if (i915_vma_misplaced(vma, size, alignment, flags)) { | |
43ae70d9 CW |
3754 | if (flags & PIN_NONBLOCK) { |
3755 | if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) | |
3756 | return ERR_PTR(-ENOSPC); | |
59bfa124 | 3757 | |
43ae70d9 | 3758 | if (flags & PIN_MAPPABLE && |
944397f0 | 3759 | vma->fence_size > dev_priv->ggtt.mappable_end / 2) |
ad16d2ed CW |
3760 | return ERR_PTR(-ENOSPC); |
3761 | } | |
3762 | ||
59bfa124 CW |
3763 | WARN(i915_vma_is_pinned(vma), |
3764 | "bo is already pinned in ggtt with incorrect alignment:" | |
05a20d09 CW |
3765 | " offset=%08x, req.alignment=%llx," |
3766 | " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", | |
3767 | i915_ggtt_offset(vma), alignment, | |
59bfa124 | 3768 | !!(flags & PIN_MAPPABLE), |
05a20d09 | 3769 | i915_vma_is_map_and_fenceable(vma)); |
59bfa124 CW |
3770 | ret = i915_vma_unbind(vma); |
3771 | if (ret) | |
058d88c4 | 3772 | return ERR_PTR(ret); |
59bfa124 CW |
3773 | } |
3774 | ||
058d88c4 CW |
3775 | ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); |
3776 | if (ret) | |
3777 | return ERR_PTR(ret); | |
ec7adb6e | 3778 | |
058d88c4 | 3779 | return vma; |
673a394b EA |
3780 | } |
3781 | ||
edf6b76f | 3782 | static __always_inline unsigned int __busy_read_flag(unsigned int id) |
3fdc13c7 | 3783 | { |
c8b50242 CW |
3784 | if (id == I915_ENGINE_CLASS_INVALID) |
3785 | return 0xffff0000; | |
3786 | ||
3787 | GEM_BUG_ON(id >= 16); | |
3fdc13c7 CW |
3788 | return 0x10000 << id; |
3789 | } | |
3790 | ||
3791 | static __always_inline unsigned int __busy_write_id(unsigned int id) | |
3792 | { | |
c8b50242 CW |
3793 | /* |
3794 | * The uABI guarantees an active writer is also amongst the read | |
70cb472c CW |
3795 | * engines. This would be true if we accessed the activity tracking |
3796 | * under the lock, but as we perform the lookup of the object and | |
3797 | * its activity locklessly we can not guarantee that the last_write | |
3798 | * being active implies that we have set the same engine flag from | |
3799 | * last_read - hence we always set both read and write busy for | |
3800 | * last_write. | |
3801 | */ | |
c8b50242 CW |
3802 | if (id == I915_ENGINE_CLASS_INVALID) |
3803 | return 0xffffffff; | |
3804 | ||
3805 | return (id + 1) | __busy_read_flag(id); | |
3fdc13c7 CW |
3806 | } |
3807 | ||
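Putting the two helpers above together: the busy-ioctl result carries the class of the last writer plus one in its low 16 bits (zero when there is no active writer) and one bit per reading engine class from bit 16 upwards. A hedged userspace-side decode of that encoding, with the printouts purely illustrative:

#include <stdint.h>
#include <stdio.h>

static void decode_busy(uint32_t busy)
{
        uint32_t writer = busy & 0xffff;  /* engine class + 1, 0 if no writer */
        uint32_t readers = busy >> 16;    /* bitmask of reading engine classes */
        unsigned int class;

        if (!busy) {
                printf("object is idle\n");
                return;
        }

        if (writer)
                printf("last written by engine class %u\n", writer - 1);

        for (class = 0; class < 16; class++)
                if (readers & (1u << class))
                        printf("still being read by engine class %u\n", class);
}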
edf6b76f | 3808 | static __always_inline unsigned int |
d07f0e59 | 3809 | __busy_set_if_active(const struct dma_fence *fence, |
3fdc13c7 CW |
3810 | unsigned int (*flag)(unsigned int id)) |
3811 | { | |
c8b50242 | 3812 | const struct i915_request *rq; |
3fdc13c7 | 3813 | |
c8b50242 CW |
3814 | /* |
3815 | * We have to check the current hw status of the fence as the uABI | |
d07f0e59 CW |
3816 | * guarantees forward progress. We could rely on the idle worker |
3817 | * to eventually flush us, but to minimise latency just ask the | |
3818 | * hardware. | |
1255501d | 3819 | * |
d07f0e59 | 3820 | * Note we only report on the status of native fences. |
1255501d | 3821 | */ |
d07f0e59 CW |
3822 | if (!dma_fence_is_i915(fence)) |
3823 | return 0; | |
3824 | ||
3825 | /* opencode to_request() in order to avoid const warnings */ | |
c8b50242 | 3826 | rq = container_of(fence, const struct i915_request, fence); |
e61e0f51 | 3827 | if (i915_request_completed(rq)) |
d07f0e59 CW |
3828 | return 0; |
3829 | ||
c8b50242 | 3830 | return flag(rq->engine->uabi_class); |
3fdc13c7 CW |
3831 | } |
3832 | ||
edf6b76f | 3833 | static __always_inline unsigned int |
d07f0e59 | 3834 | busy_check_reader(const struct dma_fence *fence) |
3fdc13c7 | 3835 | { |
d07f0e59 | 3836 | return __busy_set_if_active(fence, __busy_read_flag); |
3fdc13c7 CW |
3837 | } |
3838 | ||
edf6b76f | 3839 | static __always_inline unsigned int |
d07f0e59 | 3840 | busy_check_writer(const struct dma_fence *fence) |
3fdc13c7 | 3841 | { |
d07f0e59 CW |
3842 | if (!fence) |
3843 | return 0; | |
3844 | ||
3845 | return __busy_set_if_active(fence, __busy_write_id); | |
3fdc13c7 CW |
3846 | } |
3847 | ||
673a394b EA |
3848 | int |
3849 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |
05394f39 | 3850 | struct drm_file *file) |
673a394b EA |
3851 | { |
3852 | struct drm_i915_gem_busy *args = data; | |
05394f39 | 3853 | struct drm_i915_gem_object *obj; |
d07f0e59 CW |
3854 | struct reservation_object_list *list; |
3855 | unsigned int seq; | |
fbbd37b3 | 3856 | int err; |
673a394b | 3857 | |
d07f0e59 | 3858 | err = -ENOENT; |
fbbd37b3 CW |
3859 | rcu_read_lock(); |
3860 | obj = i915_gem_object_lookup_rcu(file, args->handle); | |
d07f0e59 | 3861 | if (!obj) |
fbbd37b3 | 3862 | goto out; |
d1b851fc | 3863 | |
c8b50242 CW |
3864 | /* |
3865 | * A discrepancy here is that we do not report the status of | |
d07f0e59 CW |
3866 | * non-i915 fences, i.e. even though we may report the object as idle, |
3867 | * a call to set-domain may still stall waiting for foreign rendering. | |
3868 | * This also means that wait-ioctl may report an object as busy, | |
3869 | * where busy-ioctl considers it idle. | |
3870 | * | |
3871 | * We trade the ability to warn of foreign fences to report on which | |
3872 | * i915 engines are active for the object. | |
3873 | * | |
3874 | * Alternatively, we can trade that extra information on read/write | |
3875 | * activity with | |
3876 | * args->busy = | |
3877 | * !reservation_object_test_signaled_rcu(obj->resv, true); | |
3878 | * to report the overall busyness. This is what the wait-ioctl does. | |
3879 | * | |
3880 | */ | |
3881 | retry: | |
3882 | seq = raw_read_seqcount(&obj->resv->seq); | |
426960be | 3883 | |
d07f0e59 CW |
3884 | /* Translate the exclusive fence to the READ *and* WRITE engine */ |
3885 | args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); | |
3fdc13c7 | 3886 | |
d07f0e59 CW |
3887 | /* Translate shared fences to READ set of engines */ |
3888 | list = rcu_dereference(obj->resv->fence); | |
3889 | if (list) { | |
3890 | unsigned int shared_count = list->shared_count, i; | |
3fdc13c7 | 3891 | |
d07f0e59 CW |
3892 | for (i = 0; i < shared_count; ++i) { |
3893 | struct dma_fence *fence = | |
3894 | rcu_dereference(list->shared[i]); | |
3895 | ||
3896 | args->busy |= busy_check_reader(fence); | |
3897 | } | |
426960be | 3898 | } |
673a394b | 3899 | |
d07f0e59 CW |
3900 | if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) |
3901 | goto retry; | |
3902 | ||
3903 | err = 0; | |
fbbd37b3 CW |
3904 | out: |
3905 | rcu_read_unlock(); | |
3906 | return err; | |
673a394b EA |
3907 | } |
3908 | ||
3909 | int | |
3910 | i915_gem_throttle_ioctl(struct drm_device *dev, void *data, | |
3911 | struct drm_file *file_priv) | |
3912 | { | |
0206e353 | 3913 | return i915_gem_ring_throttle(dev, file_priv); |
673a394b EA |
3914 | } |
3915 | ||
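The throttle ioctl carries no payload, so the userspace side of the 20 msec window implemented by i915_gem_ring_throttle() above is just a bare call before queueing more work; a sketch assuming libdrm's drmIoctl() wrapper:

#include <xf86drm.h>
#include <drm/i915_drm.h>

static void throttle_before_frame(int fd)
{
        /* Blocks until requests this fd emitted more than ~20 msec ago have
         * completed; returns an error if the GPU is terminally wedged. */
        drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
}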
3ef94daa CW |
3916 | int |
3917 | i915_gem_madvise_ioctl(struct drm_device *dev, void *data, | |
3918 | struct drm_file *file_priv) | |
3919 | { | |
fac5e23e | 3920 | struct drm_i915_private *dev_priv = to_i915(dev); |
3ef94daa | 3921 | struct drm_i915_gem_madvise *args = data; |
05394f39 | 3922 | struct drm_i915_gem_object *obj; |
1233e2db | 3923 | int err; |
3ef94daa CW |
3924 | |
3925 | switch (args->madv) { | |
3926 | case I915_MADV_DONTNEED: | |
3927 | case I915_MADV_WILLNEED: | |
3928 | break; | |
3929 | default: | |
3930 | return -EINVAL; | |
3931 | } | |
3932 | ||
03ac0642 | 3933 | obj = i915_gem_object_lookup(file_priv, args->handle); |
1233e2db CW |
3934 | if (!obj) |
3935 | return -ENOENT; | |
3936 | ||
3937 | err = mutex_lock_interruptible(&obj->mm.lock); | |
3938 | if (err) | |
3939 | goto out; | |
3ef94daa | 3940 | |
f1fa4f44 | 3941 | if (i915_gem_object_has_pages(obj) && |
3e510a8e | 3942 | i915_gem_object_is_tiled(obj) && |
656bfa3a | 3943 | dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { |
bc0629a7 CW |
3944 | if (obj->mm.madv == I915_MADV_WILLNEED) { |
3945 | GEM_BUG_ON(!obj->mm.quirked); | |
a4f5ea64 | 3946 | __i915_gem_object_unpin_pages(obj); |
bc0629a7 CW |
3947 | obj->mm.quirked = false; |
3948 | } | |
3949 | if (args->madv == I915_MADV_WILLNEED) { | |
2c3a3f44 | 3950 | GEM_BUG_ON(obj->mm.quirked); |
a4f5ea64 | 3951 | __i915_gem_object_pin_pages(obj); |
bc0629a7 CW |
3952 | obj->mm.quirked = true; |
3953 | } | |
656bfa3a DV |
3954 | } |
3955 | ||
a4f5ea64 CW |
3956 | if (obj->mm.madv != __I915_MADV_PURGED) |
3957 | obj->mm.madv = args->madv; | |
3ef94daa | 3958 | |
6c085a72 | 3959 | /* if the object is no longer attached, discard its backing storage */ |
f1fa4f44 CW |
3960 | if (obj->mm.madv == I915_MADV_DONTNEED && |
3961 | !i915_gem_object_has_pages(obj)) | |
2d7ef395 CW |
3962 | i915_gem_object_truncate(obj); |
3963 | ||
a4f5ea64 | 3964 | args->retained = obj->mm.madv != __I915_MADV_PURGED; |
1233e2db | 3965 | mutex_unlock(&obj->mm.lock); |
bb6baf76 | 3966 | |
1233e2db | 3967 | out: |
f8c417cd | 3968 | i915_gem_object_put(obj); |
1233e2db | 3969 | return err; |
3ef94daa CW |
3970 | } |
3971 | ||
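The retained flag computed by the madvise ioctl above is what a userspace buffer cache keys off when recycling purgeable buffers; a sketch of that pattern, assuming libdrm's drmIoctl() wrapper and an illustrative handle:

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static bool reuse_cached_buffer(int fd, uint32_t handle)
{
        struct drm_i915_gem_madvise arg = {
                .handle = handle,
                .madv = I915_MADV_WILLNEED, /* we want the backing store again */
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
                return false;

        /* retained == 0 means the shrinker purged the pages while the buffer
         * was marked I915_MADV_DONTNEED, so its old contents are gone. */
        return arg.retained;
}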
5b8c8aec | 3972 | static void |
21950ee7 CW |
3973 | frontbuffer_retire(struct i915_active_request *active, |
3974 | struct i915_request *request) | |
5b8c8aec CW |
3975 | { |
3976 | struct drm_i915_gem_object *obj = | |
3977 | container_of(active, typeof(*obj), frontbuffer_write); | |
3978 | ||
d59b21ec | 3979 | intel_fb_obj_flush(obj, ORIGIN_CS); |
5b8c8aec CW |
3980 | } |
3981 | ||
37e680a1 CW |
3982 | void i915_gem_object_init(struct drm_i915_gem_object *obj, |
3983 | const struct drm_i915_gem_object_ops *ops) | |
0327d6ba | 3984 | { |
1233e2db CW |
3985 | mutex_init(&obj->mm.lock); |
3986 | ||
528cbd17 CW |
3987 | spin_lock_init(&obj->vma.lock); |
3988 | INIT_LIST_HEAD(&obj->vma.list); | |
3989 | ||
d1b48c1e | 3990 | INIT_LIST_HEAD(&obj->lut_list); |
8d9d5744 | 3991 | INIT_LIST_HEAD(&obj->batch_pool_link); |
0327d6ba | 3992 | |
8811d616 CW |
3993 | init_rcu_head(&obj->rcu); |
3994 | ||
37e680a1 CW |
3995 | obj->ops = ops; |
3996 | ||
d07f0e59 CW |
3997 | reservation_object_init(&obj->__builtin_resv); |
3998 | obj->resv = &obj->__builtin_resv; | |
3999 | ||
50349247 | 4000 | obj->frontbuffer_ggtt_origin = ORIGIN_GTT; |
21950ee7 CW |
4001 | i915_active_request_init(&obj->frontbuffer_write, |
4002 | NULL, frontbuffer_retire); | |
a4f5ea64 CW |
4003 | |
4004 | obj->mm.madv = I915_MADV_WILLNEED; | |
4005 | INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); | |
4006 | mutex_init(&obj->mm.get_page.lock); | |
0327d6ba | 4007 | |
f19ec8cb | 4008 | i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); |
0327d6ba CW |
4009 | } |
4010 | ||
37e680a1 | 4011 | static const struct drm_i915_gem_object_ops i915_gem_object_ops = { |
3599a91c TU |
4012 | .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | |
4013 | I915_GEM_OBJECT_IS_SHRINKABLE, | |
7c55e2c5 | 4014 | |
37e680a1 CW |
4015 | .get_pages = i915_gem_object_get_pages_gtt, |
4016 | .put_pages = i915_gem_object_put_pages_gtt, | |
7c55e2c5 CW |
4017 | |
4018 | .pwrite = i915_gem_object_pwrite_gtt, | |
37e680a1 CW |
4019 | }; |
4020 | ||
465c403c MA |
4021 | static int i915_gem_object_create_shmem(struct drm_device *dev, |
4022 | struct drm_gem_object *obj, | |
4023 | size_t size) | |
4024 | { | |
4025 | struct drm_i915_private *i915 = to_i915(dev); | |
4026 | unsigned long flags = VM_NORESERVE; | |
4027 | struct file *filp; | |
4028 | ||
4029 | drm_gem_private_object_init(dev, obj, size); | |
4030 | ||
4031 | if (i915->mm.gemfs) | |
4032 | filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size, | |
4033 | flags); | |
4034 | else | |
4035 | filp = shmem_file_setup("i915", size, flags); | |
4036 | ||
4037 | if (IS_ERR(filp)) | |
4038 | return PTR_ERR(filp); | |
4039 | ||
4040 | obj->filp = filp; | |
4041 | ||
4042 | return 0; | |
4043 | } | |
4044 | ||
b4bcbe2a | 4045 | struct drm_i915_gem_object * |
12d79d78 | 4046 | i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) |
ac52bc56 | 4047 | { |
c397b908 | 4048 | struct drm_i915_gem_object *obj; |
5949eac4 | 4049 | struct address_space *mapping; |
b8f55be6 | 4050 | unsigned int cache_level; |
1a240d4d | 4051 | gfp_t mask; |
fe3db79b | 4052 | int ret; |
ac52bc56 | 4053 | |
b4bcbe2a CW |
4054 | /* There is a prevalence of the assumption that we fit the object's |
4055 | * page count inside a 32bit _signed_ variable. Let's document this and | |
4056 | * catch if we ever need to fix it. In the meantime, if you do spot | |
4057 | * such a local variable, please consider fixing! | |
4058 | */ | |
7a3ee5de | 4059 | if (size >> PAGE_SHIFT > INT_MAX) |
b4bcbe2a CW |
4060 | return ERR_PTR(-E2BIG); |
4061 | ||
4062 | if (overflows_type(size, obj->base.size)) | |
4063 | return ERR_PTR(-E2BIG); | |
4064 | ||
13f1bfd3 | 4065 | obj = i915_gem_object_alloc(); |
c397b908 | 4066 | if (obj == NULL) |
fe3db79b | 4067 | return ERR_PTR(-ENOMEM); |
673a394b | 4068 | |
465c403c | 4069 | ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size); |
fe3db79b CW |
4070 | if (ret) |
4071 | goto fail; | |
673a394b | 4072 | |
bed1ea95 | 4073 | mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; |
c0f86832 | 4074 | if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) { |
bed1ea95 CW |
4075 | /* 965gm cannot relocate objects above 4GiB. */ |
4076 | mask &= ~__GFP_HIGHMEM; | |
4077 | mask |= __GFP_DMA32; | |
4078 | } | |
4079 | ||
93c76a3d | 4080 | mapping = obj->base.filp->f_mapping; |
bed1ea95 | 4081 | mapping_set_gfp_mask(mapping, mask); |
4846bf0c | 4082 | GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); |
5949eac4 | 4083 | |
37e680a1 | 4084 | i915_gem_object_init(obj, &i915_gem_object_ops); |
73aa808f | 4085 | |
c0a51fd0 CK |
4086 | obj->write_domain = I915_GEM_DOMAIN_CPU; |
4087 | obj->read_domains = I915_GEM_DOMAIN_CPU; | |
673a394b | 4088 | |
b8f55be6 | 4089 | if (HAS_LLC(dev_priv)) |
3d29b842 | 4090 | /* On some devices, we can have the GPU use the LLC (the CPU |
a1871112 EA |
4091 | * cache) for about a 10% performance improvement |
4092 | * compared to uncached. Graphics requests other than | |
4093 | * display scanout are coherent with the CPU in | |
4094 | * accessing this cache. This means in this mode we | |
4095 | * don't need to clflush on the CPU side, and on the | |
4096 | * GPU side we only need to flush internal caches to | |
4097 | * get data visible to the CPU. | |
4098 | * | |
4099 | * However, we maintain the display planes as UC, and so | |
4100 | * need to rebind when first used as such. | |
4101 | */ | |
b8f55be6 CW |
4102 | cache_level = I915_CACHE_LLC; |
4103 | else | |
4104 | cache_level = I915_CACHE_NONE; | |
a1871112 | 4105 | |
b8f55be6 | 4106 | i915_gem_object_set_cache_coherency(obj, cache_level); |
e27ab73d | 4107 | |
d861e338 DV |
4108 | trace_i915_gem_object_create(obj); |
4109 | ||
05394f39 | 4110 | return obj; |
fe3db79b CW |
4111 | |
4112 | fail: | |
4113 | i915_gem_object_free(obj); | |
fe3db79b | 4114 | return ERR_PTR(ret); |
c397b908 DV |
4115 | } |
4116 | ||
340fbd8c CW |
4117 | static bool discard_backing_storage(struct drm_i915_gem_object *obj) |
4118 | { | |
4119 | /* If we are the last user of the backing storage (be it shmemfs | |
4120 | * pages or stolen etc), we know that the pages are going to be | |
4121 | * immediately released. In this case, we can then skip copying | |
4122 | * back the contents from the GPU. | |
4123 | */ | |
4124 | ||
a4f5ea64 | 4125 | if (obj->mm.madv != I915_MADV_WILLNEED) |
340fbd8c CW |
4126 | return false; |
4127 | ||
4128 | if (obj->base.filp == NULL) | |
4129 | return true; | |
4130 | ||
4131 | /* At first glance, this looks racy, but then again so would be | |
4132 | * userspace racing mmap against close. However, the first external | |
4133 | * reference to the filp can only be obtained through the | |
4134 | * i915_gem_mmap_ioctl() which safeguards us against the user | |
4135 | * acquiring such a reference whilst we are in the middle of | |
4136 | * freeing the object. | |
4137 | */ | |
4138 | return atomic_long_read(&obj->base.filp->f_count) == 1; | |
4139 | } | |
4140 | ||
fbbd37b3 CW |
4141 | static void __i915_gem_free_objects(struct drm_i915_private *i915, |
4142 | struct llist_node *freed) | |
673a394b | 4143 | { |
fbbd37b3 | 4144 | struct drm_i915_gem_object *obj, *on; |
538ef96b | 4145 | intel_wakeref_t wakeref; |
673a394b | 4146 | |
538ef96b | 4147 | wakeref = intel_runtime_pm_get(i915); |
cc731f5a | 4148 | llist_for_each_entry_safe(obj, on, freed, freed) { |
fbbd37b3 CW |
4149 | struct i915_vma *vma, *vn; |
4150 | ||
4151 | trace_i915_gem_object_destroy(obj); | |
4152 | ||
cc731f5a CW |
4153 | mutex_lock(&i915->drm.struct_mutex); |
4154 | ||
fbbd37b3 | 4155 | GEM_BUG_ON(i915_gem_object_is_active(obj)); |
528cbd17 | 4156 | list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) { |
fbbd37b3 CW |
4157 | GEM_BUG_ON(i915_vma_is_active(vma)); |
4158 | vma->flags &= ~I915_VMA_PIN_MASK; | |
3365e226 | 4159 | i915_vma_destroy(vma); |
fbbd37b3 | 4160 | } |
528cbd17 CW |
4161 | GEM_BUG_ON(!list_empty(&obj->vma.list)); |
4162 | GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree)); | |
fbbd37b3 | 4163 | |
f2123818 CW |
4164 | /* This serializes freeing with the shrinker. Since the free |
4165 | * is delayed, first by RCU then by the workqueue, we want the | |
4166 | * shrinker to be able to free pages of unreferenced objects, | |
4167 | * or else we may oom whilst there are plenty of deferred | |
4168 | * freed objects. | |
4169 | */ | |
4170 | if (i915_gem_object_has_pages(obj)) { | |
4171 | spin_lock(&i915->mm.obj_lock); | |
4172 | list_del_init(&obj->mm.link); | |
4173 | spin_unlock(&i915->mm.obj_lock); | |
4174 | } | |
4175 | ||
cc731f5a | 4176 | mutex_unlock(&i915->drm.struct_mutex); |
fbbd37b3 | 4177 | |
fbbd37b3 | 4178 | GEM_BUG_ON(obj->bind_count); |
a65adaf8 | 4179 | GEM_BUG_ON(obj->userfault_count); |
fbbd37b3 | 4180 | GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); |
67b48040 | 4181 | GEM_BUG_ON(!list_empty(&obj->lut_list)); |
fbbd37b3 CW |
4182 | |
4183 | if (obj->ops->release) | |
4184 | obj->ops->release(obj); | |
f65c9168 | 4185 | |
fbbd37b3 CW |
4186 | if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) |
4187 | atomic_set(&obj->mm.pages_pin_count, 0); | |
548625ee | 4188 | __i915_gem_object_put_pages(obj, I915_MM_NORMAL); |
f1fa4f44 | 4189 | GEM_BUG_ON(i915_gem_object_has_pages(obj)); |
fbbd37b3 CW |
4190 | |
4191 | if (obj->base.import_attach) | |
4192 | drm_prime_gem_destroy(&obj->base, NULL); | |
4193 | ||
d07f0e59 | 4194 | reservation_object_fini(&obj->__builtin_resv); |
fbbd37b3 CW |
4195 | drm_gem_object_release(&obj->base); |
4196 | i915_gem_info_remove_obj(i915, obj->base.size); | |
4197 | ||
6e514e37 | 4198 | bitmap_free(obj->bit_17); |
fbbd37b3 | 4199 | i915_gem_object_free(obj); |
cc731f5a | 4200 | |
c9c70471 CW |
4201 | GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); |
4202 | atomic_dec(&i915->mm.free_count); | |
4203 | ||
cc731f5a CW |
4204 | if (on) |
4205 | cond_resched(); | |
fbbd37b3 | 4206 | } |
538ef96b | 4207 | intel_runtime_pm_put(i915, wakeref); |
fbbd37b3 CW |
4208 | } |
4209 | ||
4210 | static void i915_gem_flush_free_objects(struct drm_i915_private *i915) | |
4211 | { | |
4212 | struct llist_node *freed; | |
4213 | ||
87701b4b CW |
4214 | /* Free the oldest, most stale object to keep the free_list short */ |
4215 | freed = NULL; | |
4216 | if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */ | |
4217 | /* Only one consumer of llist_del_first() allowed */ | |
4218 | spin_lock(&i915->mm.free_lock); | |
4219 | freed = llist_del_first(&i915->mm.free_list); | |
4220 | spin_unlock(&i915->mm.free_lock); | |
4221 | } | |
4222 | if (unlikely(freed)) { | |
4223 | freed->next = NULL; | |
fbbd37b3 | 4224 | __i915_gem_free_objects(i915, freed); |
87701b4b | 4225 | } |
fbbd37b3 CW |
4226 | } |
4227 | ||
4228 | static void __i915_gem_free_work(struct work_struct *work) | |
4229 | { | |
4230 | struct drm_i915_private *i915 = | |
4231 | container_of(work, struct drm_i915_private, mm.free_work); | |
4232 | struct llist_node *freed; | |
26e12f89 | 4233 | |
2ef1e729 CW |
4234 | /* |
4235 | * All file-owned VMA should have been released by this point through | |
b1f788c6 CW |
4236 | * i915_gem_close_object(), or earlier by i915_gem_context_close(). |
4237 | * However, the object may also be bound into the global GTT (e.g. | |
4238 | * older GPUs without per-process support, or for direct access through | |
4239 | * the GTT either for the user or for scanout). Those VMA still need to | |
4240 | * be unbound now. | |
4241 | */ | |
1488fc08 | 4242 | |
f991c492 | 4243 | spin_lock(&i915->mm.free_lock); |
5ad08be7 | 4244 | while ((freed = llist_del_all(&i915->mm.free_list))) { |
f991c492 CW |
4245 | spin_unlock(&i915->mm.free_lock); |
4246 | ||
fbbd37b3 | 4247 | __i915_gem_free_objects(i915, freed); |
5ad08be7 | 4248 | if (need_resched()) |
f991c492 CW |
4249 | return; |
4250 | ||
4251 | spin_lock(&i915->mm.free_lock); | |
5ad08be7 | 4252 | } |
f991c492 | 4253 | spin_unlock(&i915->mm.free_lock); |
fbbd37b3 | 4254 | } |
a071fa00 | 4255 | |
fbbd37b3 CW |
4256 | static void __i915_gem_free_object_rcu(struct rcu_head *head) |
4257 | { | |
4258 | struct drm_i915_gem_object *obj = | |
4259 | container_of(head, typeof(*obj), rcu); | |
4260 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | |
8811d616 CW |
4261 | |
4262 | /* | |
4263 | * We reuse obj->rcu for the freed list, so we had better not treat | |
4264 | * it like a rcu_head from this point forwards. And we expect all | |
4265 | * objects to be freed via this path. | |
4266 | */ | |
4267 | destroy_rcu_head(&obj->rcu); | |
fbbd37b3 | 4268 | |
2ef1e729 CW |
4269 | /* |
4270 | * Since we require blocking on struct_mutex to unbind the freed | |
4271 | * object from the GPU before releasing resources back to the | |
4272 | * system, we cannot do that directly from the RCU callback (which may | |
4273 | * be a softirq context), but must instead defer that work onto a | |
4274 | * kthread. We use the RCU callback rather than move the freed object | |
4275 | * directly onto the work queue so that we can mix between using the | |
4276 | * worker and performing frees directly from subsequent allocations for | |
4277 | * crude but effective memory throttling. | |
fbbd37b3 CW |
4278 | */ |
4279 | if (llist_add(&obj->freed, &i915->mm.free_list)) | |
beacbd16 | 4280 | queue_work(i915->wq, &i915->mm.free_work); |
fbbd37b3 | 4281 | } |
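The llist_add() test just above is what keeps the free worker from being queued more than once per batch: llist_add() returns whether the list was empty before the push, so only the object whose push makes free_list non-empty schedules __i915_gem_free_work(). A minimal userspace sketch of that empty-to-non-empty kick, using C11 atomics in place of the kernel's llist and workqueue helpers (every identifier below is illustrative, not part of i915):

/*
 * Hypothetical userspace analogue of the llist_add()/queue_work() pairing:
 * push onto a lock-free singly-linked list and only wake the worker when
 * the list goes from empty to non-empty.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	int payload;
};

static _Atomic(struct node *) free_list;

/* Returns true if the list was empty before this push (mirrors llist_add()). */
static bool free_list_add(struct node *n)
{
	struct node *first = atomic_load(&free_list);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&free_list, &first, n));

	return first == NULL;
}

static void queue_worker(void)
{
	printf("worker queued\n");	/* stand-in for queue_work() */
}

int main(void)
{
	static struct node a, b;

	if (free_list_add(&a))	/* empty -> non-empty: kick the worker once */
		queue_worker();
	if (free_list_add(&b))	/* already pending: no second wake-up */
		queue_worker();
	return 0;
}

i915_gem_flush_free_objects() above likewise relies on the lock-free list for its quick llist_empty() test before taking free_lock.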
656bfa3a | 4282 | |
fbbd37b3 CW |
4283 | void i915_gem_free_object(struct drm_gem_object *gem_obj) |
4284 | { | |
4285 | struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); | |
a4f5ea64 | 4286 | |
bc0629a7 CW |
4287 | if (obj->mm.quirked) |
4288 | __i915_gem_object_unpin_pages(obj); | |
4289 | ||
340fbd8c | 4290 | if (discard_backing_storage(obj)) |
a4f5ea64 | 4291 | obj->mm.madv = I915_MADV_DONTNEED; |
de151cf6 | 4292 | |
2ef1e729 CW |
4293 | /* |
4294 | * Before we free the object, make sure any pure RCU-only | |
fbbd37b3 CW |
4295 | * read-side critical sections are complete, e.g. |
4296 | * i915_gem_busy_ioctl(). For the corresponding synchronized | |
4297 | * lookup see i915_gem_object_lookup_rcu(). | |
4298 | */ | |
c9c70471 | 4299 | atomic_inc(&to_i915(obj->base.dev)->mm.free_count); |
fbbd37b3 | 4300 | call_rcu(&obj->rcu, __i915_gem_free_object_rcu); |
673a394b EA |
4301 | } |
4302 | ||
f8a7fde4 CW |
4303 | void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) |
4304 | { | |
4305 | lockdep_assert_held(&obj->base.dev->struct_mutex); | |
4306 | ||
d1b48c1e CW |
4307 | if (!i915_gem_object_has_active_reference(obj) && |
4308 | i915_gem_object_is_active(obj)) | |
f8a7fde4 CW |
4309 | i915_gem_object_set_active_reference(obj); |
4310 | else | |
4311 | i915_gem_object_put(obj); | |
4312 | } | |
4313 | ||
24145517 CW |
4314 | void i915_gem_sanitize(struct drm_i915_private *i915) |
4315 | { | |
538ef96b CW |
4316 | intel_wakeref_t wakeref; |
4317 | ||
c3160da9 CW |
4318 | GEM_TRACE("\n"); |
4319 | ||
538ef96b | 4320 | wakeref = intel_runtime_pm_get(i915); |
3ceea6a1 | 4321 | intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); |
c3160da9 CW |
4322 | |
4323 | /* | |
4324 | * As we have just resumed the machine and woken the device up from | |
4325 | * deep PCI sleep (presumably D3_cold), assume the HW has been reset | |
4326 | * back to defaults, recovering from whatever wedged state we left it | |
4327 | * in and so worth trying to use the device once more. | |
4328 | */ | |
c41166f9 | 4329 | if (i915_terminally_wedged(i915)) |
f36325f3 | 4330 | i915_gem_unset_wedged(i915); |
f36325f3 | 4331 | |
24145517 CW |
4332 | /* |
4333 | * If we inherit context state from the BIOS or earlier occupants | |
4334 | * of the GPU, the GPU may be in an inconsistent state when we | |
4335 | * try to take over. The only way to remove the earlier state | |
4336 | * is by resetting. However, resetting on earlier gen is tricky as | |
4337 | * it may impact the display and we are uncertain about the stability | |
ea117b8d | 4338 | * of the reset, so this could be applied to even earlier gen. |
24145517 | 4339 | */ |
55277e1f | 4340 | intel_engines_sanitize(i915, false); |
c3160da9 | 4341 | |
3ceea6a1 | 4342 | intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); |
538ef96b | 4343 | intel_runtime_pm_put(i915, wakeref); |
c3160da9 | 4344 | |
eb8d0f5a | 4345 | mutex_lock(&i915->drm.struct_mutex); |
4dfacb0b CW |
4346 | i915_gem_contexts_lost(i915); |
4347 | mutex_unlock(&i915->drm.struct_mutex); | |
24145517 CW |
4348 | } |
4349 | ||
5861b013 | 4350 | void i915_gem_suspend(struct drm_i915_private *i915) |
29105ccc | 4351 | { |
538ef96b | 4352 | intel_wakeref_t wakeref; |
28dfe52a | 4353 | |
09a4c02e CW |
4354 | GEM_TRACE("\n"); |
4355 | ||
538ef96b | 4356 | wakeref = intel_runtime_pm_get(i915); |
bf06112f | 4357 | intel_suspend_gt_powersave(i915); |
54b4f68f | 4358 | |
eb8d0f5a CW |
4359 | flush_workqueue(i915->wq); |
4360 | ||
bf06112f | 4361 | mutex_lock(&i915->drm.struct_mutex); |
5ab57c70 | 4362 | |
bf06112f CW |
4363 | /* |
4364 | * We have to flush all the executing contexts to main memory so | |
5ab57c70 CW |
4365 | * that they can be saved in the hibernation image. To ensure the last | |
4366 | * context image is coherent, we have to switch away from it. That | |
bf06112f | 4367 | * leaves the i915->kernel_context still active when |
5ab57c70 CW |
4368 | * we actually suspend, and its image in memory may not match the GPU |
4369 | * state. Fortunately, the kernel_context is disposable and we do | |
4370 | * not rely on its state. | |
4371 | */ | |
c6eeb479 | 4372 | switch_to_kernel_context_sync(i915, i915->gt.active_engines); |
01f8f33e | 4373 | |
bf06112f | 4374 | mutex_unlock(&i915->drm.struct_mutex); |
eb8d0f5a | 4375 | i915_reset_flush(i915); |
45c5f202 | 4376 | |
eb8d0f5a | 4377 | drain_delayed_work(&i915->gt.retire_work); |
bdeb9785 | 4378 | |
bf06112f CW |
4379 | /* |
4380 | * As the idle_work is rearming if it detects a race, play safe and | |
bdeb9785 CW |
4381 | * repeat the flush until it is definitely idle. |
4382 | */ | |
bf06112f | 4383 | drain_delayed_work(&i915->gt.idle_work); |
bdeb9785 | 4384 | |
bf06112f CW |
4385 | /* |
4386 | * Assert that we successfully flushed all the work and | |
bdcf120b CW |
4387 | * reset the GPU back to its idle, low power state. |
4388 | */ | |
50b022af | 4389 | GEM_BUG_ON(i915->gt.awake); |
bdcf120b | 4390 | |
538ef96b | 4391 | intel_runtime_pm_put(i915, wakeref); |
ec92ad00 CW |
4392 | } |
4393 | ||
4394 | void i915_gem_suspend_late(struct drm_i915_private *i915) | |
4395 | { | |
9776f472 CW |
4396 | struct drm_i915_gem_object *obj; |
4397 | struct list_head *phases[] = { | |
4398 | &i915->mm.unbound_list, | |
4399 | &i915->mm.bound_list, | |
4400 | NULL | |
4401 | }, **phase; | |
4402 | ||
1c777c5d ID |
4403 | /* |
4404 | * Neither the BIOS, ourselves, nor any other kernel | |
4405 | * expects the system to be in execlists mode on startup, | |
4406 | * so we need to reset the GPU back to legacy mode. And the only | |
4407 | * known way to disable logical contexts is through a GPU reset. | |
4408 | * | |
4409 | * So in order to leave the system in a known default configuration, | |
4410 | * always reset the GPU upon unload and suspend. Afterwards we then | |
4411 | * clean up the GEM state tracking, flushing off the requests and | |
4412 | * leaving the system in a known idle state. | |
4413 | * | |
4414 | * Note that it is of the utmost importance that the GPU is idle and | |
4415 | * all stray writes are flushed *before* we dismantle the backing | |
4416 | * storage for the pinned objects. | |
4417 | * | |
4418 | * However, since we are uncertain that resetting the GPU on older | |
4419 | * machines is a good idea, we don't - just in case it leaves the | |
4420 | * machine in an unusable condition. | |
4421 | */ | |
1c777c5d | 4422 | |
9776f472 CW |
4423 | mutex_lock(&i915->drm.struct_mutex); |
4424 | for (phase = phases; *phase; phase++) { | |
4425 | list_for_each_entry(obj, *phase, mm.link) | |
4426 | WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); | |
4427 | } | |
4428 | mutex_unlock(&i915->drm.struct_mutex); | |
4429 | ||
ec92ad00 CW |
4430 | intel_uc_sanitize(i915); |
4431 | i915_gem_sanitize(i915); | |
673a394b EA |
4432 | } |
4433 | ||
37cd3300 | 4434 | void i915_gem_resume(struct drm_i915_private *i915) |
5ab57c70 | 4435 | { |
4dfacb0b CW |
4436 | GEM_TRACE("\n"); |
4437 | ||
37cd3300 | 4438 | WARN_ON(i915->gt.awake); |
5ab57c70 | 4439 | |
37cd3300 | 4440 | mutex_lock(&i915->drm.struct_mutex); |
3ceea6a1 | 4441 | intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); |
31ab49ab | 4442 | |
37cd3300 CW |
4443 | i915_gem_restore_gtt_mappings(i915); |
4444 | i915_gem_restore_fences(i915); | |
5ab57c70 | 4445 | |
6ca9a2be CW |
4446 | /* |
4447 | * As we didn't flush the kernel context before suspend, we cannot | |
5ab57c70 CW |
4448 | * guarantee that the context image is complete. So let's just reset |
4449 | * it and start again. | |
4450 | */ | |
37cd3300 | 4451 | i915->gt.resume(i915); |
5ab57c70 | 4452 | |
37cd3300 CW |
4453 | if (i915_gem_init_hw(i915)) |
4454 | goto err_wedged; | |
4455 | ||
7cfca4af | 4456 | intel_uc_resume(i915); |
7469c62c | 4457 | |
37cd3300 | 4458 | /* Always reload a context for powersaving. */ |
604c37d7 | 4459 | if (!load_power_context(i915)) |
37cd3300 CW |
4460 | goto err_wedged; |
4461 | ||
4462 | out_unlock: | |
3ceea6a1 | 4463 | intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); |
37cd3300 CW |
4464 | mutex_unlock(&i915->drm.struct_mutex); |
4465 | return; | |
4466 | ||
4467 | err_wedged: | |
c41166f9 CW |
4468 | if (!i915_reset_failed(i915)) { |
4469 | dev_err(i915->drm.dev, | |
4470 | "Failed to re-initialize GPU, declaring it wedged!\n"); | |
6ca9a2be CW |
4471 | i915_gem_set_wedged(i915); |
4472 | } | |
37cd3300 | 4473 | goto out_unlock; |
5ab57c70 CW |
4474 | } |
4475 | ||
c6be607a | 4476 | void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) |
f691e2f4 | 4477 | { |
c6be607a | 4478 | if (INTEL_GEN(dev_priv) < 5 || |
f691e2f4 DV |
4479 | dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) |
4480 | return; | |
4481 | ||
4482 | I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | | |
4483 | DISP_TILE_SURFACE_SWIZZLING); | |
4484 | ||
cf819eff | 4485 | if (IS_GEN(dev_priv, 5)) |
11782b02 DV |
4486 | return; |
4487 | ||
f691e2f4 | 4488 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); |
cf819eff | 4489 | if (IS_GEN(dev_priv, 6)) |
6b26c86d | 4490 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); |
cf819eff | 4491 | else if (IS_GEN(dev_priv, 7)) |
6b26c86d | 4492 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); |
cf819eff | 4493 | else if (IS_GEN(dev_priv, 8)) |
31a5336e | 4494 | I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); |
8782e26c BW |
4495 | else |
4496 | BUG(); | |
f691e2f4 | 4497 | } |
e21af88d | 4498 | |
50a0bc90 | 4499 | static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) |
81e7f200 | 4500 | { |
81e7f200 VS |
4501 | I915_WRITE(RING_CTL(base), 0); |
4502 | I915_WRITE(RING_HEAD(base), 0); | |
4503 | I915_WRITE(RING_TAIL(base), 0); | |
4504 | I915_WRITE(RING_START(base), 0); | |
4505 | } | |
4506 | ||
50a0bc90 | 4507 | static void init_unused_rings(struct drm_i915_private *dev_priv) |
81e7f200 | 4508 | { |
50a0bc90 TU |
4509 | if (IS_I830(dev_priv)) { |
4510 | init_unused_ring(dev_priv, PRB1_BASE); | |
4511 | init_unused_ring(dev_priv, SRB0_BASE); | |
4512 | init_unused_ring(dev_priv, SRB1_BASE); | |
4513 | init_unused_ring(dev_priv, SRB2_BASE); | |
4514 | init_unused_ring(dev_priv, SRB3_BASE); | |
cf819eff | 4515 | } else if (IS_GEN(dev_priv, 2)) { |
50a0bc90 TU |
4516 | init_unused_ring(dev_priv, SRB0_BASE); |
4517 | init_unused_ring(dev_priv, SRB1_BASE); | |
cf819eff | 4518 | } else if (IS_GEN(dev_priv, 3)) { |
50a0bc90 TU |
4519 | init_unused_ring(dev_priv, PRB1_BASE); |
4520 | init_unused_ring(dev_priv, PRB2_BASE); | |
81e7f200 VS |
4521 | } |
4522 | } | |
4523 | ||
20a8a74a | 4524 | static int __i915_gem_restart_engines(void *data) |
4fc7c971 | 4525 | { |
20a8a74a | 4526 | struct drm_i915_private *i915 = data; |
e2f80391 | 4527 | struct intel_engine_cs *engine; |
3b3f1650 | 4528 | enum intel_engine_id id; |
20a8a74a CW |
4529 | int err; |
4530 | ||
4531 | for_each_engine(engine, i915, id) { | |
4532 | err = engine->init_hw(engine); | |
8177e112 CW |
4533 | if (err) { |
4534 | DRM_ERROR("Failed to restart %s (%d)\n", | |
4535 | engine->name, err); | |
20a8a74a | 4536 | return err; |
8177e112 | 4537 | } |
20a8a74a CW |
4538 | } |
4539 | ||
2d5eaad0 CW |
4540 | intel_engines_set_scheduler_caps(i915); |
4541 | ||
20a8a74a CW |
4542 | return 0; |
4543 | } | |
4544 | ||
4545 | int i915_gem_init_hw(struct drm_i915_private *dev_priv) | |
4546 | { | |
d200cda6 | 4547 | int ret; |
4fc7c971 | 4548 | |
de867c20 CW |
4549 | dev_priv->gt.last_init_time = ktime_get(); |
4550 | ||
5e4f5189 | 4551 | /* Double layer security blanket, see i915_gem_init() */ |
3ceea6a1 | 4552 | intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); |
5e4f5189 | 4553 | |
0031fb96 | 4554 | if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) |
05e21cc4 | 4555 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); |
4fc7c971 | 4556 | |
772c2a51 | 4557 | if (IS_HASWELL(dev_priv)) |
50a0bc90 | 4558 | I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? |
0bf21347 | 4559 | LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); |
9435373e | 4560 | |
094304be | 4561 | /* Apply the GT workarounds... */ |
25d140fa | 4562 | intel_gt_apply_workarounds(dev_priv); |
094304be TU |
4563 | /* ...and determine whether they are sticking. */ |
4564 | intel_gt_verify_workarounds(dev_priv, "init"); | |
59b449d5 | 4565 | |
c6be607a | 4566 | i915_gem_init_swizzling(dev_priv); |
4fc7c971 | 4567 | |
d5abdfda DV |
4568 | /* |
4569 | * At least 830 can leave some of the unused rings | |
4570 | * "active" (ie. head != tail) after resume which | |
4571 | * will prevent c3 entry. Make sure all unused rings | |
4572 | * are totally idle. | |
4573 | */ | |
50a0bc90 | 4574 | init_unused_rings(dev_priv); |
d5abdfda | 4575 | |
ed54c1a1 | 4576 | BUG_ON(!dev_priv->kernel_context); |
c41166f9 CW |
4577 | ret = i915_terminally_wedged(dev_priv); |
4578 | if (ret) | |
6f74b36b | 4579 | goto out; |
90638cc1 | 4580 | |
c6be607a | 4581 | ret = i915_ppgtt_init_hw(dev_priv); |
4ad2fd88 | 4582 | if (ret) { |
8177e112 | 4583 | DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); |
4ad2fd88 JH |
4584 | goto out; |
4585 | } | |
4586 | ||
f08e2035 JL |
4587 | ret = intel_wopcm_init_hw(&dev_priv->wopcm); |
4588 | if (ret) { | |
4589 | DRM_ERROR("Enabling WOPCM failed (%d)\n", ret); | |
4590 | goto out; | |
4591 | } | |
4592 | ||
9bdc3573 MW |
4593 | /* We can't enable contexts until all firmware is loaded */ |
4594 | ret = intel_uc_init_hw(dev_priv); | |
8177e112 CW |
4595 | if (ret) { |
4596 | DRM_ERROR("Enabling uc failed (%d)\n", ret); | |
9bdc3573 | 4597 | goto out; |
8177e112 | 4598 | } |
9bdc3573 | 4599 | |
bf9e8429 | 4600 | intel_mocs_init_l3cc_table(dev_priv); |
0ccdacf6 | 4601 | |
136109c6 CW |
4602 | /* Only when the HW is re-initialised, can we replay the requests */ |
4603 | ret = __i915_gem_restart_engines(dev_priv); | |
b96f6ebf MW |
4604 | if (ret) |
4605 | goto cleanup_uc; | |
60c0a66e | 4606 | |
3ceea6a1 | 4607 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
60c0a66e MW |
4608 | |
4609 | return 0; | |
b96f6ebf MW |
4610 | |
4611 | cleanup_uc: | |
4612 | intel_uc_fini_hw(dev_priv); | |
60c0a66e | 4613 | out: |
3ceea6a1 | 4614 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
60c0a66e MW |
4615 | |
4616 | return ret; | |
8187a2b7 ZN |
4617 | } |
4618 | ||
d2b4b979 CW |
4619 | static int __intel_engines_record_defaults(struct drm_i915_private *i915) |
4620 | { | |
4621 | struct i915_gem_context *ctx; | |
4622 | struct intel_engine_cs *engine; | |
4623 | enum intel_engine_id id; | |
604c37d7 | 4624 | int err = 0; |
d2b4b979 CW |
4625 | |
4626 | /* | |
4627 | * As we reset the gpu during very early sanitisation, the current | |
4628 | * register state on the GPU should reflect its default values. | |
4629 | * We load a context onto the hw (with restore-inhibit), then switch | |
4630 | * over to a second context to save that default register state. We | |
4631 | * can then prime every new context with that state so they all start | |
4632 | * from the same default HW values. | |
4633 | */ | |
4634 | ||
4635 | ctx = i915_gem_context_create_kernel(i915, 0); | |
4636 | if (IS_ERR(ctx)) | |
4637 | return PTR_ERR(ctx); | |
4638 | ||
4639 | for_each_engine(engine, i915, id) { | |
e61e0f51 | 4640 | struct i915_request *rq; |
d2b4b979 | 4641 | |
e61e0f51 | 4642 | rq = i915_request_alloc(engine, ctx); |
d2b4b979 CW |
4643 | if (IS_ERR(rq)) { |
4644 | err = PTR_ERR(rq); | |
4645 | goto out_ctx; | |
4646 | } | |
4647 | ||
3fef5cda | 4648 | err = 0; |
d2b4b979 CW |
4649 | if (engine->init_context) |
4650 | err = engine->init_context(rq); | |
4651 | ||
697b9a87 | 4652 | i915_request_add(rq); |
d2b4b979 CW |
4653 | if (err) |
4654 | goto err_active; | |
4655 | } | |
4656 | ||
604c37d7 CW |
4657 | /* Flush the default context image to memory, and enable powersaving. */ |
4658 | if (!load_power_context(i915)) { | |
4659 | err = -EIO; | |
d2b4b979 | 4660 | goto err_active; |
2621cefa | 4661 | } |
d2b4b979 | 4662 | |
d2b4b979 | 4663 | for_each_engine(engine, i915, id) { |
c4d52feb | 4664 | struct intel_context *ce; |
d2b4b979 | 4665 | struct i915_vma *state; |
37d7c9cc | 4666 | void *vaddr; |
d2b4b979 | 4667 | |
c4d52feb CW |
4668 | ce = intel_context_lookup(ctx, engine); |
4669 | if (!ce) | |
4670 | continue; | |
666424ab | 4671 | |
c4d52feb | 4672 | state = ce->state; |
d2b4b979 CW |
4673 | if (!state) |
4674 | continue; | |
4675 | ||
08819549 | 4676 | GEM_BUG_ON(intel_context_is_pinned(ce)); |
c4d52feb | 4677 | |
d2b4b979 CW |
4678 | /* |
4679 | * As we will hold a reference to the logical state, it will | |
4680 | * not be torn down with the context, and importantly the | |
4681 | * object will hold onto its vma (making it possible for a | |
4682 | * stray GTT write to corrupt our defaults). Unmap the vma | |
4683 | * from the GTT to prevent such accidents and reclaim the | |
4684 | * space. | |
4685 | */ | |
4686 | err = i915_vma_unbind(state); | |
4687 | if (err) | |
4688 | goto err_active; | |
4689 | ||
4690 | err = i915_gem_object_set_to_cpu_domain(state->obj, false); | |
4691 | if (err) | |
4692 | goto err_active; | |
4693 | ||
4694 | engine->default_state = i915_gem_object_get(state->obj); | |
37d7c9cc CW |
4695 | |
4696 | /* Check we can acquire the image of the context state */ | |
4697 | vaddr = i915_gem_object_pin_map(engine->default_state, | |
666424ab | 4698 | I915_MAP_FORCE_WB); |
37d7c9cc CW |
4699 | if (IS_ERR(vaddr)) { |
4700 | err = PTR_ERR(vaddr); | |
4701 | goto err_active; | |
4702 | } | |
4703 | ||
4704 | i915_gem_object_unpin_map(engine->default_state); | |
d2b4b979 CW |
4705 | } |
4706 | ||
4707 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) { | |
4708 | unsigned int found = intel_engines_has_context_isolation(i915); | |
4709 | ||
4710 | /* | |
4711 | * Make sure that classes with multiple engine instances all | |
4712 | * share the same basic configuration. | |
4713 | */ | |
4714 | for_each_engine(engine, i915, id) { | |
4715 | unsigned int bit = BIT(engine->uabi_class); | |
4716 | unsigned int expected = engine->default_state ? bit : 0; | |
4717 | ||
4718 | if ((found & bit) != expected) { | |
4719 | DRM_ERROR("mismatching default context state for class %d on engine %s\n", | |
4720 | engine->uabi_class, engine->name); | |
4721 | } | |
4722 | } | |
4723 | } | |
4724 | ||
4725 | out_ctx: | |
4726 | i915_gem_context_set_closed(ctx); | |
4727 | i915_gem_context_put(ctx); | |
4728 | return err; | |
4729 | ||
4730 | err_active: | |
4731 | /* | |
4732 | * If we have to abandon now, we expect the engines to be idle | |
604c37d7 CW |
4733 | * and ready to be torn-down. The quickest way we can accomplish |
4734 | * this is by declaring ourselves wedged. | |
d2b4b979 | 4735 | */ |
604c37d7 | 4736 | i915_gem_set_wedged(i915); |
d2b4b979 CW |
4737 | goto out_ctx; |
4738 | } | |
4739 | ||
51797499 CW |
4740 | static int |
4741 | i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size) | |
4742 | { | |
4743 | struct drm_i915_gem_object *obj; | |
4744 | struct i915_vma *vma; | |
4745 | int ret; | |
4746 | ||
4747 | obj = i915_gem_object_create_stolen(i915, size); | |
4748 | if (!obj) | |
4749 | obj = i915_gem_object_create_internal(i915, size); | |
4750 | if (IS_ERR(obj)) { | |
4751 | DRM_ERROR("Failed to allocate scratch page\n"); | |
4752 | return PTR_ERR(obj); | |
4753 | } | |
4754 | ||
4755 | vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); | |
4756 | if (IS_ERR(vma)) { | |
4757 | ret = PTR_ERR(vma); | |
4758 | goto err_unref; | |
4759 | } | |
4760 | ||
4761 | ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); | |
4762 | if (ret) | |
4763 | goto err_unref; | |
4764 | ||
4765 | i915->gt.scratch = vma; | |
4766 | return 0; | |
4767 | ||
4768 | err_unref: | |
4769 | i915_gem_object_put(obj); | |
4770 | return ret; | |
4771 | } | |
4772 | ||
4773 | static void i915_gem_fini_scratch(struct drm_i915_private *i915) | |
4774 | { | |
4775 | i915_vma_unpin_and_release(&i915->gt.scratch, 0); | |
4776 | } | |
4777 | ||
bf9e8429 | 4778 | int i915_gem_init(struct drm_i915_private *dev_priv) |
1070a42b | 4779 | { |
1070a42b CW |
4780 | int ret; |
4781 | ||
52b2416c CD |
4782 | /* We need to fallback to 4K pages if host doesn't support huge gtt. */ |
4783 | if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) | |
da9fe3f3 MA |
4784 | mkwrite_device_info(dev_priv)->page_sizes = |
4785 | I915_GTT_PAGE_SIZE_4K; | |
4786 | ||
94312828 | 4787 | dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); |
57822dc6 | 4788 | |
fb5c551a | 4789 | if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { |
821ed7df | 4790 | dev_priv->gt.resume = intel_lr_context_resume; |
117897f4 | 4791 | dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; |
fb5c551a CW |
4792 | } else { |
4793 | dev_priv->gt.resume = intel_legacy_submission_resume; | |
4794 | dev_priv->gt.cleanup_engine = intel_engine_cleanup; | |
a83014d3 OM |
4795 | } |
4796 | ||
1e345568 CW |
4797 | i915_timelines_init(dev_priv); |
4798 | ||
ee48700d CW |
4799 | ret = i915_gem_init_userptr(dev_priv); |
4800 | if (ret) | |
4801 | return ret; | |
4802 | ||
f7dc0157 | 4803 | ret = intel_uc_init_misc(dev_priv); |
6b0478fb JL |
4804 | if (ret) |
4805 | return ret; | |
4806 | ||
f7dc0157 | 4807 | ret = intel_wopcm_init(&dev_priv->wopcm); |
3176ff49 | 4808 | if (ret) |
f7dc0157 | 4809 | goto err_uc_misc; |
3176ff49 | 4810 | |
5e4f5189 CW |
4811 | /* This is just a security blanket to placate dragons. |
4812 | * On some systems, we very sporadically observe that the first TLBs | |
4813 | * used by the CS may be stale, despite us poking the TLB reset. If | |
4814 | * we hold the forcewake during initialisation these problems | |
4815 | * just magically go away. | |
4816 | */ | |
ee48700d | 4817 | mutex_lock(&dev_priv->drm.struct_mutex); |
3ceea6a1 | 4818 | intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); |
5e4f5189 | 4819 | |
f6b9d5ca | 4820 | ret = i915_gem_init_ggtt(dev_priv); |
6ca9a2be CW |
4821 | if (ret) { |
4822 | GEM_BUG_ON(ret == -EIO); | |
4823 | goto err_unlock; | |
4824 | } | |
d62b4892 | 4825 | |
51797499 | 4826 | ret = i915_gem_init_scratch(dev_priv, |
cf819eff | 4827 | IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE); |
6ca9a2be CW |
4828 | if (ret) { |
4829 | GEM_BUG_ON(ret == -EIO); | |
4830 | goto err_ggtt; | |
4831 | } | |
2fa48d8d | 4832 | |
51797499 CW |
4833 | ret = i915_gem_contexts_init(dev_priv); |
4834 | if (ret) { | |
4835 | GEM_BUG_ON(ret == -EIO); | |
4836 | goto err_scratch; | |
4837 | } | |
4838 | ||
bf9e8429 | 4839 | ret = intel_engines_init(dev_priv); |
6ca9a2be CW |
4840 | if (ret) { |
4841 | GEM_BUG_ON(ret == -EIO); | |
4842 | goto err_context; | |
4843 | } | |
2fa48d8d | 4844 | |
f58d13d5 CW |
4845 | intel_init_gt_powersave(dev_priv); |
4846 | ||
61b5c158 | 4847 | ret = intel_uc_init(dev_priv); |
cc6a818a | 4848 | if (ret) |
6ca9a2be | 4849 | goto err_pm; |
cc6a818a | 4850 | |
61b5c158 MW |
4851 | ret = i915_gem_init_hw(dev_priv); |
4852 | if (ret) | |
4853 | goto err_uc_init; | |
4854 | ||
cc6a818a CW |
4855 | /* |
4856 | * Despite its name intel_init_clock_gating applies both display | |
4857 | * clock gating workarounds; GT mmio workarounds and the occasional | |
4858 | * GT power context workaround. Worse, sometimes it includes a context | |
4859 | * register workaround which we need to apply before we record the | |
4860 | * default HW state for all contexts. | |
4861 | * | |
4862 | * FIXME: break up the workarounds and apply them at the right time! | |
4863 | */ | |
4864 | intel_init_clock_gating(dev_priv); | |
4865 | ||
d2b4b979 | 4866 | ret = __intel_engines_record_defaults(dev_priv); |
6ca9a2be CW |
4867 | if (ret) |
4868 | goto err_init_hw; | |
4869 | ||
4870 | if (i915_inject_load_failure()) { | |
4871 | ret = -ENODEV; | |
4872 | goto err_init_hw; | |
4873 | } | |
4874 | ||
4875 | if (i915_inject_load_failure()) { | |
4876 | ret = -EIO; | |
4877 | goto err_init_hw; | |
4878 | } | |
4879 | ||
3ceea6a1 | 4880 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
6ca9a2be CW |
4881 | mutex_unlock(&dev_priv->drm.struct_mutex); |
4882 | ||
4883 | return 0; | |
4884 | ||
4885 | /* | |
4886 | * Unwinding is complicated by the fact that we want to handle -EIO to mean | |
4887 | * disable GPU submission but keep KMS alive. We want to mark the | |
4888 | * HW as irreversibly wedged, but keep enough state around that the | |
4889 | * driver doesn't explode during runtime. | |
4890 | */ | |
4891 | err_init_hw: | |
8571a05a CW |
4892 | mutex_unlock(&dev_priv->drm.struct_mutex); |
4893 | ||
5861b013 | 4894 | i915_gem_suspend(dev_priv); |
8571a05a CW |
4895 | i915_gem_suspend_late(dev_priv); |
4896 | ||
8bcf9f70 CW |
4897 | i915_gem_drain_workqueue(dev_priv); |
4898 | ||
8571a05a | 4899 | mutex_lock(&dev_priv->drm.struct_mutex); |
6ca9a2be | 4900 | intel_uc_fini_hw(dev_priv); |
61b5c158 MW |
4901 | err_uc_init: |
4902 | intel_uc_fini(dev_priv); | |
6ca9a2be CW |
4903 | err_pm: |
4904 | if (ret != -EIO) { | |
4905 | intel_cleanup_gt_powersave(dev_priv); | |
4906 | i915_gem_cleanup_engines(dev_priv); | |
4907 | } | |
4908 | err_context: | |
4909 | if (ret != -EIO) | |
4910 | i915_gem_contexts_fini(dev_priv); | |
51797499 CW |
4911 | err_scratch: |
4912 | i915_gem_fini_scratch(dev_priv); | |
6ca9a2be CW |
4913 | err_ggtt: |
4914 | err_unlock: | |
3ceea6a1 | 4915 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
6ca9a2be CW |
4916 | mutex_unlock(&dev_priv->drm.struct_mutex); |
4917 | ||
f7dc0157 | 4918 | err_uc_misc: |
70deeadd | 4919 | intel_uc_fini_misc(dev_priv); |
da943b5a | 4920 | |
1e345568 | 4921 | if (ret != -EIO) { |
6ca9a2be | 4922 | i915_gem_cleanup_userptr(dev_priv); |
1e345568 CW |
4923 | i915_timelines_fini(dev_priv); |
4924 | } | |
6ca9a2be | 4925 | |
60990320 | 4926 | if (ret == -EIO) { |
7ed43df7 CW |
4927 | mutex_lock(&dev_priv->drm.struct_mutex); |
4928 | ||
6ca9a2be CW |
4929 | /* |
4930 | * Allow engine initialisation to fail by marking the GPU as | |
60990320 CW |
4931 | * wedged. But we only want to do this where the GPU is angry, |
4932 | * for all other failure, such as an allocation failure, bail. | |
4933 | */ | |
c41166f9 | 4934 | if (!i915_reset_failed(dev_priv)) { |
51c18bf7 CW |
4935 | i915_load_error(dev_priv, |
4936 | "Failed to initialize GPU, declaring it wedged!\n"); | |
6f74b36b CW |
4937 | i915_gem_set_wedged(dev_priv); |
4938 | } | |
7ed43df7 CW |
4939 | |
4940 | /* Minimal basic recovery for KMS */ | |
4941 | ret = i915_ggtt_enable_hw(dev_priv); | |
4942 | i915_gem_restore_gtt_mappings(dev_priv); | |
4943 | i915_gem_restore_fences(dev_priv); | |
4944 | intel_init_clock_gating(dev_priv); | |
4945 | ||
4946 | mutex_unlock(&dev_priv->drm.struct_mutex); | |
1070a42b CW |
4947 | } |
4948 | ||
6ca9a2be | 4949 | i915_gem_drain_freed_objects(dev_priv); |
60990320 | 4950 | return ret; |
1070a42b CW |
4951 | } |
4952 | ||
8979187a MW |
4953 | void i915_gem_fini(struct drm_i915_private *dev_priv) |
4954 | { | |
4955 | i915_gem_suspend_late(dev_priv); | |
30b71084 | 4956 | intel_disable_gt_powersave(dev_priv); |
8979187a MW |
4957 | |
4958 | /* Flush any outstanding unpin_work. */ | |
4959 | i915_gem_drain_workqueue(dev_priv); | |
4960 | ||
4961 | mutex_lock(&dev_priv->drm.struct_mutex); | |
4962 | intel_uc_fini_hw(dev_priv); | |
4963 | intel_uc_fini(dev_priv); | |
4964 | i915_gem_cleanup_engines(dev_priv); | |
4965 | i915_gem_contexts_fini(dev_priv); | |
51797499 | 4966 | i915_gem_fini_scratch(dev_priv); |
8979187a MW |
4967 | mutex_unlock(&dev_priv->drm.struct_mutex); |
4968 | ||
25d140fa TU |
4969 | intel_wa_list_free(&dev_priv->gt_wa_list); |
4970 | ||
30b71084 CW |
4971 | intel_cleanup_gt_powersave(dev_priv); |
4972 | ||
8979187a MW |
4973 | intel_uc_fini_misc(dev_priv); |
4974 | i915_gem_cleanup_userptr(dev_priv); | |
1e345568 | 4975 | i915_timelines_fini(dev_priv); |
8979187a MW |
4976 | |
4977 | i915_gem_drain_freed_objects(dev_priv); | |
4978 | ||
4979 | WARN_ON(!list_empty(&dev_priv->contexts.list)); | |
4980 | } | |
4981 | ||
24145517 CW |
4982 | void i915_gem_init_mmio(struct drm_i915_private *i915) |
4983 | { | |
4984 | i915_gem_sanitize(i915); | |
4985 | } | |
4986 | ||
8187a2b7 | 4987 | void |
cb15d9f8 | 4988 | i915_gem_cleanup_engines(struct drm_i915_private *dev_priv) |
8187a2b7 | 4989 | { |
e2f80391 | 4990 | struct intel_engine_cs *engine; |
3b3f1650 | 4991 | enum intel_engine_id id; |
8187a2b7 | 4992 | |
3b3f1650 | 4993 | for_each_engine(engine, dev_priv, id) |
117897f4 | 4994 | dev_priv->gt.cleanup_engine(engine); |
8187a2b7 ZN |
4995 | } |
4996 | ||
40ae4e16 ID |
4997 | void |
4998 | i915_gem_load_init_fences(struct drm_i915_private *dev_priv) | |
4999 | { | |
49ef5294 | 5000 | int i; |
40ae4e16 | 5001 | |
c56b89f1 | 5002 | if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) && |
40ae4e16 ID |
5003 | !IS_CHERRYVIEW(dev_priv)) |
5004 | dev_priv->num_fence_regs = 32; | |
c56b89f1 | 5005 | else if (INTEL_GEN(dev_priv) >= 4 || |
73f67aa8 JN |
5006 | IS_I945G(dev_priv) || IS_I945GM(dev_priv) || |
5007 | IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) | |
40ae4e16 ID |
5008 | dev_priv->num_fence_regs = 16; |
5009 | else | |
5010 | dev_priv->num_fence_regs = 8; | |
5011 | ||
c033666a | 5012 | if (intel_vgpu_active(dev_priv)) |
40ae4e16 ID |
5013 | dev_priv->num_fence_regs = |
5014 | I915_READ(vgtif_reg(avail_rs.fence_num)); | |
5015 | ||
5016 | /* Initialize fence registers to zero */ | |
49ef5294 CW |
5017 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
5018 | struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; | |
5019 | ||
5020 | fence->i915 = dev_priv; | |
5021 | fence->id = i; | |
5022 | list_add_tail(&fence->link, &dev_priv->mm.fence_list); | |
5023 | } | |
4362f4f6 | 5024 | i915_gem_restore_fences(dev_priv); |
40ae4e16 | 5025 | |
4362f4f6 | 5026 | i915_gem_detect_bit_6_swizzle(dev_priv); |
40ae4e16 ID |
5027 | } |
5028 | ||
9c52d1c8 CW |
5029 | static void i915_gem_init__mm(struct drm_i915_private *i915) |
5030 | { | |
5031 | spin_lock_init(&i915->mm.object_stat_lock); | |
5032 | spin_lock_init(&i915->mm.obj_lock); | |
5033 | spin_lock_init(&i915->mm.free_lock); | |
5034 | ||
5035 | init_llist_head(&i915->mm.free_list); | |
5036 | ||
5037 | INIT_LIST_HEAD(&i915->mm.unbound_list); | |
5038 | INIT_LIST_HEAD(&i915->mm.bound_list); | |
5039 | INIT_LIST_HEAD(&i915->mm.fence_list); | |
5040 | INIT_LIST_HEAD(&i915->mm.userfault_list); | |
5041 | ||
5042 | INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); | |
5043 | } | |
5044 | ||
a0de908d | 5045 | int i915_gem_init_early(struct drm_i915_private *dev_priv) |
673a394b | 5046 | { |
13f1bfd3 | 5047 | int err; |
d1b48c1e | 5048 | |
643b450a | 5049 | INIT_LIST_HEAD(&dev_priv->gt.active_rings); |
3365e226 | 5050 | INIT_LIST_HEAD(&dev_priv->gt.closed_vma); |
643b450a | 5051 | |
9c52d1c8 | 5052 | i915_gem_init__mm(dev_priv); |
f2123818 | 5053 | |
67d97da3 | 5054 | INIT_DELAYED_WORK(&dev_priv->gt.retire_work, |
673a394b | 5055 | i915_gem_retire_work_handler); |
67d97da3 | 5056 | INIT_DELAYED_WORK(&dev_priv->gt.idle_work, |
b29c19b6 | 5057 | i915_gem_idle_work_handler); |
1f15b76f | 5058 | init_waitqueue_head(&dev_priv->gpu_error.wait_queue); |
1f83fee0 | 5059 | init_waitqueue_head(&dev_priv->gpu_error.reset_queue); |
18bb2bcc | 5060 | mutex_init(&dev_priv->gpu_error.wedge_mutex); |
2caffbf1 | 5061 | init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); |
31169714 | 5062 | |
6f633402 JL |
5063 | atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); |
5064 | ||
b5add959 | 5065 | spin_lock_init(&dev_priv->fb_tracking.lock); |
73cb9701 | 5066 | |
465c403c MA |
5067 | err = i915_gemfs_init(dev_priv); |
5068 | if (err) | |
5069 | DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err); | |
5070 | ||
73cb9701 | 5071 | return 0; |
673a394b | 5072 | } |
71acb5eb | 5073 | |
a0de908d | 5074 | void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) |
d64aa096 | 5075 | { |
c4d4c1c6 | 5076 | i915_gem_drain_freed_objects(dev_priv); |
c9c70471 CW |
5077 | GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); |
5078 | GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); | |
c4d4c1c6 | 5079 | WARN_ON(dev_priv->mm.object_count); |
ea84aa77 | 5080 | |
2caffbf1 CW |
5081 | cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); |
5082 | ||
465c403c | 5083 | i915_gemfs_fini(dev_priv); |
d64aa096 ID |
5084 | } |
5085 | ||
6a800eab CW |
5086 | int i915_gem_freeze(struct drm_i915_private *dev_priv) |
5087 | { | |
d0aa301a CW |
5088 | /* Discard all purgeable objects, let userspace recover those as |
5089 | * required after resuming. | |
5090 | */ | |
6a800eab | 5091 | i915_gem_shrink_all(dev_priv); |
6a800eab | 5092 | |
6a800eab CW |
5093 | return 0; |
5094 | } | |
5095 | ||
95c778da | 5096 | int i915_gem_freeze_late(struct drm_i915_private *i915) |
461fb99c CW |
5097 | { |
5098 | struct drm_i915_gem_object *obj; | |
7aab2d53 | 5099 | struct list_head *phases[] = { |
95c778da CW |
5100 | &i915->mm.unbound_list, |
5101 | &i915->mm.bound_list, | |
7aab2d53 | 5102 | NULL |
95c778da | 5103 | }, **phase; |
461fb99c | 5104 | |
95c778da CW |
5105 | /* |
5106 | * Called just before we write the hibernation image. | |
461fb99c CW |
5107 | * |
5108 | * We need to update the domain tracking to reflect that the CPU | |
5109 | * will be accessing all the pages to create and restore from the | |
5110 | * hibernation, and so upon restoration those pages will be in the | |
5111 | * CPU domain. | |
5112 | * | |
5113 | * To make sure the hibernation image contains the latest state, | |
5114 | * we update that state just before writing out the image. | |
7aab2d53 CW |
5115 | * |
5116 | * To try and reduce the hibernation image, we manually shrink | |
d0aa301a | 5117 | * the objects as well, see i915_gem_freeze() |
461fb99c CW |
5118 | */ |
5119 | ||
95c778da CW |
5120 | i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND); |
5121 | i915_gem_drain_freed_objects(i915); | |
461fb99c | 5122 | |
95c778da CW |
5123 | mutex_lock(&i915->drm.struct_mutex); |
5124 | for (phase = phases; *phase; phase++) { | |
5125 | list_for_each_entry(obj, *phase, mm.link) | |
5126 | WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true)); | |
461fb99c | 5127 | } |
95c778da | 5128 | mutex_unlock(&i915->drm.struct_mutex); |
461fb99c CW |
5129 | |
5130 | return 0; | |
5131 | } | |
5132 | ||
f787a5f5 | 5133 | void i915_gem_release(struct drm_device *dev, struct drm_file *file) |
b962442e | 5134 | { |
f787a5f5 | 5135 | struct drm_i915_file_private *file_priv = file->driver_priv; |
e61e0f51 | 5136 | struct i915_request *request; |
b962442e EA |
5137 | |
5138 | /* Clean up our request list when the client is going away, so that | |
5139 | * later retire_requests won't dereference our soon-to-be-gone | |
5140 | * file_priv. | |
5141 | */ | |
1c25595f | 5142 | spin_lock(&file_priv->mm.lock); |
c8659efa | 5143 | list_for_each_entry(request, &file_priv->mm.request_list, client_link) |
f787a5f5 | 5144 | request->file_priv = NULL; |
1c25595f | 5145 | spin_unlock(&file_priv->mm.lock); |
b29c19b6 CW |
5146 | } |
5147 | ||
829a0af2 | 5148 | int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) |
b29c19b6 CW |
5149 | { |
5150 | struct drm_i915_file_private *file_priv; | |
e422b888 | 5151 | int ret; |
b29c19b6 | 5152 | |
c4c29d7b | 5153 | DRM_DEBUG("\n"); |
b29c19b6 CW |
5154 | |
5155 | file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); | |
5156 | if (!file_priv) | |
5157 | return -ENOMEM; | |
5158 | ||
5159 | file->driver_priv = file_priv; | |
829a0af2 | 5160 | file_priv->dev_priv = i915; |
ab0e7ff9 | 5161 | file_priv->file = file; |
b29c19b6 CW |
5162 | |
5163 | spin_lock_init(&file_priv->mm.lock); | |
5164 | INIT_LIST_HEAD(&file_priv->mm.request_list); | |
b29c19b6 | 5165 | |
c80ff16e | 5166 | file_priv->bsd_engine = -1; |
14921f3c | 5167 | file_priv->hang_timestamp = jiffies; |
de1add36 | 5168 | |
829a0af2 | 5169 | ret = i915_gem_context_open(i915, file); |
e422b888 BW |
5170 | if (ret) |
5171 | kfree(file_priv); | |
b29c19b6 | 5172 | |
e422b888 | 5173 | return ret; |
b29c19b6 CW |
5174 | } |
5175 | ||
b680c37a DV |
5176 | /** |
5177 | * i915_gem_track_fb - update frontbuffer tracking | |
d9072a3e GT |
5178 | * @old: current GEM buffer for the frontbuffer slots |
5179 | * @new: new GEM buffer for the frontbuffer slots | |
5180 | * @frontbuffer_bits: bitmask of frontbuffer slots | |
b680c37a DV |
5181 | * |
5182 | * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them | |
5183 | * from @old and setting them in @new. Both @old and @new can be NULL. | |
5184 | */ | |
a071fa00 DV |
5185 | void i915_gem_track_fb(struct drm_i915_gem_object *old, |
5186 | struct drm_i915_gem_object *new, | |
5187 | unsigned frontbuffer_bits) | |
5188 | { | |
faf5bf0a CW |
5189 | /* Control of individual bits within the mask is guarded by | |
5190 | * the owning plane->mutex, i.e. we can never see concurrent | |
5191 | * manipulation of individual bits. But since the bitfield as a whole | |
5192 | * is updated using RMW, we need to use atomics in order to update | |
5193 | * the bits. | |
5194 | */ | |
5195 | BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > | |
74f6e183 | 5196 | BITS_PER_TYPE(atomic_t)); |
faf5bf0a | 5197 | |
a071fa00 | 5198 | if (old) { |
faf5bf0a CW |
5199 | WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); |
5200 | atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); | |
a071fa00 DV |
5201 | } |
5202 | ||
5203 | if (new) { | |
faf5bf0a CW |
5204 | WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); |
5205 | atomic_or(frontbuffer_bits, &new->frontbuffer_bits); | |
a071fa00 DV |
5206 | } |
5207 | } | |
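Because frontbuffer_bits is modified with atomic read-modify-write operations rather than a plain load and store of the whole word, clearing bits on @old and setting them on @new cannot clobber bits that other planes are flipping concurrently. A small userspace analogue of that pattern, assuming C11 atomics instead of the kernel's atomic_andnot()/atomic_or() (the function and variable names are invented for illustration):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* Move the given tracking bits from one shared mask to another. */
static void track_bits(atomic_uint *old_mask, atomic_uint *new_mask,
		       unsigned int bits)
{
	if (old_mask)
		atomic_fetch_and(old_mask, ~bits);	/* clear bits in the old mask */
	if (new_mask)
		atomic_fetch_or(new_mask, bits);	/* set bits in the new mask */
}

int main(void)
{
	atomic_uint a = 5, b = 0;	/* bits 0 and 2 set on 'a' */

	track_bits(&a, &b, 1);		/* move bit 0 from 'a' to 'b' */
	assert(atomic_load(&a) == 4);
	assert(atomic_load(&b) == 1);
	printf("a=%u b=%u\n", atomic_load(&a), atomic_load(&b));
	return 0;
}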
5208 | ||
ea70299d DG |
5209 | /* Allocate a new GEM object and fill it with the supplied data */ |
5210 | struct drm_i915_gem_object * | |
12d79d78 | 5211 | i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, |
ea70299d DG |
5212 | const void *data, size_t size) |
5213 | { | |
5214 | struct drm_i915_gem_object *obj; | |
be062fa4 CW |
5215 | struct file *file; |
5216 | size_t offset; | |
5217 | int err; | |
ea70299d | 5218 | |
12d79d78 | 5219 | obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); |
fe3db79b | 5220 | if (IS_ERR(obj)) |
ea70299d DG |
5221 | return obj; |
5222 | ||
c0a51fd0 | 5223 | GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); |
ea70299d | 5224 | |
be062fa4 CW |
5225 | file = obj->base.filp; |
5226 | offset = 0; | |
5227 | do { | |
5228 | unsigned int len = min_t(typeof(size), size, PAGE_SIZE); | |
5229 | struct page *page; | |
5230 | void *pgdata, *vaddr; | |
ea70299d | 5231 | |
be062fa4 CW |
5232 | err = pagecache_write_begin(file, file->f_mapping, |
5233 | offset, len, 0, | |
5234 | &page, &pgdata); | |
5235 | if (err < 0) | |
5236 | goto fail; | |
ea70299d | 5237 | |
be062fa4 CW |
5238 | vaddr = kmap(page); |
5239 | memcpy(vaddr, data, len); | |
5240 | kunmap(page); | |
5241 | ||
5242 | err = pagecache_write_end(file, file->f_mapping, | |
5243 | offset, len, len, | |
5244 | page, pgdata); | |
5245 | if (err < 0) | |
5246 | goto fail; | |
5247 | ||
5248 | size -= len; | |
5249 | data += len; | |
5250 | offset += len; | |
5251 | } while (size); | |
ea70299d DG |
5252 | |
5253 | return obj; | |
5254 | ||
5255 | fail: | |
f8c417cd | 5256 | i915_gem_object_put(obj); |
be062fa4 | 5257 | return ERR_PTR(err); |
ea70299d | 5258 | } |
96d77634 CW |
5259 | |
5260 | struct scatterlist * | |
5261 | i915_gem_object_get_sg(struct drm_i915_gem_object *obj, | |
5262 | unsigned int n, | |
5263 | unsigned int *offset) | |
5264 | { | |
a4f5ea64 | 5265 | struct i915_gem_object_page_iter *iter = &obj->mm.get_page; |
96d77634 CW |
5266 | struct scatterlist *sg; |
5267 | unsigned int idx, count; | |
5268 | ||
5269 | might_sleep(); | |
5270 | GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); | |
a4f5ea64 | 5271 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); |
96d77634 CW |
5272 | |
5273 | /* As we iterate forward through the sg, we record each entry in a | |
5274 | * radixtree for quick repeated (backwards) lookups. If we have seen | |
5275 | * this index previously, we will have an entry for it. | |
5276 | * | |
5277 | * Initial lookup is O(N), but this is amortized to O(1) for | |
5278 | * sequential page access (where each new request is consecutive | |
5279 | * to the previous one). Repeated lookups are O(lg(obj->base.size)), | |
5280 | * i.e. O(1) with a large constant! | |
5281 | */ | |
5282 | if (n < READ_ONCE(iter->sg_idx)) | |
5283 | goto lookup; | |
5284 | ||
5285 | mutex_lock(&iter->lock); | |
5286 | ||
5287 | /* We prefer to reuse the last sg so that repeated lookups of this | |
5288 | * (or the subsequent) sg are fast - comparing against the last | |
5289 | * sg is faster than going through the radixtree. | |
5290 | */ | |
5291 | ||
5292 | sg = iter->sg_pos; | |
5293 | idx = iter->sg_idx; | |
5294 | count = __sg_page_count(sg); | |
5295 | ||
5296 | while (idx + count <= n) { | |
3159f943 MW |
5297 | void *entry; |
5298 | unsigned long i; | |
96d77634 CW |
5299 | int ret; |
5300 | ||
5301 | /* If we cannot allocate and insert this entry, or the | |
5302 | * individual pages from this range, cancel updating the | |
5303 | * sg_idx so that on this lookup we are forced to linearly | |
5304 | * scan onwards, but on future lookups we will try the | |
5305 | * insertion again (in which case we need to be careful of | |
5306 | * the error return reporting that we have already inserted | |
5307 | * this index). | |
5308 | */ | |
5309 | ret = radix_tree_insert(&iter->radix, idx, sg); | |
5310 | if (ret && ret != -EEXIST) | |
5311 | goto scan; | |
5312 | ||
3159f943 | 5313 | entry = xa_mk_value(idx); |
96d77634 | 5314 | for (i = 1; i < count; i++) { |
3159f943 | 5315 | ret = radix_tree_insert(&iter->radix, idx + i, entry); |
96d77634 CW |
5316 | if (ret && ret != -EEXIST) |
5317 | goto scan; | |
5318 | } | |
5319 | ||
5320 | idx += count; | |
5321 | sg = ____sg_next(sg); | |
5322 | count = __sg_page_count(sg); | |
5323 | } | |
5324 | ||
5325 | scan: | |
5326 | iter->sg_pos = sg; | |
5327 | iter->sg_idx = idx; | |
5328 | ||
5329 | mutex_unlock(&iter->lock); | |
5330 | ||
5331 | if (unlikely(n < idx)) /* insertion completed by another thread */ | |
5332 | goto lookup; | |
5333 | ||
5334 | /* In case we failed to insert the entry into the radixtree, we need | |
5335 | * to look beyond the current sg. | |
5336 | */ | |
5337 | while (idx + count <= n) { | |
5338 | idx += count; | |
5339 | sg = ____sg_next(sg); | |
5340 | count = __sg_page_count(sg); | |
5341 | } | |
5342 | ||
5343 | *offset = n - idx; | |
5344 | return sg; | |
5345 | ||
5346 | lookup: | |
5347 | rcu_read_lock(); | |
5348 | ||
5349 | sg = radix_tree_lookup(&iter->radix, n); | |
5350 | GEM_BUG_ON(!sg); | |
5351 | ||
5352 | /* If this index is in the middle of a multi-page sg entry, | |
3159f943 | 5353 | * the radix tree will contain a value entry that points |
96d77634 CW |
5354 | * to the start of that range. We will return the pointer to |
5355 | * the base page and the offset of this page within the | |
5356 | * sg entry's range. | |
5357 | */ | |
5358 | *offset = 0; | |
3159f943 MW |
5359 | if (unlikely(xa_is_value(sg))) { |
5360 | unsigned long base = xa_to_value(sg); | |
96d77634 CW |
5361 | |
5362 | sg = radix_tree_lookup(&iter->radix, base); | |
5363 | GEM_BUG_ON(!sg); | |
5364 | ||
5365 | *offset = n - base; | |
5366 | } | |
5367 | ||
5368 | rcu_read_unlock(); | |
5369 | ||
5370 | return sg; | |
5371 | } | |
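The cached iter->sg_pos/iter->sg_idx pair above is what makes the common case of monotonically increasing page indices cheap; the radix tree only has to serve random or backwards jumps. A rough userspace sketch of a forward walk with a cached position over variable-length chunks, assuming a much-simplified data structure and omitting the radix-tree fallback and locking (all names are illustrative):

#include <stdio.h>

struct chunk {
	struct chunk *next;
	unsigned int npages;	/* pages covered by this chunk */
};

struct iter {
	struct chunk *pos;	/* cached chunk */
	unsigned int base;	/* index of the first page in 'pos' */
};

/*
 * Resolve page n, resuming from the cached position. Forward-only: the
 * real code falls back to a radix-tree lookup when n moves backwards.
 */
static struct chunk *lookup(struct iter *it, unsigned int n,
			    unsigned int *offset)
{
	while (it->base + it->pos->npages <= n) {
		it->base += it->pos->npages;
		it->pos = it->pos->next;
	}
	*offset = n - it->base;
	return it->pos;
}

int main(void)
{
	struct chunk c2 = { NULL, 8 }, c1 = { &c2, 4 }, c0 = { &c1, 2 };
	struct iter it = { &c0, 0 };
	unsigned int off;

	lookup(&it, 1, &off);	/* walks from the head: chunk c0, offset 1 */
	lookup(&it, 5, &off);	/* resumes at c0: chunk c1, offset 3 */
	printf("offset %u\n", off);
	return 0;
}

Consecutive calls with increasing n touch only the chunks between the cached position and the target, which is the amortised-O(1) behaviour described in the comment at the top of i915_gem_object_get_sg().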
5372 | ||
5373 | struct page * | |
5374 | i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) | |
5375 | { | |
5376 | struct scatterlist *sg; | |
5377 | unsigned int offset; | |
5378 | ||
5379 | GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); | |
5380 | ||
5381 | sg = i915_gem_object_get_sg(obj, n, &offset); | |
5382 | return nth_page(sg_page(sg), offset); | |
5383 | } | |
5384 | ||
5385 | /* Like i915_gem_object_get_page(), but mark the returned page dirty */ | |
5386 | struct page * | |
5387 | i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, | |
5388 | unsigned int n) | |
5389 | { | |
5390 | struct page *page; | |
5391 | ||
5392 | page = i915_gem_object_get_page(obj, n); | |
a4f5ea64 | 5393 | if (!obj->mm.dirty) |
96d77634 CW |
5394 | set_page_dirty(page); |
5395 | ||
5396 | return page; | |
5397 | } | |
5398 | ||
5399 | dma_addr_t | |
5400 | i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, | |
5401 | unsigned long n) | |
5402 | { | |
5403 | struct scatterlist *sg; | |
5404 | unsigned int offset; | |
5405 | ||
5406 | sg = i915_gem_object_get_sg(obj, n, &offset); | |
5407 | return sg_dma_address(sg) + (offset << PAGE_SHIFT); | |
5408 | } | |
935a2f77 | 5409 | |
8eeb7906 CW |
5410 | int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) |
5411 | { | |
5412 | struct sg_table *pages; | |
5413 | int err; | |
5414 | ||
5415 | if (align > obj->base.size) | |
5416 | return -EINVAL; | |
5417 | ||
5418 | if (obj->ops == &i915_gem_phys_ops) | |
5419 | return 0; | |
5420 | ||
5421 | if (obj->ops != &i915_gem_object_ops) | |
5422 | return -EINVAL; | |
5423 | ||
5424 | err = i915_gem_object_unbind(obj); | |
5425 | if (err) | |
5426 | return err; | |
5427 | ||
5428 | mutex_lock(&obj->mm.lock); | |
5429 | ||
5430 | if (obj->mm.madv != I915_MADV_WILLNEED) { | |
5431 | err = -EFAULT; | |
5432 | goto err_unlock; | |
5433 | } | |
5434 | ||
5435 | if (obj->mm.quirked) { | |
5436 | err = -EFAULT; | |
5437 | goto err_unlock; | |
5438 | } | |
5439 | ||
5440 | if (obj->mm.mapping) { | |
5441 | err = -EBUSY; | |
5442 | goto err_unlock; | |
5443 | } | |
5444 | ||
acd1c1e6 | 5445 | pages = __i915_gem_object_unset_pages(obj); |
f2123818 | 5446 | |
8eeb7906 CW |
5447 | obj->ops = &i915_gem_phys_ops; |
5448 | ||
8fb6a5df | 5449 | err = ____i915_gem_object_get_pages(obj); |
8eeb7906 CW |
5450 | if (err) |
5451 | goto err_xfer; | |
5452 | ||
5453 | /* Perma-pin (until release) the physical set of pages */ | |
5454 | __i915_gem_object_pin_pages(obj); | |
5455 | ||
5456 | if (!IS_ERR_OR_NULL(pages)) | |
5457 | i915_gem_object_ops.put_pages(obj, pages); | |
5458 | mutex_unlock(&obj->mm.lock); | |
5459 | return 0; | |
5460 | ||
5461 | err_xfer: | |
5462 | obj->ops = &i915_gem_object_ops; | |
acd1c1e6 CW |
5463 | if (!IS_ERR_OR_NULL(pages)) { |
5464 | unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl); | |
5465 | ||
5466 | __i915_gem_object_set_pages(obj, pages, sg_page_sizes); | |
5467 | } | |
8eeb7906 CW |
5468 | err_unlock: |
5469 | mutex_unlock(&obj->mm.lock); | |
5470 | return err; | |
5471 | } | |
5472 | ||
935a2f77 CW |
5473 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
5474 | #include "selftests/scatterlist.c" | |
66d9cb5d | 5475 | #include "selftests/mock_gem_device.c" |
44653988 | 5476 | #include "selftests/huge_gem_object.c" |
4049866f | 5477 | #include "selftests/huge_pages.c" |
8335fd65 | 5478 | #include "selftests/i915_gem_object.c" |
17059450 | 5479 | #include "selftests/i915_gem_coherency.c" |
3f51b7e1 | 5480 | #include "selftests/i915_gem.c" |
935a2f77 | 5481 | #endif |