/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/highmem.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_dmabuf.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_gem_ttm.h"
#include "i915_memcpy.h"
#include "i915_trace.h"

static struct kmem_cache *slab_objects;

static const struct drm_gem_object_funcs i915_gem_object_funcs;

unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
				    enum i915_cache_level level)
{
	if (drm_WARN_ON(&i915->drm, level >= I915_MAX_CACHE_LEVEL))
		return 0;

	return INTEL_INFO(i915)->cachelevel_to_pat[level];
}
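
/*
 * Illustrative sketch (editorial, not code from this file): a caller that
 * needs the PAT index the kernel uses for uncached access would do
 *
 *	unsigned int pat = i915_gem_get_pat_index(i915, I915_CACHE_NONE);
 *
 * For an out-of-range cache level the drm_WARN_ON() above fires and index 0
 * is returned as a safe fallback.
 */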

bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
				     enum i915_cache_level lvl)
{
	/*
	 * In case the pat_index is set by user space, this kernel mode
	 * driver should leave the coherency to be managed by user space,
	 * simply return true here.
	 */
	if (obj->pat_set_by_user)
		return true;

	/*
	 * Otherwise the pat_index should have been converted from cache_level
	 * so that the following comparison is valid.
	 */
	return obj->pat_index == i915_gem_get_pat_index(obj_to_i915(obj), lvl);
}

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	struct drm_i915_gem_object *obj;

	obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->base.funcs = &i915_gem_object_funcs;

	return obj;
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	return kmem_cache_free(slab_objects, obj);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
	/*
	 * A gem object is embedded both in a struct ttm_buffer_object :/ and
	 * in a drm_i915_gem_object. Make sure they are aliased.
	 */
	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
		     offsetof(typeof(*obj), __do_not_access.base));

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);

	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
	obj->flags = flags;

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
}

/**
 * __i915_gem_object_fini - Clean up a GEM object initialization
 * @obj: The gem object to cleanup
 *
 * This function cleans up gem object fields that are set up by
 * drm_gem_private_object_init() and i915_gem_object_init().
 * It's primarily intended as a helper for backends that need to
 * clean up the gem object in separate steps.
 */
void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
{
	mutex_destroy(&obj->mm.get_page.lock);
	mutex_destroy(&obj->mm.get_dma_page.lock);
	dma_resv_fini(&obj->base._resv);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	obj->pat_index = i915_gem_get_pat_index(i915, cache_level);

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(i915))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
		!IS_DGFX(i915);
}
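
/*
 * Illustrative sketch (editorial): a backend typically picks a default cache
 * level at object creation and derives the coherency tracking from it, e.g.
 *
 *	i915_gem_object_set_cache_coherency(obj, HAS_LLC(i915) ?
 *					    I915_CACHE_LLC : I915_CACHE_NONE);
 *
 * which fills in obj->pat_index, obj->cache_coherent and obj->cache_dirty
 * as above.
 */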

/**
 * i915_gem_object_set_pat_index - set PAT index to be used in PTE encode
 * @obj: #drm_i915_gem_object
 * @pat_index: PAT index
 *
 * This is a clone of i915_gem_object_set_cache_coherency taking pat index
 * instead of cache_level as its second argument.
 */
void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
				   unsigned int pat_index)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	if (obj->pat_index == pat_index)
		return;

	obj->pat_index = pat_index;

	if (pat_index != i915_gem_get_pat_index(i915, I915_CACHE_NONE))
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(i915))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
		!IS_DGFX(i915);
}

bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/*
	 * This is purely from a security perspective, so we simply don't care
	 * about non-userspace objects being able to bypass the LLC.
	 */
	if (!(obj->flags & I915_BO_ALLOC_USER))
		return false;

	/*
	 * Always flush cache for UMD objects at creation time.
	 */
	if (obj->pat_set_by_user)
		return true;

	/*
	 * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
	 * possible for userspace to bypass the GTT caching bits set by the
	 * kernel, as per the given object cache_level. This is troublesome
	 * since the heavy flush we apply when first gathering the pages is
	 * skipped if the kernel thinks the object is coherent with the GPU. As
	 * a result it might be possible to bypass the cache and read the
	 * contents of the page directly, which could be stale data. If it's
	 * just a case of userspace shooting themselves in the foot then so be
	 * it, but since i915 takes the stance of always zeroing memory before
	 * handing it to userspace, we need to prevent this.
	 */
	return (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915));
}
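
/*
 * Illustrative sketch (editorial): a shmem-like backend can use this check
 * when first acquiring pages, forcing a flush for objects that could
 * otherwise read stale data through the LLC-bypassing MOCS entry:
 *
 *	if (i915_gem_object_can_bypass_llc(obj))
 *		obj->cache_dirty = true;
 */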

static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle bookmark = {};
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx && ctx->file_priv == fpriv) {
			i915_gem_context_get(ctx);
			list_move(&lut->obj_link, &close);
		}

		/* Break long locks, and carefully continue on from this spot */
		if (&ln->obj_link != &obj->lut_list) {
			list_add_tail(&bookmark.obj_link, &ln->obj_link);
			if (cond_resched_lock(&obj->lut_lock))
				list_safe_reset_next(&bookmark, ln, obj_link);
			__list_del_entry(&bookmark.obj_link);
		}
	}
	spin_unlock(&obj->lut_lock);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		/*
		 * We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */

		mutex_lock(&ctx->lut_mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->lut_mutex);

		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}

void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	i915_gem_object_free(obj);

	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);
}

static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
	/* Skip serialisation and waking the device if known to be not used. */

	if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
		i915_gem_object_release_mmap_gtt(obj);

	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
		struct i915_mmap_offset *mmo, *mn;

		i915_gem_object_release_mmap_offset(obj);

		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		obj->mmo.offsets = RB_ROOT;
	}
}

/**
 * __i915_gem_object_pages_fini - Clean up pages use of a gem object
 * @obj: The gem object to clean up
 *
 * This function cleans up usage of the object mm.pages member. It
 * is intended for backends that need to clean up a gem object in
 * separate steps and needs to be called when the object is idle before
 * the object's backing memory is freed.
 */
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);

	if (!list_empty(&obj->vma.list)) {
		struct i915_vma *vma;

		spin_lock(&obj->vma.lock);
		while ((vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
			GEM_BUG_ON(vma->obj != obj);
			spin_unlock(&obj->vma.lock);

			i915_vma_destroy(vma);

			spin_lock(&obj->vma.lock);
		}
		spin_unlock(&obj->vma.lock);
	}

	__i915_gem_object_free_mmaps(obj);

	atomic_set(&obj->mm.pages_pin_count, 0);

	/*
	 * dma_buf_unmap_attachment() requires reservation to be
	 * locked. The imported GEM shouldn't share reservation lock
	 * and ttm_bo_cleanup_memtype_use() shouldn't be invoked for
	 * dma-buf, so it's safe to take the lock.
	 */
	if (obj->base.import_attach)
		i915_gem_object_lock(obj, NULL);

	__i915_gem_object_put_pages(obj);

	if (obj->base.import_attach)
		i915_gem_object_unlock(obj);

	GEM_BUG_ON(i915_gem_object_has_pages(obj));
}

void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
	trace_i915_gem_object_destroy(obj);

	GEM_BUG_ON(!list_empty(&obj->lut_list));

	bitmap_free(obj->bit_17);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_free_mmap_offset(&obj->base);

	if (obj->ops->release)
		obj->ops->release(obj);

	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);

	if (obj->shares_resv_from)
		i915_vm_resv_put(obj->shares_resv_from);

	__i915_gem_object_fini(obj);
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		might_sleep();
		if (obj->ops->delayed_free) {
			obj->ops->delayed_free(obj);
			continue;
		}

		__i915_gem_object_pages_fini(obj);
		__i915_gem_free_object(obj);

		/* But keep the pointer alive for RCU-protected lookups */
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);

	i915_gem_flush_free_objects(i915);
}

static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&i915->mm.free_count);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we can not do that directly from the RCU callback (which may
	 * be a softirq context), but must instead then defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
	 */

	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(i915->wq, &i915->mm.free_work);
}

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = i915_gem_object_get_frontbuffer(obj);
	if (front) {
		intel_frontbuffer_flush(front, origin);
		intel_frontbuffer_put(front);
	}
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = i915_gem_object_get_frontbuffer(obj);
	if (front) {
		intel_frontbuffer_invalidate(front, origin);
		intel_frontbuffer_put(front);
	}
}

static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	pgoff_t idx = offset >> PAGE_SHIFT;
	void *src_map;
	void *src_ptr;

	src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));

	src_ptr = src_map + offset_in_page(offset);
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_virt_range(src_ptr, size);
	memcpy(dst, src_ptr, size);

	kunmap_atomic(src_map);
}

static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	pgoff_t idx = offset >> PAGE_SHIFT;
	dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx);
	void __iomem *src_map;
	void __iomem *src_ptr;

	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
				    dma - obj->mm.region->region.start,
				    PAGE_SIZE);

	src_ptr = src_map + offset_in_page(offset);
	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
		memcpy_fromio(dst, src_ptr, size);

	io_mapping_unmap(src_map);
}

static bool object_has_mappable_iomem(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_iomem(obj));

	if (IS_DGFX(to_i915(obj->base.dev)))
		return i915_ttm_resource_mappable(i915_gem_to_ttm(obj)->resource);

	return true;
}

/**
 * i915_gem_object_read_from_page - read data from the page of a GEM object
 * @obj: GEM object to read from
 * @offset: offset within the object
 * @dst: buffer to store the read data
 * @size: size to read
 *
 * Reads data from @obj at the specified offset. The requested region to read
 * from can't cross a page boundary. The caller must ensure that @obj pages
 * are pinned and that @obj is synced wrt. any related writes.
 *
 * Return: %0 on success or -ENODEV if the type of @obj's backing store is
 * unsupported.
 */
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	GEM_BUG_ON(overflows_type(offset >> PAGE_SHIFT, pgoff_t));
	GEM_BUG_ON(offset >= obj->base.size);
	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
	else if (i915_gem_object_has_iomem(obj) && object_has_mappable_iomem(obj))
		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
	else
		return -ENODEV;

	return 0;
}
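
/*
 * Illustrative sketch (editorial): reading the first u64 of an object. The
 * pin helper is assumed from the surrounding API and error handling is
 * elided.
 *
 *	u64 value = 0;
 *
 *	if (!i915_gem_object_pin_pages_unlocked(obj)) {
 *		i915_gem_object_read_from_page(obj, 0, &value, sizeof(value));
 *		i915_gem_object_unpin_pages(obj);
 *	}
 */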

/**
 * i915_gem_object_evictable - Whether object is likely evictable after unbind.
 * @obj: The object to check
 *
 * This function checks whether the object is likely evictable after unbind.
 * If the object is not locked when checking, the result is only advisory.
 * If the object is locked when checking, and the function returns true,
 * then an eviction should indeed be possible. But since unlocked vma
 * unpinning and unbinding is currently possible, the object can actually
 * become evictable even if this function returns false.
 *
 * Return: true if the object may be evictable. False otherwise.
 */
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = atomic_read(&obj->mm.pages_pin_count);

	if (!pin_count)
		return true;

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma)) {
			spin_unlock(&obj->vma.lock);
			return false;
		}
		if (atomic_read(&vma->pages_count))
			pin_count--;
	}
	spin_unlock(&obj->vma.lock);
	GEM_WARN_ON(pin_count < 0);

	return pin_count == 0;
}
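
/*
 * Illustrative use (editorial): the migration helpers below rely on this
 * check, e.g. i915_gem_object_can_migrate() bails out on a pinned object:
 *
 *	if (!i915_gem_object_evictable(obj))
 *		return false;
 */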

/**
 * i915_gem_object_migratable - Whether the object is migratable out of the
 * current region.
 * @obj: Pointer to the object.
 *
 * Return: Whether the object is allowed to be resident in other
 * regions than the current while pages are present.
 */
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

	if (!mr)
		return false;

	return obj->mm.n_placements > 1;
}

/**
 * i915_gem_object_has_struct_page - Whether the object is page-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the page backing may change under the caller.
 *
 * Return: True if page-backed, false otherwise.
 */
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
}

/**
 * i915_gem_object_has_iomem - Whether the object is iomem-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the iomem backing may change under the caller.
 *
 * Return: True if iomem-backed, false otherwise.
 */
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_IOMEM;
}

/**
 * i915_gem_object_can_migrate - Whether an object likely can be migrated
 *
 * @obj: The object to migrate
 * @id: The region intended to migrate to
 *
 * Check whether the object backend supports migration to the
 * given region. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate objects and might be slightly less permissive
 * than i915_gem_object_migrate() when it comes to objects with the
 * I915_BO_ALLOC_USER flag set.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int num_allowed = obj->mm.n_placements;
	struct intel_memory_region *mr;
	unsigned int i;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);

	mr = i915->mm.regions[id];
	if (!mr)
		return false;

	if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
		return false;

	if (obj->mm.region == mr)
		return true;

	if (!i915_gem_object_evictable(obj))
		return false;

	if (!obj->ops->migrate)
		return false;

	if (!(obj->flags & I915_BO_ALLOC_USER))
		return true;

	if (num_allowed == 0)
		return false;

	for (i = 0; i < num_allowed; ++i) {
		if (mr == obj->mm.placements[i])
			return true;
	}

	return false;
}

/**
 * i915_gem_object_migrate - Migrate an object to the desired region id
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
 * not be successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 *
 * Attempt to migrate the object to the desired memory region. The
 * object backend must support migration and the object may not be
 * pinned, (explicitly pinned pages or pinned vmas). The object must
 * be locked.
 * On successful completion, the object will have pages pointing to
 * memory in the new region, but an async migration task may not have
 * completed yet, and to accomplish that, i915_gem_object_wait_migration()
 * must be called.
 *
 * Note: the @ww parameter is not used yet, but included to make sure
 * callers put some effort into obtaining a valid ww ctx if one is
 * available.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
 * -EBUSY if the object is pinned.
 */
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id)
{
	return __i915_gem_object_migrate(obj, ww, id, obj->flags);
}
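
/*
 * Illustrative sketch (editorial): migrating an object to system memory
 * under the object lock and waiting for the async copy. INTEL_REGION_SMEM
 * and i915_gem_object_wait_migration() are assumed from the wider API.
 *
 *	int err;
 *
 *	err = i915_gem_object_lock(obj, NULL);
 *	if (!err) {
 *		err = i915_gem_object_migrate(obj, NULL, INTEL_REGION_SMEM);
 *		if (!err)
 *			err = i915_gem_object_wait_migration(obj, 0);
 *		i915_gem_object_unlock(obj);
 *	}
 */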

/**
 * __i915_gem_object_migrate - Migrate an object to the desired region id, with
 * control of the extra flags
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
 * not be successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 * @flags: The object flags. Normally just obj->flags.
 *
 * Attempt to migrate the object to the desired memory region. The
 * object backend must support migration and the object may not be
 * pinned, (explicitly pinned pages or pinned vmas). The object must
 * be locked.
 * On successful completion, the object will have pages pointing to
 * memory in the new region, but an async migration task may not have
 * completed yet, and to accomplish that, i915_gem_object_wait_migration()
 * must be called.
 *
 * Note: the @ww parameter is not used yet, but included to make sure
 * callers put some effort into obtaining a valid ww ctx if one is
 * available.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
 * -EBUSY if the object is pinned.
 */
int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			      struct i915_gem_ww_ctx *ww,
			      enum intel_region_id id,
			      unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mr;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
	assert_object_held(obj);

	mr = i915->mm.regions[id];
	GEM_BUG_ON(!mr);

	if (!i915_gem_object_can_migrate(obj, id))
		return -EINVAL;

	if (!obj->ops->migrate) {
		if (GEM_WARN_ON(obj->mm.region != mr))
			return -EINVAL;
		return 0;
	}

	return obj->ops->migrate(obj, mr, flags);
}

/**
 * i915_gem_object_placement_possible - Check whether the object can be
 * placed at certain memory type
 * @obj: Pointer to the object
 * @type: The memory type to check
 *
 * Return: True if the object can be placed in @type. False otherwise.
 */
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type)
{
	unsigned int i;

	if (!obj->mm.n_placements) {
		switch (type) {
		case INTEL_MEMORY_LOCAL:
			return i915_gem_object_has_iomem(obj);
		case INTEL_MEMORY_SYSTEM:
			return i915_gem_object_has_pages(obj);
		default:
			/* Ignore stolen for now */
			GEM_BUG_ON(1);
			return false;
		}
	}

	for (i = 0; i < obj->mm.n_placements; i++) {
		if (obj->mm.placements[i]->type == type)
			return true;
	}

	return false;
}

/**
 * i915_gem_object_needs_ccs_pages - Check whether the object requires extra
 * pages when placed in system-memory, in order to save and later restore the
 * flat-CCS aux state when the object is moved between local-memory and
 * system-memory
 * @obj: Pointer to the object
 *
 * Return: True if the object needs extra ccs pages. False otherwise.
 */
bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
{
	bool lmem_placement = false;
	int i;

	if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
		return false;

	if (obj->flags & I915_BO_ALLOC_CCS_AUX)
		return true;

	for (i = 0; i < obj->mm.n_placements; i++) {
		/* Compression is not allowed for the objects with smem placement */
		if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
			return false;
		if (!lmem_placement &&
		    obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
			lmem_placement = true;
	}

	return lmem_placement;
}
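
/*
 * Worked example (editorial): on a flat-CCS platform, an object whose only
 * allowed placement is local-memory returns true here, so extra pages are
 * reserved to hold its CCS state while evicted to system memory. An object
 * that may also be placed in system memory is never compressed and returns
 * false.
 */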

void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

void i915_objects_module_exit(void)
{
	kmem_cache_destroy(slab_objects);
}

int __init i915_objects_module_init(void)
{
	slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!slab_objects)
		return -ENOMEM;

	return 0;
}

static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
};

/**
 * i915_gem_object_get_moving_fence - Get the object's moving fence if any
 * @obj: The object whose moving fence to get.
 * @fence: The resulting fence
 *
 * A non-signaled moving fence means that there is an async operation
 * pending on the object that needs to be waited on before setting up
 * any GPU- or CPU PTEs to the object's pages.
 *
 * Return: Negative error code or 0 for success.
 */
int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence)
{
	return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				      fence);
}
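
/*
 * Illustrative sketch (editorial): sampling the moving fence without
 * blocking; the caller owns the returned reference.
 *
 *	struct dma_fence *fence = NULL;
 *
 *	if (!i915_gem_object_get_moving_fence(obj, &fence) && fence) {
 *		... wait on or inspect the fence ...
 *		dma_fence_put(fence);
 *	}
 */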

/**
 * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
 * @obj: The object whose moving fence to wait for.
 * @intr: Whether to wait interruptible.
 *
 * If the moving fence signaled without an error, it is detached from the
 * object and put.
 *
 * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
 * negative error code if the async operation represented by the
 * moving fence failed.
 */
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr)
{
	long ret;

	assert_object_held(obj);

	ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				    intr, MAX_SCHEDULE_TIMEOUT);
	if (!ret)
		ret = -ETIME;
	else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
		ret = -EIO;

	return ret < 0 ? ret : 0;
}
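
/*
 * Illustrative sketch (editorial): draining a pending async move before CPU
 * access:
 *
 *	i915_gem_object_lock(obj, NULL);
 *	err = i915_gem_object_wait_moving_fence(obj, true);
 *	i915_gem_object_unlock(obj);
 *
 * A -EIO return means the move failed and the backing pages are in an
 * unknown state, see i915_gem_object_has_unknown_state() below.
 */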

/*
 * i915_gem_object_has_unknown_state - Return true if the object backing pages are
 * in an unknown_state. This means that userspace must NEVER be allowed to touch
 * the pages, with either the GPU or CPU.
 *
 * ONLY valid to be called after ensuring that all kernel fences have signalled
 * (in particular the fence for moving/clearing the object).
 */
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
{
	/*
	 * The below barrier pairs with the dma_fence_signal() in
	 * __memcpy_work(). We should only sample the unknown_state after all
	 * the kernel fences have signalled.
	 */
	smp_rmb();
	return obj->mm.unknown_state;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_migrate.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif