/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM
 * object in a given shmfs mountpoint
 *
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use
 * the usual tmpfs mountpoint (`shm_mnt`).
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init_with_mnt(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size,
				 struct vfsmount *gemfs)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	if (gemfs)
		filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size,
						 VM_NORESERVE);
	else
		filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init_with_mnt);

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
			size_t size)
{
	return drm_gem_object_init_with_mnt(dev, obj, size, NULL);
}
EXPORT_SYMBOL(drm_gem_object_init);

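/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * wrapping drm_gem_object_init(). The foo_object struct, foo_gem_funcs and
 * foo_create_object() are hypothetical driver names.
 *
 *	struct foo_object {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_object *foo_create_object(struct drm_device *dev,
 *						    size_t size)
 *	{
 *		struct foo_object *fobj;
 *		int ret;
 *
 *		fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		if (!fobj)
 *			return ERR_PTR(-ENOMEM);
 *
 *		fobj->base.funcs = &foo_gem_funcs;
 *
 *		// size must be page-aligned, see drm_gem_private_object_init()
 *		ret = drm_gem_object_init(dev, &fobj->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(fobj);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return fobj;
 *	}
 */
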
/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
		drm_gem_gpuva_init(obj);

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

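/*
 * Usage sketch (illustrative only): drivers whose buffers live in memory that
 * GEM does not manage (e.g. device-local VRAM) initialize with
 * drm_gem_private_object_init() and provide the backing store themselves.
 * The foo_object wrapper and foo_vram_alloc() are hypothetical.
 *
 *	static int foo_init_vram_object(struct drm_device *dev,
 *					struct foo_object *fobj, size_t size)
 *	{
 *		drm_gem_private_object_init(dev, &fobj->base, PAGE_ALIGN(size));
 *
 *		// allocate the driver-managed backing store
 *		fobj->vram = foo_vram_alloc(dev, fobj->base.size);
 *		if (!fobj->vram) {
 *			drm_gem_private_object_fini(&fobj->base);
 *			return -ENOMEM;
 *		}
 *
 *		return 0;
 *	}
 */
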
/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);

static void drm_gem_object_handle_get(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));

	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);
}

/**
 * drm_gem_object_handle_get_unlocked - acquire reference on user-space handles
 * @obj: GEM object
 *
 * Acquires a reference on the GEM buffer object's handle. Required
 * to keep the GEM object alive. Call drm_gem_object_handle_put_unlocked()
 * to release the reference.
 */
void drm_gem_object_handle_get_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	guard(mutex)(&dev->object_name_lock);

	drm_WARN_ON(dev, !obj->handle_count); /* first ref taken in create-tail helper */
	drm_gem_object_handle_get(obj);
}
EXPORT_SYMBOL(drm_gem_object_handle_get_unlocked);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

/**
 * drm_gem_object_handle_put_unlocked - releases reference on user-space handles
 * @obj: GEM object
 *
 * Releases a reference on the GEM buffer object's handle. Possibly releases
 * the GEM buffer object and associated dma-buf objects.
 */
void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_object_handle_put_unlocked);

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (drm_gem_is_imported(obj)) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	drm_gem_object_handle_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this
 * point; drivers must call this last in their buffer object creation
 * callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

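/*
 * Usage sketch (illustrative only): the tail of a driver's buffer-creation
 * ioctl. The drm_foo_create struct and foo_create_object() are hypothetical.
 * Note that the handle now holds the reference that keeps the object alive,
 * so the creation reference is dropped unconditionally.
 *
 *	static int foo_gem_create_ioctl(struct drm_device *dev, void *data,
 *					struct drm_file *file)
 *	{
 *		struct drm_foo_create *args = data;
 *		struct foo_object *fobj;
 *		u32 handle;
 *		int ret;
 *
 *		fobj = foo_create_object(dev, args->size);
 *		if (IS_ERR(fobj))
 *			return PTR_ERR(fobj);
 *
 *		ret = drm_gem_handle_create(file, &fobj->base, &handle);
 *		// drop the creation reference; the handle owns one now
 *		drm_gem_object_put(&fobj->base);
 *		if (ret)
 *			return ret;
 *
 *		args->handle = handle;
 *		return 0;
 *	}
 */
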
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move folios to the appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page **pages;
	struct folio *folio;
	struct folio_batch fbatch;
	long i, j, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	i = 0;
	while (i < npages) {
		long nr;

		folio = shmem_read_folio_gfp(mapping, i,
					     mapping_gfp_mask(mapping));
		if (IS_ERR(folio))
			goto fail;
		nr = min(npages - i, folio_nr_pages(folio));
		for (j = 0; j < nr; j++, i++)
			pages[i] = folio_file_page(folio, i);

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (folio_pfn(folio) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	folio_batch_init(&fbatch);
	j = 0;
	while (j < i) {
		struct folio *f = page_folio(pages[j]);

		if (!folio_batch_add(&fbatch, f))
			drm_gem_check_release_batch(&fbatch);
		j += folio_nr_pages(f);
	}
	if (fbatch.nr)
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
	return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct folio_batch fbatch;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	for (i = 0; i < npages; i++) {
		struct folio *folio;

		if (!pages[i])
			continue;
		folio = page_folio(pages[i]);

		if (dirty)
			folio_mark_dirty(folio);

		if (accessed)
			folio_mark_accessed(folio);

		/* Undo the reference we took when populating the table */
		if (!folio_batch_add(&fbatch, folio))
			drm_gem_check_release_batch(&fbatch);
		i += folio_nr_pages(folio) - 1;
	}
	if (folio_batch_count(&fbatch))
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

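/*
 * Usage sketch (illustrative only): pinning the shmem pages around a device
 * mapping. Building the sg_table is driver-specific and only hinted at here.
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	// ... build an sg_table from the array and map it for DMA ...
 *
 *	// when the mapping is torn down, unpin and write back
 *	drm_gem_put_pages(obj, pages, true, true);
 */
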
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

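/*
 * Usage sketch (illustrative only): looking up the buffer list of a
 * submission ioctl. The args fields are hypothetical. Note that @objs_out is
 * assigned even on failure, so the zero-initialized array can be cleaned up
 * uniformly by putting every non-NULL entry.
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		goto out_put;
 *
 *	// ... use objs[0..bo_count-1] ...
 *
 * out_put:
 *	for (i = 0; objs && i < args->bo_count; i++) {
 *		if (objs[i])
 *			drm_gem_object_put(objs[i]);
 *	}
 *	kvfree(objs);
 */
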
/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * 0 on success, -ETIME if the wait timed out, or another negative error code
 * (such as -ERESTARTSYS if interrupted) on failure.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);

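/*
 * Usage sketch (illustrative only): a driver wait ioctl built on this helper.
 * The drm_foo_wait struct is hypothetical.
 *
 *	static int foo_gem_wait_ioctl(struct drm_device *dev, void *data,
 *				      struct drm_file *file)
 *	{
 *		struct drm_foo_wait *args = data;
 *
 *		return drm_gem_dma_resv_wait(file, args->handle, true,
 *					     msecs_to_jiffies(args->timeout_ms));
 *	}
 */
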
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);

	drm_gem_private_object_fini(obj);

	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

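/*
 * Usage sketch (illustrative only): drivers rarely call drm_gem_mmap()
 * directly; it is usually wired into the file_operations, e.g. via the
 * DEFINE_DRM_GEM_FOPS() helper from <drm/drm_gem.h>. foo_fops and foo_driver
 * are hypothetical names.
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.fops = &foo_fops,
 *		// ...
 *	};
 */
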
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(drm_gem_is_imported(obj)));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin_locked(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);

	return 0;
}

void drm_gem_unpin_locked(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_pin_locked(obj);
	dma_resv_unlock(obj->resv);

	return ret;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_unpin_locked(obj);
	dma_resv_unlock(obj->resv);
}

int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap_locked);

void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap_locked);

void drm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap_locked(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap_locked(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap);

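/*
 * Usage sketch (illustrative only): CPU access to a buffer through a kernel
 * mapping. iosys_map_memcpy_to() hides whether the mapping is in system or
 * I/O memory; src and len are assumed to come from the caller.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, src, len);
 *
 *	drm_gem_vunmap(obj, &map);
 */
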
/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

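/*
 * Usage sketch (illustrative only): fencing a job submission. The fence and
 * objs/count variables are assumed to come from the surrounding submit path,
 * and error handling is elided.
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; i++) {
 *		// reserve a fence slot, then attach the job's fence
 *		ret = dma_resv_reserve_fences(objs[i]->resv, 1);
 *		if (!ret)
 *			dma_resv_add_fence(objs[i]->resv, fence,
 *					   DMA_RESV_USAGE_WRITE);
 *	}
 *
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */
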
/**
 * drm_gem_lru_init - initialize an LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but the lru lock must be held.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it. So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock.
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be a trylock, since we can
		 * hit the shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held).
		 */
		if (!dma_resv_trylock(obj->resv)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU.
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU.
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);

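/*
 * Usage sketch (illustrative only): a shrinker scan_objects callback built on
 * drm_gem_lru_scan(). foo_device, lru_unpinned, shrinker->private_data and
 * foo_gem_purge() are hypothetical; foo_gem_purge() must drop the backing
 * pages and move the object out of the scanned LRU, returning true on
 * success.
 *
 *	static unsigned long
 *	foo_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		struct foo_device *foo = shrinker->private_data;
 *		unsigned long remaining = 0;
 *		unsigned long freed;
 *
 *		freed = drm_gem_lru_scan(&foo->lru_unpinned, sc->nr_to_scan,
 *					 &remaining, foo_gem_purge);
 *
 *		return freed ?: SHRINK_STOP;
 *	}
 */
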
/**
 * drm_gem_evict_locked - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict_locked(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict_locked);