/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);

        vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
                                          GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM
 * object in a given shmfs mountpoint
 *
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use
 * the usual tmpfs mountpoint (`shm_mnt`).
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init_with_mnt(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size,
                                 struct vfsmount *gemfs)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        if (gemfs)
                filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size,
                                                 VM_NORESERVE);
        else
                filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);

        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init_with_mnt);

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
                        size_t size)
{
        return drm_gem_object_init_with_mnt(dev, obj, size, NULL);
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        dma_resv_init(&obj->_resv);
        if (!obj->resv)
                obj->resv = &obj->_resv;

        if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
                drm_gem_gpuva_init(obj);

        drm_vma_node_reset(&obj->vma_node);
        INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object when its initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
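
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * that embeds struct drm_gem_object in its own buffer type pairs
 * drm_gem_private_object_init() with drm_gem_private_object_fini() on its
 * error path. struct my_bo and my_bo_alloc_backing() are hypothetical
 * driver-side names.
 *
 *      struct my_bo {
 *              struct drm_gem_object base;
 *      };
 *
 *      static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
 *      {
 *              struct my_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *              int ret;
 *
 *              if (!bo)
 *                      return ERR_PTR(-ENOMEM);
 *              drm_gem_private_object_init(dev, &bo->base, size);
 *              ret = my_bo_alloc_backing(bo);  // hypothetical helper
 *              if (ret) {
 *                      // undo drm_gem_private_object_init() on failure
 *                      drm_gem_private_object_fini(&bo->base);
 *                      kfree(bo);
 *                      return ERR_PTR(ret);
 *              }
 *              return bo;
 *      }
 */
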
/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name.
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;

        if (obj->funcs->close)
                obj->funcs->close(obj, file_priv);

        drm_prime_remove_buf_handle(&file_priv->prime, id);
        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        /* Don't allow imported objects to be mapped */
        if (drm_gem_is_imported(obj)) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr. Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
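
/*
 * Usage sketch (illustrative only, not part of the original file): a typical
 * &drm_driver.dumb_create implementation allocates the object, publishes it
 * with drm_gem_handle_create() and then drops its local reference, leaving
 * the handle as the only reference. my_gem_create() is a hypothetical
 * driver-side allocator.
 *
 *      static int my_dumb_create(struct drm_file *file_priv,
 *                                struct drm_device *dev,
 *                                struct drm_mode_create_dumb *args)
 *      {
 *              struct drm_gem_object *obj;
 *              int ret;
 *
 *              args->pitch = ALIGN(args->width * args->bpp / 8, 64);
 *              args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *              obj = my_gem_create(dev, args->size);
 *              if (IS_ERR(obj))
 *                      return PTR_ERR(obj);
 *
 *              ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *              // drop the local reference; the handle now keeps the object alive
 *              drm_gem_object_put(obj);
 *
 *              return ret;
 *      }
 */
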
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move folios to the appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
        check_move_unevictable_folios(fbatch);
        __folio_batch_release(fbatch);
        cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page **pages;
        struct folio *folio;
        struct folio_batch fbatch;
        long i, j, npages;

        if (WARN_ON(!obj->filp))
                return ERR_PTR(-EINVAL);

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        mapping_set_unevictable(mapping);

        i = 0;
        while (i < npages) {
                long nr;

                folio = shmem_read_folio_gfp(mapping, i,
                                             mapping_gfp_mask(mapping));
                if (IS_ERR(folio))
                        goto fail;
                nr = min(npages - i, folio_nr_pages(folio));
                for (j = 0; j < nr; j++, i++)
                        pages[i] = folio_file_page(folio, i);

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (folio_pfn(folio) >= 0x00100000UL));
        }

        return pages;

fail:
        mapping_clear_unevictable(mapping);
        folio_batch_init(&fbatch);
        j = 0;
        while (j < i) {
                struct folio *f = page_folio(pages[j]);

                if (!folio_batch_add(&fbatch, f))
                        drm_gem_check_release_batch(&fbatch);
                j += folio_nr_pages(f);
        }
        if (fbatch.nr)
                drm_gem_check_release_batch(&fbatch);

        kvfree(pages);
        return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;
        struct address_space *mapping;
        struct folio_batch fbatch;

        mapping = file_inode(obj->filp)->i_mapping;
        mapping_clear_unevictable(mapping);

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        folio_batch_init(&fbatch);
        for (i = 0; i < npages; i++) {
                struct folio *folio;

                if (!pages[i])
                        continue;
                folio = page_folio(pages[i]);

                if (dirty)
                        folio_mark_dirty(folio);

                if (accessed)
                        folio_mark_accessed(folio);

                /* Undo the reference we took when populating the table */
                if (!folio_batch_add(&fbatch, folio))
                        drm_gem_check_release_batch(&fbatch);
                i += folio_nr_pages(folio) - 1;
        }
        if (folio_batch_count(&fbatch))
                drm_gem_check_release_batch(&fbatch);

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
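
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * shmem-backed driver typically pins the backing store with
 * drm_gem_get_pages() and releases it again with drm_gem_put_pages() once the
 * pages are no longer referenced, e.g. after unbinding from the GPU. struct
 * my_bo with a stored page array is a hypothetical wrapper.
 *
 *      static int my_bo_get_pages(struct my_bo *bo)
 *      {
 *              struct page **pages = drm_gem_get_pages(&bo->base);
 *
 *              if (IS_ERR(pages))
 *                      return PTR_ERR(pages);
 *              bo->pages = pages;
 *              return 0;
 *      }
 *
 *      static void my_bo_put_pages(struct my_bo *bo, bool dirty)
 *      {
 *              drm_gem_put_pages(&bo->base, bo->pages, dirty, false);
 *              bo->pages = NULL;
 *      }
 */
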
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
                          struct drm_gem_object **objs)
{
        int i, ret = 0;
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        for (i = 0; i < count; i++) {
                /* Check if we currently have a reference on the object */
                obj = idr_find(&filp->object_idr, handle[i]);
                if (!obj) {
                        ret = -ENOENT;
                        break;
                }
                drm_gem_object_get(obj);
                objs[i] = obj;
        }
        spin_unlock(&filp->table_lock);

        return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out)
{
        int ret;
        u32 *handles;
        struct drm_gem_object **objs;

        if (!count)
                return 0;

        objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
                              GFP_KERNEL | __GFP_ZERO);
        if (!objs)
                return -ENOMEM;

        *objs_out = objs;

        handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                goto out;
        }

        if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in GEM handles\n");
                goto out;
        }

        ret = objects_lookup(filp, handles, count, objs);
out:
        kvfree(handles);
        return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
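
/*
 * Usage sketch (illustrative only, not part of the original file): an
 * execbuf-style ioctl can resolve the user-supplied handle array in one call.
 * The caller owns one reference on every returned object and must drop them
 * all again. args->bo_handles and args->bo_count are hypothetical ioctl
 * fields.
 *
 *      struct drm_gem_object **objs = NULL;
 *      int i, ret;
 *
 *      ret = drm_gem_objects_lookup(file_priv,
 *                                   u64_to_user_ptr(args->bo_handles),
 *                                   args->bo_count, &objs);
 *      if (ret)
 *              goto out;
 *      // ... submit the job referencing objs[] ...
 * out:
 *      for (i = 0; objs && i < args->bo_count; i++)
 *              if (objs[i])
 *                      drm_gem_object_put(objs[i]);
 *      kvfree(objs);
 */
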
/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj = NULL;

        objects_lookup(filp, &handle, 1, &obj);
        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on a GEM object's reservation object's
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * 0 on success, -ETIME if the wait timed out, -ERESTARTSYS if interrupted,
 * or -EINVAL if the handle lookup failed.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
                           bool wait_all, unsigned long timeout)
{
        long ret;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(filep, handle);
        if (!obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
                return -EINVAL;
        }

        ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
                                    true, timeout);
        if (ret == 0)
                ret = -ETIME;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
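
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * wait ioctl can be built almost entirely on this helper. struct my_wait_args
 * with handle and timeout_ns fields is a hypothetical ioctl payload; the
 * jiffies conversion is up to the driver.
 *
 *      static int my_wait_ioctl(struct drm_device *dev, void *data,
 *                               struct drm_file *file_priv)
 *      {
 *              struct my_wait_args *args = data;
 *              unsigned long timeout = nsecs_to_jiffies(args->timeout_ns);
 *
 *              // returns 0 on success or a negative error code
 *              return drm_gem_dma_resv_wait(file_priv, args->handle,
 *                                           true, timeout);
 *      }
 */
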
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put(obj);
        return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;
        args->size = obj->size;

err:
        drm_gem_object_put(obj);
        return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init_base(&file_private->object_idr, 1);
        spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        if (obj->filp)
                fput(obj->filp);

        drm_gem_private_object_fini(obj);

        drm_gem_free_mmap_offset(obj);
        drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);

        if (WARN_ON(!obj->funcs->free))
                return;

        obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        int ret;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_get(obj);

        vma->vm_private_data = obj;
        vma->vm_ops = obj->funcs->vm_ops;

        if (obj->funcs->mmap) {
                ret = obj->funcs->mmap(obj, vma);
                if (ret)
                        goto err_drm_gem_object_put;
                WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
        } else {
                if (!vma->vm_ops) {
                        ret = -EINVAL;
                        goto err_drm_gem_object_put;
                }

                vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
                vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
        }

        return 0;

err_drm_gem_object_put:
        drm_gem_object_put(obj);
        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put(obj);
                return -EACCES;
        }

        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);

        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
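
/*
 * Usage sketch (illustrative only, not part of the original file): drivers
 * rarely call drm_gem_mmap() directly; it is normally wired up through the
 * DRM file operations, e.g. via the DEFINE_DRM_GEM_FOPS() helper from
 * <drm/drm_gem.h>. my_driver is a hypothetical name.
 *
 *      DEFINE_DRM_GEM_FOPS(my_driver_fops);
 *
 *      static const struct drm_driver my_driver = {
 *              .fops = &my_driver_fops,
 *              // ...
 *      };
 */
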
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
                        const struct drm_gem_object *obj)
{
        drm_printf_indent(p, indent, "name=%d\n", obj->name);
        drm_printf_indent(p, indent, "refcount=%u\n",
                          kref_read(&obj->refcount));
        drm_printf_indent(p, indent, "start=%08lx\n",
                          drm_vma_node_start(&obj->vma_node));
        drm_printf_indent(p, indent, "size=%zu\n", obj->size);
        drm_printf_indent(p, indent, "imported=%s\n",
                          str_yes_no(drm_gem_is_imported(obj)));

        if (obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin_locked(struct drm_gem_object *obj)
{
        if (obj->funcs->pin)
                return obj->funcs->pin(obj);

        return 0;
}

void drm_gem_unpin_locked(struct drm_gem_object *obj)
{
        if (obj->funcs->unpin)
                obj->funcs->unpin(obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
        int ret;

        dma_resv_lock(obj->resv, NULL);
        ret = drm_gem_pin_locked(obj);
        dma_resv_unlock(obj->resv);

        return ret;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
        dma_resv_lock(obj->resv, NULL);
        drm_gem_unpin_locked(obj);
        dma_resv_unlock(obj->resv);
}

int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
        int ret;

        dma_resv_assert_held(obj->resv);

        if (!obj->funcs->vmap)
                return -EOPNOTSUPP;

        ret = obj->funcs->vmap(obj, map);
        if (ret)
                return ret;
        else if (iosys_map_is_null(map))
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(drm_gem_vmap_locked);

void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
        dma_resv_assert_held(obj->resv);

        if (iosys_map_is_null(map))
                return;

        if (obj->funcs->vunmap)
                obj->funcs->vunmap(obj, map);

        /* Always set the mapping to NULL. Callers may rely on this. */
        iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap_locked);

void drm_gem_lock(struct drm_gem_object *obj)
{
        dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
        dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        int ret;

        dma_resv_lock(obj->resv, NULL);
        ret = drm_gem_vmap_locked(obj, map);
        dma_resv_unlock(obj->resv);

        return ret;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        dma_resv_lock(obj->resv, NULL);
        drm_gem_vunmap_locked(obj, map);
        dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap);
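
/*
 * Usage sketch (illustrative only, not part of the original file): CPU access
 * through a kernel mapping brackets the access with drm_gem_vmap() and
 * drm_gem_vunmap(); both take the reservation lock internally. This assumes
 * the object's funcs provide a vmap callback; data and size are caller-owned.
 *
 *      struct iosys_map map;
 *      int ret;
 *
 *      ret = drm_gem_vmap(obj, &map);
 *      if (ret)
 *              return ret;
 *      iosys_map_memcpy_to(&map, 0, data, size);
 *      drm_gem_vunmap(obj, &map);
 */
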
/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
                          struct ww_acquire_ctx *acquire_ctx)
{
        int contended = -1;
        int i, ret;

        ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
        if (contended != -1) {
                struct drm_gem_object *obj = objs[contended];

                ret = dma_resv_lock_slow_interruptible(obj->resv,
                                                       acquire_ctx);
                if (ret) {
                        ww_acquire_fini(acquire_ctx);
                        return ret;
                }
        }

        for (i = 0; i < count; i++) {
                if (i == contended)
                        continue;

                ret = dma_resv_lock_interruptible(objs[i]->resv,
                                                  acquire_ctx);
                if (ret) {
                        int j;

                        for (j = 0; j < i; j++)
                                dma_resv_unlock(objs[j]->resv);

                        if (contended != -1 && contended >= i)
                                dma_resv_unlock(objs[contended]->resv);

                        if (ret == -EDEADLK) {
                                contended = i;
                                goto retry;
                        }

                        ww_acquire_fini(acquire_ctx);
                        return ret;
                }
        }

        ww_acquire_done(acquire_ctx);

        return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
                            struct ww_acquire_ctx *acquire_ctx)
{
        int i;

        for (i = 0; i < count; i++)
                dma_resv_unlock(objs[i]->resv);

        ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
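
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * job-submission path locks all objects, reserves fence slots, installs the
 * job's fence, and unlocks. job_fence is a hypothetical &dma_fence owned by
 * the driver.
 *
 *      struct ww_acquire_ctx ctx;
 *      int i, ret;
 *
 *      ret = drm_gem_lock_reservations(objs, count, &ctx);
 *      if (ret)
 *              return ret;
 *      for (i = 0; i < count; i++) {
 *              ret = dma_resv_reserve_fences(objs[i]->resv, 1);
 *              if (ret)
 *                      goto unlock;
 *              dma_resv_add_fence(objs[i]->resv, job_fence,
 *                                 DMA_RESV_USAGE_WRITE);
 *      }
 * unlock:
 *      drm_gem_unlock_reservations(objs, count, &ctx);
 */
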
/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
        lru->lock = lock;
        lru->count = 0;
        INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
        obj->lru->count -= obj->size >> PAGE_SHIFT;
        WARN_ON(obj->lru->count < 0);
        list_del(&obj->lru_node);
        obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
        struct drm_gem_lru *lru = obj->lru;

        if (!lru)
                return;

        mutex_lock(lru->lock);
        drm_gem_lru_remove_locked(obj);
        mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but the lru lock must be held.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
        lockdep_assert_held_once(lru->lock);

        if (obj->lru)
                drm_gem_lru_remove_locked(obj);

        lru->count += obj->size >> PAGE_SHIFT;
        list_add_tail(&obj->lru_node, &lru->list);
        obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
        mutex_lock(lru->lock);
        drm_gem_lru_move_tail_locked(lru, obj);
        mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
                 unsigned int nr_to_scan,
                 unsigned long *remaining,
                 bool (*shrink)(struct drm_gem_object *obj))
{
        struct drm_gem_lru still_in_lru;
        struct drm_gem_object *obj;
        unsigned freed = 0;

        drm_gem_lru_init(&still_in_lru, lru->lock);

        mutex_lock(lru->lock);

        while (freed < nr_to_scan) {
                obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

                if (!obj)
                        break;

                drm_gem_lru_move_tail_locked(&still_in_lru, obj);

                /*
                 * If it's in the process of being freed, gem_object->free()
                 * may be blocked on lock waiting to remove it. So just
                 * skip it.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        continue;

                /*
                 * Now that we own a reference, we can drop the lock for the
                 * rest of the loop body, to reduce contention with other
                 * code paths that need the LRU lock.
                 */
                mutex_unlock(lru->lock);

                /*
                 * Note that this still needs to be trylock, since we can
                 * hit the shrinker in response to trying to get backing pages
                 * for this obj (ie. while its lock is already held).
                 */
                if (!dma_resv_trylock(obj->resv)) {
                        *remaining += obj->size >> PAGE_SHIFT;
                        goto tail;
                }

                if (shrink(obj)) {
                        freed += obj->size >> PAGE_SHIFT;

                        /*
                         * If we succeeded in releasing the object's backing
                         * pages, we expect the driver to have moved the object
                         * out of this LRU.
                         */
                        WARN_ON(obj->lru == &still_in_lru);
                        WARN_ON(obj->lru == lru);
                }

                dma_resv_unlock(obj->resv);

tail:
                drm_gem_object_put(obj);
                mutex_lock(lru->lock);
        }

        /*
         * Move objects we've skipped over out of the temporary still_in_lru
         * back into this LRU.
         */
        list_for_each_entry (obj, &still_in_lru.list, lru_node)
                obj->lru = lru;
        list_splice_tail(&still_in_lru.list, &lru->list);
        lru->count += still_in_lru.count;

        mutex_unlock(lru->lock);

        return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
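
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * shrinker's scan_objects callback can be built directly on
 * drm_gem_lru_scan(). Here struct my_drm embeds the shrinker and a purgeable
 * LRU, and my_purge() is a hypothetical shrink callback; drivers may wire the
 * shrinker up differently.
 *
 *      static unsigned long
 *      my_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *      {
 *              struct my_drm *priv =
 *                      container_of(shrinker, struct my_drm, shrinker);
 *              unsigned long remaining = 0;
 *              unsigned long freed;
 *
 *              freed = drm_gem_lru_scan(&priv->lru_purgeable, sc->nr_to_scan,
 *                                       &remaining, my_purge);
 *
 *              return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
 *      }
 */
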
/**
 * drm_gem_evict_locked - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict_locked(struct drm_gem_object *obj)
{
        dma_resv_assert_held(obj->resv);

        if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
                return -EBUSY;

        if (obj->funcs->evict)
                return obj->funcs->evict(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_evict_locked);