Commit | Line | Data |
---|---|---|
673a394b EA |
1 | /* |
2 | * Copyright © 2008 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Eric Anholt <eric@anholt.net> | |
25 | * | |
26 | */ | |
27 | ||
28 | #include <linux/types.h> | |
29 | #include <linux/slab.h> | |
30 | #include <linux/mm.h> | |
31 | #include <linux/uaccess.h> | |
32 | #include <linux/fs.h> | |
33 | #include <linux/file.h> | |
34 | #include <linux/module.h> | |
35 | #include <linux/mman.h> | |
36 | #include <linux/pagemap.h> | |
5949eac4 | 37 | #include <linux/shmem_fs.h> |
3248877e | 38 | #include <linux/dma-buf.h> |
95cf9264 | 39 | #include <linux/mem_encrypt.h> |
fb4b4927 | 40 | #include <linux/pagevec.h> |
0500c04e SR |
41 | |
42 | #include <drm/drm_device.h> | |
43 | #include <drm/drm_drv.h> | |
44 | #include <drm/drm_file.h> | |
d9fc9413 | 45 | #include <drm/drm_gem.h> |
45d58b40 | 46 | #include <drm/drm_print.h> |
0500c04e SR |
47 | #include <drm/drm_vma_manager.h> |
48 | ||
67d0ec4e | 49 | #include "drm_internal.h" |
673a394b EA |
50 | |
51 | /** @file drm_gem.c | |
52 | * | |
53 | * This file provides some of the base ioctls and library routines for | |
54 | * the graphics memory manager implemented by each device driver. | |
55 | * | |
56 | * Because various devices have different requirements in terms of | |
57 | * synchronization and migration strategies, implementing that is left up to | |
58 | * the driver, and all that the general API provides should be generic -- | |
59 | * allocating objects, reading/writing data with the CPU, freeing objects. |
60 | * Even there, platform-dependent optimizations for reading/writing data with | |
61 | * the CPU mean we'll likely hook those out to driver-specific calls. However, | |
62 | * the DRI2 implementation wants to have at least allocate/mmap be generic. | |
63 | * | |
64 | * The goal was to have swap-backed object allocation managed through | |
65 | * struct file. However, file descriptors as handles to a struct file have | |
66 | * two major failings: | |
67 | * - Process limits prevent more than 1024 or so being used at a time by | |
68 | * default. | |
69 | * - Inability to allocate high fds will aggravate the X Server's select() | |
70 | * handling, and likely that of many GL client applications as well. | |
71 | * | |
72 | * This led to a plan of using our own integer IDs (called handles, following | |
73 | * DRM terminology) to mimic fds, and implement the fd syscalls we need as | |
74 | * ioctls. The objects themselves will still include the struct file so | |
75 | * that we can transition to fds if the required kernel infrastructure shows | |
76 | * up at a later date, and as our interface with shmfs for memory allocation. | |
77 | */ | |
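The handle scheme described above is exposed to userspace through the GEM_CLOSE, GEM_FLINK and GEM_OPEN ioctls implemented further down in this file. A minimal, hedged userspace sketch of that flow using libdrm's drmIoctl(); the device fd and the initial handle are assumed to come from a driver-specific allocation ioctl and are not shown:

```c
/* Hedged sketch only: "fd" is an open DRM device node and "handle" is a GEM
 * handle obtained from a driver-specific creation ioctl (both assumed).
 */
#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>

static int flink_open_close(int fd, uint32_t handle)
{
	struct drm_gem_flink flink = { .handle = handle };
	struct drm_gem_open open_arg = { 0 };
	struct drm_gem_close close_arg = { 0 };

	/* Publish a global (flink) name for the object. */
	if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink))
		return -errno;

	/* Turn the global name back into a second handle on this fd. */
	open_arg.name = flink.name;
	if (drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg))
		return -errno;

	/* Drop that handle again; the object lives until its last handle is closed. */
	close_arg.handle = open_arg.handle;
	return drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg) ? -errno : 0;
}
```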
78 | ||
79 | /** | |
89d61fc0 DV |
80 | * drm_gem_init - Initialize the GEM device fields |
81 | * @dev: drm_device structure to initialize | |
673a394b | 82 | */ |
673a394b EA |
83 | int |
84 | drm_gem_init(struct drm_device *dev) | |
85 | { | |
b04a5906 | 86 | struct drm_vma_offset_manager *vma_offset_manager; |
a2c0a97b | 87 | |
cd4f013f | 88 | mutex_init(&dev->object_name_lock); |
e86584c5 | 89 | idr_init_base(&dev->object_name_idr, 1); |
a2c0a97b | 90 | |
b04a5906 DV |
91 | vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL); |
92 | if (!vma_offset_manager) { | |
a2c0a97b JB |
93 | DRM_ERROR("out of memory\n"); |
94 | return -ENOMEM; | |
95 | } | |
96 | ||
b04a5906 DV |
97 | dev->vma_offset_manager = vma_offset_manager; |
98 | drm_vma_offset_manager_init(vma_offset_manager, | |
0de23977 DH |
99 | DRM_FILE_PAGE_OFFSET_START, |
100 | DRM_FILE_PAGE_OFFSET_SIZE); | |
a2c0a97b | 101 | |
673a394b EA |
102 | return 0; |
103 | } | |
104 | ||
a2c0a97b JB |
105 | void |
106 | drm_gem_destroy(struct drm_device *dev) | |
107 | { | |
a2c0a97b | 108 | |
b04a5906 DV |
109 | drm_vma_offset_manager_destroy(dev->vma_offset_manager); |
110 | kfree(dev->vma_offset_manager); | |
111 | dev->vma_offset_manager = NULL; | |
a2c0a97b JB |
112 | } |
113 | ||
1d397043 | 114 | /** |
89d61fc0 DV |
115 | * drm_gem_object_init - initialize an allocated shmem-backed GEM object |
116 | * @dev: drm_device the object should be initialized for | |
117 | * @obj: drm_gem_object to initialize | |
118 | * @size: object size | |
119 | * | |
62cb7011 | 120 | * Initialize an already allocated GEM object of the specified size with |
1d397043 DV |
121 | * shmfs backing store. |
122 | */ | |
123 | int drm_gem_object_init(struct drm_device *dev, | |
124 | struct drm_gem_object *obj, size_t size) | |
125 | { | |
89c8233f | 126 | struct file *filp; |
1d397043 | 127 | |
6ab11a26 DV |
128 | drm_gem_private_object_init(dev, obj, size); |
129 | ||
89c8233f DH |
130 | filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); |
131 | if (IS_ERR(filp)) | |
132 | return PTR_ERR(filp); | |
1d397043 | 133 | |
89c8233f | 134 | obj->filp = filp; |
1d397043 | 135 | |
1d397043 DV |
136 | return 0; |
137 | } | |
138 | EXPORT_SYMBOL(drm_gem_object_init); | |
139 | ||
62cb7011 | 140 | /** |
2a5706a3 | 141 | * drm_gem_private_object_init - initialize an allocated private GEM object |
89d61fc0 DV |
142 | * @dev: drm_device the object should be initialized for |
143 | * @obj: drm_gem_object to initialize | |
144 | * @size: object size | |
145 | * | |
62cb7011 AC |
146 | * Initialize an already allocated GEM object of the specified size with |
147 | * no GEM-provided backing store. Instead the caller is responsible for |
148 | * backing the object and handling it. | |
149 | */ | |
89c8233f DH |
150 | void drm_gem_private_object_init(struct drm_device *dev, |
151 | struct drm_gem_object *obj, size_t size) | |
62cb7011 AC |
152 | { |
153 | BUG_ON((size & (PAGE_SIZE - 1)) != 0); | |
154 | ||
155 | obj->dev = dev; | |
156 | obj->filp = NULL; | |
157 | ||
158 | kref_init(&obj->refcount); | |
a8e11d1c | 159 | obj->handle_count = 0; |
62cb7011 | 160 | obj->size = size; |
1ba62714 RH |
161 | reservation_object_init(&obj->_resv); |
162 | if (!obj->resv) | |
163 | obj->resv = &obj->_resv; | |
164 | ||
88d7ebe5 | 165 | drm_vma_node_reset(&obj->vma_node); |
62cb7011 AC |
166 | } |
167 | EXPORT_SYMBOL(drm_gem_private_object_init); | |
168 | ||
0ff926c7 DA |
169 | static void |
170 | drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp) | |
171 | { | |
319c933c DV |
172 | /* |
173 | * Note: obj->dma_buf can't disappear as long as we still hold a | |
174 | * handle reference in obj->handle_count. | |
175 | */ | |
d0b2c533 | 176 | mutex_lock(&filp->prime.lock); |
319c933c | 177 | if (obj->dma_buf) { |
d0b2c533 DV |
178 | drm_prime_remove_buf_handle_locked(&filp->prime, |
179 | obj->dma_buf); | |
0ff926c7 | 180 | } |
d0b2c533 | 181 | mutex_unlock(&filp->prime.lock); |
0ff926c7 DA |
182 | } |
183 | ||
36da5908 | 184 | /** |
c6a84325 | 185 | * drm_gem_object_handle_free - release resources bound to userspace handles |
89d61fc0 DV |
186 | * @obj: GEM object to clean up. |
187 | * | |
36da5908 DV |
188 | * Called after the last handle to the object has been closed |
189 | * | |
190 | * Removes any name for the object. Note that this must be | |
191 | * called before drm_gem_object_free or we'll be touching | |
192 | * freed memory | |
193 | */ | |
194 | static void drm_gem_object_handle_free(struct drm_gem_object *obj) | |
195 | { | |
196 | struct drm_device *dev = obj->dev; | |
197 | ||
198 | /* Remove any name for this object */ | |
36da5908 DV |
199 | if (obj->name) { |
200 | idr_remove(&dev->object_name_idr, obj->name); | |
201 | obj->name = 0; | |
a8e11d1c | 202 | } |
36da5908 DV |
203 | } |
204 | ||
319c933c DV |
205 | static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj) |
206 | { | |
207 | /* Unbreak the reference cycle if we have an exported dma_buf. */ | |
208 | if (obj->dma_buf) { | |
209 | dma_buf_put(obj->dma_buf); | |
210 | obj->dma_buf = NULL; | |
211 | } | |
212 | } | |
213 | ||
becee2a5 | 214 | static void |
e6b62714 | 215 | drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) |
36da5908 | 216 | { |
98a8883a CW |
217 | struct drm_device *dev = obj->dev; |
218 | bool final = false; | |
219 | ||
a8e11d1c | 220 | if (WARN_ON(obj->handle_count == 0)) |
36da5908 DV |
221 | return; |
222 | ||
223 | /* | |
224 | * Must bump handle count first as this may be the last | |
225 | * ref, in which case the object would disappear before we | |
226 | * checked for a name | |
227 | */ | |
228 | ||
98a8883a | 229 | mutex_lock(&dev->object_name_lock); |
319c933c | 230 | if (--obj->handle_count == 0) { |
36da5908 | 231 | drm_gem_object_handle_free(obj); |
319c933c | 232 | drm_gem_object_exported_dma_buf_free(obj); |
98a8883a | 233 | final = true; |
319c933c | 234 | } |
98a8883a | 235 | mutex_unlock(&dev->object_name_lock); |
a8e11d1c | 236 | |
98a8883a | 237 | if (final) |
e6b62714 | 238 | drm_gem_object_put_unlocked(obj); |
36da5908 DV |
239 | } |
240 | ||
8815b23a CW |
241 | /* |
242 | * Called at device or object close to release the file's | |
243 | * handle references on objects. | |
244 | */ | |
245 | static int | |
246 | drm_gem_object_release_handle(int id, void *ptr, void *data) | |
247 | { | |
248 | struct drm_file *file_priv = data; | |
249 | struct drm_gem_object *obj = ptr; | |
250 | struct drm_device *dev = obj->dev; | |
251 | ||
b39b5394 NT |
252 | if (obj->funcs && obj->funcs->close) |
253 | obj->funcs->close(obj, file_priv); | |
254 | else if (dev->driver->gem_close_object) | |
d0a133f7 CW |
255 | dev->driver->gem_close_object(obj, file_priv); |
256 | ||
ae75f836 | 257 | drm_gem_remove_prime_handles(obj, file_priv); |
d9a1f0b4 | 258 | drm_vma_node_revoke(&obj->vma_node, file_priv); |
8815b23a | 259 | |
e6b62714 | 260 | drm_gem_object_handle_put_unlocked(obj); |
8815b23a CW |
261 | |
262 | return 0; | |
263 | } | |
264 | ||
673a394b | 265 | /** |
89d61fc0 DV |
266 | * drm_gem_handle_delete - deletes the given file-private handle |
267 | * @filp: drm file-private structure to use for the handle look up | |
268 | * @handle: userspace handle to delete | |
269 | * | |
df2e0900 DV |
270 | * Removes the GEM handle from the @filp lookup table which has been added with |
271 | * drm_gem_handle_create(). If this is the last handle also cleans up linked | |
272 | * resources like GEM names. | |
673a394b | 273 | */ |
ff72145b | 274 | int |
a1a2d1d3 | 275 | drm_gem_handle_delete(struct drm_file *filp, u32 handle) |
673a394b | 276 | { |
673a394b EA |
277 | struct drm_gem_object *obj; |
278 | ||
673a394b EA |
279 | spin_lock(&filp->table_lock); |
280 | ||
281 | /* Check if we currently have a reference on the object */ | |
f6cd7dae CW |
282 | obj = idr_replace(&filp->object_idr, NULL, handle); |
283 | spin_unlock(&filp->table_lock); | |
284 | if (IS_ERR_OR_NULL(obj)) | |
673a394b | 285 | return -EINVAL; |
673a394b | 286 | |
f6cd7dae CW |
287 | /* Release driver's reference and decrement refcount. */ |
288 | drm_gem_object_release_handle(handle, obj, filp); | |
289 | ||
290 | /* And finally make the handle available for future allocations. */ | |
291 | spin_lock(&filp->table_lock); | |
673a394b EA |
292 | idr_remove(&filp->object_idr, handle); |
293 | spin_unlock(&filp->table_lock); | |
294 | ||
673a394b EA |
295 | return 0; |
296 | } | |
ff72145b | 297 | EXPORT_SYMBOL(drm_gem_handle_delete); |
673a394b | 298 | |
db611527 | 299 | /** |
415d2e9e | 300 | * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object |
db611527 NT |
301 | * @file: drm file-private structure containing the gem object |
302 | * @dev: corresponding drm_device | |
303 | * @handle: gem object handle | |
304 | * @offset: return location for the fake mmap offset | |
305 | * | |
306 | * This implements the &drm_driver.dumb_map_offset kms driver callback for | |
307 | * drivers which use gem to manage their backing storage. | |
308 | * | |
309 | * Returns: | |
310 | * 0 on success or a negative error code on failure. | |
311 | */ | |
415d2e9e | 312 | int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, |
db611527 NT |
313 | u32 handle, u64 *offset) |
314 | { | |
315 | struct drm_gem_object *obj; | |
316 | int ret; | |
317 | ||
318 | obj = drm_gem_object_lookup(file, handle); | |
319 | if (!obj) | |
320 | return -ENOENT; | |
321 | ||
90378e58 NT |
322 | /* Don't allow imported objects to be mapped */ |
323 | if (obj->import_attach) { | |
324 | ret = -EINVAL; | |
325 | goto out; | |
326 | } | |
327 | ||
db611527 NT |
328 | ret = drm_gem_create_mmap_offset(obj); |
329 | if (ret) | |
330 | goto out; | |
331 | ||
332 | *offset = drm_vma_node_offset_addr(&obj->vma_node); | |
333 | out: | |
334 | drm_gem_object_put_unlocked(obj); | |
335 | ||
336 | return ret; | |
337 | } | |
415d2e9e | 338 | EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset); |
db611527 | 339 | |
43387b37 DV |
340 | /** |
341 | * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers | |
89d61fc0 DV |
342 | * @file: drm file-private structure to remove the dumb handle from |
343 | * @dev: corresponding drm_device | |
344 | * @handle: the dumb handle to remove | |
1dd3a060 | 345 | * |
940eba2d DV |
346 | * This implements the &drm_driver.dumb_destroy kms driver callback for drivers |
347 | * which use gem to manage their backing storage. | |
43387b37 DV |
348 | */ |
349 | int drm_gem_dumb_destroy(struct drm_file *file, | |
350 | struct drm_device *dev, | |
351 | uint32_t handle) | |
352 | { | |
353 | return drm_gem_handle_delete(file, handle); | |
354 | } | |
355 | EXPORT_SYMBOL(drm_gem_dumb_destroy); | |
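These dumb-buffer helpers are typically wired straight into a driver's struct drm_driver. A hedged sketch of that wiring; foo_dumb_create() and the surrounding fields are hypothetical driver code, only the drm_gem_* helpers come from this file:

```c
static struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	/* foo_dumb_create() is a hypothetical driver callback. */
	.dumb_create		= foo_dumb_create,
	.dumb_map_offset	= drm_gem_dumb_map_offset,
	.dumb_destroy		= drm_gem_dumb_destroy,
	/* .fops, .name, gem object callbacks, etc. omitted */
};
```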
356 | ||
673a394b | 357 | /** |
20228c44 | 358 | * drm_gem_handle_create_tail - internal function to create a handle |
89d61fc0 DV |
359 | * @file_priv: drm file-private structure to register the handle for |
360 | * @obj: object to register | |
8bf8180f | 361 | * @handlep: pointer to return the created handle to the caller |
1dd3a060 | 362 | * |
940eba2d DV |
363 | * This expects the &drm_device.object_name_lock to be held already and will |
364 | * drop it before returning. Used to avoid races in establishing new handles | |
365 | * when importing an object from either a flink name or a dma-buf. |
df2e0900 DV |
366 | * |
367 | * Handles must be released again through drm_gem_handle_delete(). This is done |
368 | * when userspace closes @file_priv for all attached handles, or through the | |
369 | * GEM_CLOSE ioctl for individual handles. | |
673a394b EA |
370 | */ |
371 | int | |
20228c44 DV |
372 | drm_gem_handle_create_tail(struct drm_file *file_priv, |
373 | struct drm_gem_object *obj, | |
374 | u32 *handlep) | |
673a394b | 375 | { |
304eda32 | 376 | struct drm_device *dev = obj->dev; |
9649399e | 377 | u32 handle; |
304eda32 | 378 | int ret; |
673a394b | 379 | |
20228c44 | 380 | WARN_ON(!mutex_is_locked(&dev->object_name_lock)); |
98a8883a | 381 | if (obj->handle_count++ == 0) |
e6b62714 | 382 | drm_gem_object_get(obj); |
20228c44 | 383 | |
673a394b | 384 | /* |
2e928815 TH |
385 | * Get the user-visible handle using idr. Preload and perform |
386 | * allocation under our spinlock. | |
673a394b | 387 | */ |
2e928815 | 388 | idr_preload(GFP_KERNEL); |
673a394b | 389 | spin_lock(&file_priv->table_lock); |
2e928815 TH |
390 | |
391 | ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); | |
98a8883a | 392 | |
673a394b | 393 | spin_unlock(&file_priv->table_lock); |
2e928815 | 394 | idr_preload_end(); |
98a8883a | 395 | |
cd4f013f | 396 | mutex_unlock(&dev->object_name_lock); |
6984128d CW |
397 | if (ret < 0) |
398 | goto err_unref; | |
399 | ||
9649399e | 400 | handle = ret; |
673a394b | 401 | |
d9a1f0b4 | 402 | ret = drm_vma_node_allow(&obj->vma_node, file_priv); |
6984128d CW |
403 | if (ret) |
404 | goto err_remove; | |
304eda32 | 405 | |
b39b5394 NT |
406 | if (obj->funcs && obj->funcs->open) { |
407 | ret = obj->funcs->open(obj, file_priv); | |
408 | if (ret) | |
409 | goto err_revoke; | |
410 | } else if (dev->driver->gem_open_object) { | |
304eda32 | 411 | ret = dev->driver->gem_open_object(obj, file_priv); |
6984128d CW |
412 | if (ret) |
413 | goto err_revoke; | |
304eda32 BS |
414 | } |
415 | ||
9649399e | 416 | *handlep = handle; |
673a394b | 417 | return 0; |
6984128d CW |
418 | |
419 | err_revoke: | |
d9a1f0b4 | 420 | drm_vma_node_revoke(&obj->vma_node, file_priv); |
6984128d CW |
421 | err_remove: |
422 | spin_lock(&file_priv->table_lock); | |
9649399e | 423 | idr_remove(&file_priv->object_idr, handle); |
6984128d CW |
424 | spin_unlock(&file_priv->table_lock); |
425 | err_unref: | |
e6b62714 | 426 | drm_gem_object_handle_put_unlocked(obj); |
6984128d | 427 | return ret; |
673a394b | 428 | } |
20228c44 DV |
429 | |
430 | /** | |
8bf8180f | 431 | * drm_gem_handle_create - create a gem handle for an object |
89d61fc0 DV |
432 | * @file_priv: drm file-private structure to register the handle for |
433 | * @obj: object to register | |
434 | * @handlep: pointer to return the created handle to the caller | |
435 | * | |
39031176 DV |
436 | * Create a handle for this object. This adds a handle reference to the object, |
437 | * which includes a regular reference count. Callers will likely want to | |
438 | * dereference the object afterwards. | |
439 | * | |
440 | * Since this publishes @obj to userspace it must be fully set up by this point, | |
441 | * drivers must call this last in their buffer object creation callbacks. | |
20228c44 | 442 | */ |
8bf8180f TR |
443 | int drm_gem_handle_create(struct drm_file *file_priv, |
444 | struct drm_gem_object *obj, | |
445 | u32 *handlep) | |
20228c44 DV |
446 | { |
447 | mutex_lock(&obj->dev->object_name_lock); | |
448 | ||
449 | return drm_gem_handle_create_tail(file_priv, obj, handlep); | |
450 | } | |
673a394b EA |
451 | EXPORT_SYMBOL(drm_gem_handle_create); |
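Putting drm_gem_object_init() and drm_gem_handle_create() together, a hedged sketch of a driver's buffer-creation path; struct foo_bo and foo_bo_create() are hypothetical, only the drm_gem_* calls are from this file:

```c
struct foo_bo {
	struct drm_gem_object base;
	/* driver-private bookkeeping would go here */
};

static int foo_bo_create(struct drm_device *dev, struct drm_file *file,
			 size_t size, u32 *handle)
{
	struct foo_bo *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return -ENOMEM;

	/* Attach shmem backing store; size must be page aligned. */
	ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
	if (ret) {
		kfree(bo);
		return ret;
	}

	/* Publish to userspace last, once the object is fully set up. */
	ret = drm_gem_handle_create(file, &bo->base, handle);

	/* The handle now holds a reference; drop the creation reference. */
	drm_gem_object_put_unlocked(&bo->base);
	return ret;
}
```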
452 | ||
75ef8b3b RC |
453 | |
454 | /** | |
455 | * drm_gem_free_mmap_offset - release a fake mmap offset for an object | |
456 | * @obj: obj in question | |
457 | * | |
458 | * This routine frees fake offsets allocated by drm_gem_create_mmap_offset(). | |
f74418a4 DV |
459 | * |
460 | * Note that drm_gem_object_release() already calls this function, so drivers | |
461 | * don't have to take care of releasing the mmap offset themselves when freeing | |
462 | * the GEM object. | |
75ef8b3b RC |
463 | */ |
464 | void | |
465 | drm_gem_free_mmap_offset(struct drm_gem_object *obj) | |
466 | { | |
467 | struct drm_device *dev = obj->dev; | |
75ef8b3b | 468 | |
b04a5906 | 469 | drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node); |
75ef8b3b RC |
470 | } |
471 | EXPORT_SYMBOL(drm_gem_free_mmap_offset); | |
472 | ||
473 | /** | |
367bbd49 | 474 | * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object |
75ef8b3b | 475 | * @obj: obj in question |
367bbd49 | 476 | * @size: the virtual size |
75ef8b3b RC |
477 | * |
478 | * GEM memory mapping works by handing back to userspace a fake mmap offset | |
479 | * it can use in a subsequent mmap(2) call. The DRM core code then looks | |
480 | * up the object based on the offset and sets up the various memory mapping | |
481 | * structures. | |
482 | * | |
367bbd49 | 483 | * This routine allocates and attaches a fake offset for @obj, in cases where |
940eba2d DV |
484 | * the virtual size differs from the physical size (ie. &drm_gem_object.size). |
485 | * Otherwise just use drm_gem_create_mmap_offset(). | |
f74418a4 DV |
486 | * |
487 | * This function is idempotent and handles an already allocated mmap offset | |
488 | * transparently. Drivers do not need to check for this case. | |
75ef8b3b RC |
489 | */ |
490 | int | |
367bbd49 | 491 | drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size) |
75ef8b3b RC |
492 | { |
493 | struct drm_device *dev = obj->dev; | |
75ef8b3b | 494 | |
b04a5906 | 495 | return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node, |
367bbd49 RC |
496 | size / PAGE_SIZE); |
497 | } | |
498 | EXPORT_SYMBOL(drm_gem_create_mmap_offset_size); | |
499 | ||
500 | /** | |
501 | * drm_gem_create_mmap_offset - create a fake mmap offset for an object | |
502 | * @obj: obj in question | |
503 | * | |
504 | * GEM memory mapping works by handing back to userspace a fake mmap offset | |
505 | * it can use in a subsequent mmap(2) call. The DRM core code then looks | |
506 | * up the object based on the offset and sets up the various memory mapping | |
507 | * structures. | |
508 | * | |
509 | * This routine allocates and attaches a fake offset for @obj. | |
f74418a4 DV |
510 | * |
511 | * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release | |
512 | * the fake offset again. | |
367bbd49 RC |
513 | */ |
514 | int drm_gem_create_mmap_offset(struct drm_gem_object *obj) | |
515 | { | |
516 | return drm_gem_create_mmap_offset_size(obj, obj->size); | |
75ef8b3b RC |
517 | } |
518 | EXPORT_SYMBOL(drm_gem_create_mmap_offset); | |
519 | ||
fb4b4927 KHY |
520 | /* |
521 | * Move pages to appropriate lru and release the pagevec, decrementing the | |
522 | * ref count of those pages. | |
523 | */ | |
524 | static void drm_gem_check_release_pagevec(struct pagevec *pvec) | |
525 | { | |
526 | check_move_unevictable_pages(pvec); | |
527 | __pagevec_release(pvec); | |
528 | cond_resched(); | |
529 | } | |
530 | ||
bcc5c9d5 RC |
531 | /** |
532 | * drm_gem_get_pages - helper to allocate backing pages for a GEM object | |
533 | * from shmem | |
534 | * @obj: obj in question | |
0cdbe8ac DH |
535 | * |
536 | * This reads the page-array of the shmem-backing storage of the given gem | |
537 | * object. An array of pages is returned. If a page is not allocated or | |
538 | * swapped-out, this will allocate/swap-in the required pages. Note that the | |
539 | * whole object is covered by the page-array and pinned in memory. | |
540 | * | |
541 | * Use drm_gem_put_pages() to release the array and unpin all pages. | |
542 | * | |
543 | * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()). | |
544 | * If you require other GFP-masks, you have to do those allocations yourself. | |
545 | * | |
546 | * Note that you are not allowed to change gfp-zones during runtime. That is, | |
547 | * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as | |
548 | * set during initialization. If you have special zone constraints, set them | |
5b9fbfff | 549 | * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care |
0cdbe8ac | 550 | * to keep pages in the required zone during swap-in. |
bcc5c9d5 | 551 | */ |
0cdbe8ac | 552 | struct page **drm_gem_get_pages(struct drm_gem_object *obj) |
bcc5c9d5 | 553 | { |
bcc5c9d5 RC |
554 | struct address_space *mapping; |
555 | struct page *p, **pages; | |
fb4b4927 | 556 | struct pagevec pvec; |
bcc5c9d5 RC |
557 | int i, npages; |
558 | ||
559 | /* This is the shared memory object that backs the GEM resource */ | |
93c76a3d | 560 | mapping = obj->filp->f_mapping; |
bcc5c9d5 RC |
561 | |
562 | /* We already BUG_ON() for non-page-aligned sizes in | |
563 | * drm_gem_object_init(), so we should never hit this unless | |
564 | * driver author is doing something really wrong: | |
565 | */ | |
566 | WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0); | |
567 | ||
568 | npages = obj->size >> PAGE_SHIFT; | |
569 | ||
2098105e | 570 | pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); |
bcc5c9d5 RC |
571 | if (pages == NULL) |
572 | return ERR_PTR(-ENOMEM); | |
573 | ||
fb4b4927 KHY |
574 | mapping_set_unevictable(mapping); |
575 | ||
bcc5c9d5 | 576 | for (i = 0; i < npages; i++) { |
0cdbe8ac | 577 | p = shmem_read_mapping_page(mapping, i); |
bcc5c9d5 RC |
578 | if (IS_ERR(p)) |
579 | goto fail; | |
580 | pages[i] = p; | |
581 | ||
2123000b DH |
582 | /* Make sure shmem keeps __GFP_DMA32 allocated pages in the |
583 | * correct region during swapin. Note that this requires | |
584 | * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping) | |
585 | * so shmem can relocate pages during swapin if required. | |
bcc5c9d5 | 586 | */ |
c62d2555 | 587 | BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) && |
bcc5c9d5 RC |
588 | (page_to_pfn(p) >= 0x00100000UL)); |
589 | } | |
590 | ||
591 | return pages; | |
592 | ||
593 | fail: | |
fb4b4927 KHY |
594 | mapping_clear_unevictable(mapping); |
595 | pagevec_init(&pvec); | |
596 | while (i--) { | |
597 | if (!pagevec_add(&pvec, pages[i])) | |
598 | drm_gem_check_release_pagevec(&pvec); | |
599 | } | |
600 | if (pagevec_count(&pvec)) | |
601 | drm_gem_check_release_pagevec(&pvec); | |
bcc5c9d5 | 602 | |
2098105e | 603 | kvfree(pages); |
bcc5c9d5 RC |
604 | return ERR_CAST(p); |
605 | } | |
606 | EXPORT_SYMBOL(drm_gem_get_pages); | |
607 | ||
608 | /** | |
609 | * drm_gem_put_pages - helper to free backing pages for a GEM object | |
610 | * @obj: obj in question | |
611 | * @pages: pages to free | |
612 | * @dirty: if true, pages will be marked as dirty | |
613 | * @accessed: if true, the pages will be marked as accessed | |
614 | */ | |
615 | void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, | |
616 | bool dirty, bool accessed) | |
617 | { | |
618 | int i, npages; | |
fb4b4927 KHY |
619 | struct address_space *mapping; |
620 | struct pagevec pvec; | |
621 | ||
622 | mapping = file_inode(obj->filp)->i_mapping; | |
623 | mapping_clear_unevictable(mapping); | |
bcc5c9d5 RC |
624 | |
625 | /* We already BUG_ON() for non-page-aligned sizes in | |
626 | * drm_gem_object_init(), so we should never hit this unless | |
627 | * driver author is doing something really wrong: | |
628 | */ | |
629 | WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0); | |
630 | ||
631 | npages = obj->size >> PAGE_SHIFT; | |
632 | ||
fb4b4927 | 633 | pagevec_init(&pvec); |
bcc5c9d5 RC |
634 | for (i = 0; i < npages; i++) { |
635 | if (dirty) | |
636 | set_page_dirty(pages[i]); | |
637 | ||
638 | if (accessed) | |
639 | mark_page_accessed(pages[i]); | |
640 | ||
641 | /* Undo the reference we took when populating the table */ | |
fb4b4927 KHY |
642 | if (!pagevec_add(&pvec, pages[i])) |
643 | drm_gem_check_release_pagevec(&pvec); | |
bcc5c9d5 | 644 | } |
fb4b4927 KHY |
645 | if (pagevec_count(&pvec)) |
646 | drm_gem_check_release_pagevec(&pvec); | |
bcc5c9d5 | 647 | |
2098105e | 648 | kvfree(pages); |
bcc5c9d5 RC |
649 | } |
650 | EXPORT_SYMBOL(drm_gem_put_pages); | |
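A hedged sketch of how a shmem-backed driver might pair these two helpers to pin and unpin its backing pages; struct foo_bo and its pages field are hypothetical:

```c
static int foo_bo_get_pages(struct foo_bo *bo)
{
	struct page **pages;

	/* Pins the whole object; pages stay resident until put back. */
	pages = drm_gem_get_pages(&bo->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	bo->pages = pages;
	return 0;
}

static void foo_bo_put_pages(struct foo_bo *bo)
{
	/* Mark pages dirty and accessed so their contents survive swap-out. */
	drm_gem_put_pages(&bo->base, bo->pages, true, true);
	bo->pages = NULL;
}
```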
651 | ||
c117aa4d RH |
652 | static int objects_lookup(struct drm_file *filp, u32 *handle, int count, |
653 | struct drm_gem_object **objs) | |
654 | { | |
655 | int i, ret = 0; | |
656 | struct drm_gem_object *obj; | |
657 | ||
658 | spin_lock(&filp->table_lock); | |
659 | ||
660 | for (i = 0; i < count; i++) { | |
661 | /* Check if we currently have a reference on the object */ | |
662 | obj = idr_find(&filp->object_idr, handle[i]); | |
663 | if (!obj) { | |
664 | ret = -ENOENT; | |
665 | break; | |
666 | } | |
667 | drm_gem_object_get(obj); | |
668 | objs[i] = obj; | |
669 | } | |
670 | spin_unlock(&filp->table_lock); | |
671 | ||
672 | return ret; | |
673 | } | |
674 | ||
675 | /** | |
676 | * drm_gem_objects_lookup - look up GEM objects from an array of handles | |
677 | * @filp: DRM file private data | |
678 | * @bo_handles: user pointer to array of userspace handles | |
679 | * @count: size of handle array | |
680 | * @objs_out: returned pointer to array of drm_gem_object pointers | |
681 | * | |
682 | * Takes an array of userspace handles and returns a newly allocated array of | |
683 | * GEM objects. | |
684 | * | |
685 | * For a single handle lookup, use drm_gem_object_lookup(). | |
686 | * | |
687 | * Returns: | |
688 | * | |
689 | * @objs filled in with GEM object pointers. Returned GEM objects need to be | |
690 | * released with drm_gem_object_put(). -ENOENT is returned on a lookup | |
691 | * failure. 0 is returned on success. | |
692 | * | |
693 | */ | |
694 | int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, | |
695 | int count, struct drm_gem_object ***objs_out) | |
696 | { | |
697 | int ret; | |
698 | u32 *handles; | |
699 | struct drm_gem_object **objs; | |
700 | ||
701 | if (!count) | |
702 | return 0; | |
703 | ||
704 | objs = kvmalloc_array(count, sizeof(struct drm_gem_object *), | |
705 | GFP_KERNEL | __GFP_ZERO); | |
706 | if (!objs) | |
707 | return -ENOMEM; | |
708 | ||
709 | handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL); | |
710 | if (!handles) { | |
711 | ret = -ENOMEM; | |
712 | goto out; | |
713 | } | |
714 | ||
715 | if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { | |
716 | ret = -EFAULT; | |
717 | DRM_DEBUG("Failed to copy in GEM handles\n"); | |
718 | goto out; | |
719 | } | |
720 | ||
721 | ret = objects_lookup(filp, handles, count, objs); | |
722 | *objs_out = objs; | |
723 | ||
724 | out: | |
725 | kvfree(handles); | |
726 | return ret; | |
727 | ||
728 | } | |
729 | EXPORT_SYMBOL(drm_gem_objects_lookup); | |
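A hedged sketch of an execbuffer-style ioctl handler using the array lookup above; struct drm_foo_submit and its fields are hypothetical uapi, only the drm_gem_* calls mirror this file:

```c
static int foo_submit_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_foo_submit *args = data;	/* hypothetical uapi struct */
	struct drm_gem_object **objs = NULL;
	int i, ret;

	ret = drm_gem_objects_lookup(file, u64_to_user_ptr(args->bo_handles),
				     args->bo_count, &objs);
	if (ret)
		return ret;

	/* ... validate the objects and queue the job here ... */

	for (i = 0; i < args->bo_count; i++)
		drm_gem_object_put_unlocked(objs[i]);
	kvfree(objs);
	return 0;
}
```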
730 | ||
df2e0900 | 731 | /** |
1e55a53a | 732 | * drm_gem_object_lookup - look up a GEM object from its handle |
df2e0900 DV |
733 | * @filp: DRM file private data |
734 | * @handle: userspace handle | |
735 | * | |
736 | * Returns: | |
737 | * | |
738 | * A reference to the object named by the handle if such exists on @filp, NULL | |
739 | * otherwise. | |
c117aa4d RH |
740 | * |
741 | * If looking up an array of handles, use drm_gem_objects_lookup(). | |
df2e0900 | 742 | */ |
673a394b | 743 | struct drm_gem_object * |
a8ad0bd8 | 744 | drm_gem_object_lookup(struct drm_file *filp, u32 handle) |
673a394b | 745 | { |
c117aa4d | 746 | struct drm_gem_object *obj = NULL; |
673a394b | 747 | |
c117aa4d | 748 | objects_lookup(filp, &handle, 1, &obj); |
673a394b EA |
749 | return obj; |
750 | } | |
751 | EXPORT_SYMBOL(drm_gem_object_lookup); | |
752 | ||
1ba62714 RH |
753 | /** |
754 | * drm_gem_reservation_object_wait - Wait on the shared and/or exclusive |
755 | * fences in a GEM object's reservation object. |
756 | * @filep: DRM file private data |
757 | * @handle: userspace handle | |
758 | * @wait_all: if true, wait on all fences, else wait on just exclusive fence | |
759 | * @timeout: timeout value in jiffies or zero to return immediately | |
760 | * | |
761 | * Returns: | |
762 | * | |
763 | * Returns 0 on success, -ETIME if the wait timed out, or another |
764 | * negative error code (such as -ERESTARTSYS or -EINVAL) on failure. |
765 | */ | |
766 | long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle, | |
767 | bool wait_all, unsigned long timeout) | |
768 | { | |
769 | long ret; | |
770 | struct drm_gem_object *obj; | |
771 | ||
772 | obj = drm_gem_object_lookup(filep, handle); | |
773 | if (!obj) { | |
774 | DRM_DEBUG("Failed to look up GEM BO %d\n", handle); | |
775 | return -EINVAL; | |
776 | } | |
777 | ||
778 | ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all, | |
779 | true, timeout); | |
780 | if (ret == 0) | |
781 | ret = -ETIME; | |
782 | else if (ret > 0) | |
783 | ret = 0; | |
784 | ||
785 | drm_gem_object_put_unlocked(obj); | |
786 | ||
787 | return ret; | |
788 | } | |
789 | EXPORT_SYMBOL(drm_gem_reservation_object_wait); | |
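A hedged sketch of a driver wait ioctl built on the helper above; struct drm_foo_wait_bo and its fields are hypothetical, and the relative-nanosecond timeout conversion is one possible choice:

```c
static int foo_wait_bo_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	/* Hypothetical uapi struct carrying a handle and a relative timeout in ns. */
	struct drm_foo_wait_bo *args = data;
	long ret;

	ret = drm_gem_reservation_object_wait(file, args->handle,
					      true /* wait_all */,
					      nsecs_to_jiffies(args->timeout_ns));
	return ret;	/* 0, -ETIME, or another negative error code */
}
```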
790 | ||
673a394b | 791 | /** |
89d61fc0 DV |
792 | * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl |
793 | * @dev: drm_device | |
794 | * @data: ioctl data | |
795 | * @file_priv: drm file-private structure | |
796 | * | |
673a394b EA |
797 | * Releases the handle to an mm object. |
798 | */ | |
799 | int | |
800 | drm_gem_close_ioctl(struct drm_device *dev, void *data, | |
801 | struct drm_file *file_priv) | |
802 | { | |
803 | struct drm_gem_close *args = data; | |
804 | int ret; | |
805 | ||
1bcecfac | 806 | if (!drm_core_check_feature(dev, DRIVER_GEM)) |
69fdf420 | 807 | return -EOPNOTSUPP; |
673a394b EA |
808 | |
809 | ret = drm_gem_handle_delete(file_priv, args->handle); | |
810 | ||
811 | return ret; | |
812 | } | |
813 | ||
814 | /** | |
89d61fc0 DV |
815 | * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl |
816 | * @dev: drm_device | |
817 | * @data: ioctl data | |
818 | * @file_priv: drm file-private structure | |
819 | * | |
673a394b EA |
820 | * Create a global name for an object, returning the name. |
821 | * | |
822 | * Note that the name does not hold a reference; when the object | |
823 | * is freed, the name goes away. | |
824 | */ | |
825 | int | |
826 | drm_gem_flink_ioctl(struct drm_device *dev, void *data, | |
827 | struct drm_file *file_priv) | |
828 | { | |
829 | struct drm_gem_flink *args = data; | |
830 | struct drm_gem_object *obj; | |
831 | int ret; | |
832 | ||
1bcecfac | 833 | if (!drm_core_check_feature(dev, DRIVER_GEM)) |
69fdf420 | 834 | return -EOPNOTSUPP; |
673a394b | 835 | |
a8ad0bd8 | 836 | obj = drm_gem_object_lookup(file_priv, args->handle); |
673a394b | 837 | if (obj == NULL) |
bf79cb91 | 838 | return -ENOENT; |
673a394b | 839 | |
cd4f013f | 840 | mutex_lock(&dev->object_name_lock); |
a8e11d1c DV |
841 | /* prevent races with concurrent gem_close. */ |
842 | if (obj->handle_count == 0) { | |
843 | ret = -ENOENT; | |
844 | goto err; | |
845 | } | |
846 | ||
8d59bae5 | 847 | if (!obj->name) { |
0f646425 | 848 | ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL); |
2e928815 | 849 | if (ret < 0) |
8d59bae5 | 850 | goto err; |
2e07fb22 YC |
851 | |
852 | obj->name = ret; | |
8d59bae5 | 853 | } |
3e49c4f4 | 854 | |
2e07fb22 YC |
855 | args->name = (uint64_t) obj->name; |
856 | ret = 0; | |
857 | ||
3e49c4f4 | 858 | err: |
cd4f013f | 859 | mutex_unlock(&dev->object_name_lock); |
e6b62714 | 860 | drm_gem_object_put_unlocked(obj); |
3e49c4f4 | 861 | return ret; |
673a394b EA |
862 | } |
863 | ||
864 | /** | |
89d61fc0 DV |
865 | * drm_gem_open - implementation of the GEM_OPEN ioctl |
866 | * @dev: drm_device | |
867 | * @data: ioctl data | |
868 | * @file_priv: drm file-private structure | |
869 | * | |
673a394b EA |
870 | * Open an object using the global name, returning a handle and the size. |
871 | * | |
872 | * This handle (of course) holds a reference to the object, so the object | |
873 | * will not go away until the handle is deleted. | |
874 | */ | |
875 | int | |
876 | drm_gem_open_ioctl(struct drm_device *dev, void *data, | |
877 | struct drm_file *file_priv) | |
878 | { | |
879 | struct drm_gem_open *args = data; | |
880 | struct drm_gem_object *obj; | |
881 | int ret; | |
a1a2d1d3 | 882 | u32 handle; |
673a394b | 883 | |
1bcecfac | 884 | if (!drm_core_check_feature(dev, DRIVER_GEM)) |
69fdf420 | 885 | return -EOPNOTSUPP; |
673a394b | 886 | |
cd4f013f | 887 | mutex_lock(&dev->object_name_lock); |
673a394b | 888 | obj = idr_find(&dev->object_name_idr, (int) args->name); |
20228c44 | 889 | if (obj) { |
e6b62714 | 890 | drm_gem_object_get(obj); |
20228c44 DV |
891 | } else { |
892 | mutex_unlock(&dev->object_name_lock); | |
673a394b | 893 | return -ENOENT; |
20228c44 | 894 | } |
673a394b | 895 | |
20228c44 DV |
896 | /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ |
897 | ret = drm_gem_handle_create_tail(file_priv, obj, &handle); | |
e6b62714 | 898 | drm_gem_object_put_unlocked(obj); |
673a394b EA |
899 | if (ret) |
900 | return ret; | |
901 | ||
902 | args->handle = handle; | |
903 | args->size = obj->size; | |
904 | ||
905 | return 0; | |
906 | } | |
907 | ||
908 | /** | |
89d61fc0 DV |
909 | * drm_gem_open - initializes GEM file-private structures at devnode open time |
910 | * @dev: drm_device which is being opened by userspace | |
911 | * @file_private: drm file-private structure to set up | |
912 | * | |
673a394b EA |
913 | * Called at device open time, sets up the structure for handling refcounting |
914 | * of mm objects. | |
915 | */ | |
916 | void | |
917 | drm_gem_open(struct drm_device *dev, struct drm_file *file_private) | |
918 | { | |
e86584c5 | 919 | idr_init_base(&file_private->object_idr, 1); |
673a394b EA |
920 | spin_lock_init(&file_private->table_lock); |
921 | } | |
922 | ||
673a394b | 923 | /** |
89d61fc0 DV |
924 | * drm_gem_release - release file-private GEM resources |
925 | * @dev: drm_device which is being closed by userspace | |
926 | * @file_private: drm file-private structure to clean up | |
927 | * | |
673a394b EA |
928 | * Called at close time when the filp is going away. |
929 | * | |
930 | * Releases any remaining references on objects by this filp. | |
931 | */ | |
932 | void | |
933 | drm_gem_release(struct drm_device *dev, struct drm_file *file_private) | |
934 | { | |
673a394b | 935 | idr_for_each(&file_private->object_idr, |
304eda32 | 936 | &drm_gem_object_release_handle, file_private); |
673a394b | 937 | idr_destroy(&file_private->object_idr); |
673a394b EA |
938 | } |
939 | ||
f74418a4 DV |
940 | /** |
941 | * drm_gem_object_release - release GEM buffer object resources | |
942 | * @obj: GEM buffer object | |
943 | * | |
944 | * This releases any structures and resources used by @obj and is the inverse of |
945 | * drm_gem_object_init(). | |
946 | */ | |
fd632aa3 DV |
947 | void |
948 | drm_gem_object_release(struct drm_gem_object *obj) | |
c3ae90c0 | 949 | { |
319c933c DV |
950 | WARN_ON(obj->dma_buf); |
951 | ||
62cb7011 | 952 | if (obj->filp) |
16d2831d | 953 | fput(obj->filp); |
77472347 | 954 | |
1ba62714 | 955 | reservation_object_fini(&obj->_resv); |
77472347 | 956 | drm_gem_free_mmap_offset(obj); |
c3ae90c0 | 957 | } |
fd632aa3 | 958 | EXPORT_SYMBOL(drm_gem_object_release); |
c3ae90c0 | 959 | |
673a394b | 960 | /** |
89d61fc0 DV |
961 | * drm_gem_object_free - free a GEM object |
962 | * @kref: kref of the object to free | |
963 | * | |
673a394b | 964 | * Called after the last reference to the object has been lost. |
940eba2d | 965 | * Must be called holding &drm_device.struct_mutex. |
673a394b EA |
966 | * |
967 | * Frees the object | |
968 | */ | |
969 | void | |
970 | drm_gem_object_free(struct kref *kref) | |
971 | { | |
6ff774bd DV |
972 | struct drm_gem_object *obj = |
973 | container_of(kref, struct drm_gem_object, refcount); | |
673a394b EA |
974 | struct drm_device *dev = obj->dev; |
975 | ||
b39b5394 NT |
976 | if (obj->funcs) { |
977 | obj->funcs->free(obj); | |
978 | } else if (dev->driver->gem_free_object_unlocked) { | |
9f0ba539 | 979 | dev->driver->gem_free_object_unlocked(obj); |
6d3e7fdd DV |
980 | } else if (dev->driver->gem_free_object) { |
981 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | |
982 | ||
673a394b | 983 | dev->driver->gem_free_object(obj); |
6d3e7fdd | 984 | } |
673a394b EA |
985 | } |
986 | EXPORT_SYMBOL(drm_gem_object_free); | |
987 | ||
9f0ba539 | 988 | /** |
e6b62714 | 989 | * drm_gem_object_put_unlocked - drop a GEM buffer object reference |
9f0ba539 DV |
990 | * @obj: GEM buffer object |
991 | * | |
992 | * This releases a reference to @obj. Callers must not hold the | |
940eba2d | 993 | * &drm_device.struct_mutex lock when calling this function. |
9f0ba539 | 994 | * |
e6b62714 | 995 | * See also __drm_gem_object_put(). |
9f0ba539 DV |
996 | */ |
997 | void | |
e6b62714 | 998 | drm_gem_object_put_unlocked(struct drm_gem_object *obj) |
9f0ba539 DV |
999 | { |
1000 | struct drm_device *dev; | |
1001 | ||
1002 | if (!obj) | |
1003 | return; | |
1004 | ||
1005 | dev = obj->dev; | |
9f0ba539 | 1006 | |
b39b5394 | 1007 | if (dev->driver->gem_free_object) { |
3379c04c DV |
1008 | might_lock(&dev->struct_mutex); |
1009 | if (kref_put_mutex(&obj->refcount, drm_gem_object_free, | |
9f0ba539 | 1010 | &dev->struct_mutex)) |
3379c04c | 1011 | mutex_unlock(&dev->struct_mutex); |
b39b5394 NT |
1012 | } else { |
1013 | kref_put(&obj->refcount, drm_gem_object_free); | |
3379c04c | 1014 | } |
9f0ba539 | 1015 | } |
e6b62714 | 1016 | EXPORT_SYMBOL(drm_gem_object_put_unlocked); |
9f0ba539 DV |
1017 | |
1018 | /** | |
e6b62714 | 1019 | * drm_gem_object_put - release a GEM buffer object reference |
9f0ba539 DV |
1020 | * @obj: GEM buffer object |
1021 | * | |
940eba2d DV |
1022 | * This releases a reference to @obj. Callers must hold the |
1023 | * &drm_device.struct_mutex lock when calling this function, even when the | |
1024 | * driver doesn't use &drm_device.struct_mutex for anything. | |
9f0ba539 DV |
1025 | * |
1026 | * For drivers not encumbered with legacy locking use | |
e6b62714 | 1027 | * drm_gem_object_put_unlocked() instead. |
9f0ba539 DV |
1028 | */ |
1029 | void | |
e6b62714 | 1030 | drm_gem_object_put(struct drm_gem_object *obj) |
9f0ba539 DV |
1031 | { |
1032 | if (obj) { | |
1033 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); | |
1034 | ||
1035 | kref_put(&obj->refcount, drm_gem_object_free); | |
1036 | } | |
1037 | } | |
e6b62714 | 1038 | EXPORT_SYMBOL(drm_gem_object_put); |
9f0ba539 | 1039 | |
df2e0900 DV |
1040 | /** |
1041 | * drm_gem_vm_open - vma->ops->open implementation for GEM | |
1042 | * @vma: VM area structure | |
1043 | * | |
1044 | * This function implements the #vm_operations_struct open() callback for GEM | |
1045 | * drivers. This must be used together with drm_gem_vm_close(). | |
1046 | */ | |
ab00b3e5 JB |
1047 | void drm_gem_vm_open(struct vm_area_struct *vma) |
1048 | { | |
1049 | struct drm_gem_object *obj = vma->vm_private_data; | |
1050 | ||
e6b62714 | 1051 | drm_gem_object_get(obj); |
ab00b3e5 JB |
1052 | } |
1053 | EXPORT_SYMBOL(drm_gem_vm_open); | |
1054 | ||
df2e0900 DV |
1055 | /** |
1056 | * drm_gem_vm_close - vma->ops->close implementation for GEM | |
1057 | * @vma: VM area structure | |
1058 | * | |
1059 | * This function implements the #vm_operations_struct close() callback for GEM | |
1060 | * drivers. This must be used together with drm_gem_vm_open(). | |
1061 | */ | |
ab00b3e5 JB |
1062 | void drm_gem_vm_close(struct vm_area_struct *vma) |
1063 | { | |
1064 | struct drm_gem_object *obj = vma->vm_private_data; | |
ab00b3e5 | 1065 | |
e6b62714 | 1066 | drm_gem_object_put_unlocked(obj); |
ab00b3e5 JB |
1067 | } |
1068 | EXPORT_SYMBOL(drm_gem_vm_close); | |
1069 | ||
1c5aafa6 LP |
1070 | /** |
1071 | * drm_gem_mmap_obj - memory map a GEM object | |
1072 | * @obj: the GEM object to map | |
1073 | * @obj_size: the object size to be mapped, in bytes | |
1074 | * @vma: VMA for the area to be mapped | |
1075 | * | |
1076 | * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops | |
1077 | * provided by the driver. Depending on their requirements, drivers can either | |
1078 | * provide a fault handler in their gem_vm_ops (in which case any accesses to | |
1079 | * the object will be trapped, to perform migration, GTT binding, surface | |
1080 | * register allocation, or performance monitoring), or mmap the buffer memory | |
1081 | * synchronously after calling drm_gem_mmap_obj. | |
1082 | * | |
1083 | * This function is mainly intended to implement the DMABUF mmap operation, when | |
1084 | * the GEM object is not looked up based on its fake offset. To implement the | |
1085 | * DRM mmap operation, drivers should use the drm_gem_mmap() function. | |
1086 | * | |
ca481c9b DH |
1087 | * drm_gem_mmap_obj() assumes the user is granted access to the buffer while |
1088 | * drm_gem_mmap() prevents unprivileged users from mapping random objects. So | |
1089 | * callers must verify access restrictions before calling this helper. | |
1090 | * | |
1c5aafa6 LP |
1091 | * Return 0 on success or -EINVAL if the object size is smaller than the VMA |
1092 | * size, or if no gem_vm_ops are provided. | |
1093 | */ | |
1094 | int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, | |
1095 | struct vm_area_struct *vma) | |
1096 | { | |
1097 | struct drm_device *dev = obj->dev; | |
1098 | ||
1099 | /* Check for valid size. */ | |
1100 | if (obj_size < vma->vm_end - vma->vm_start) | |
1101 | return -EINVAL; | |
1102 | ||
b39b5394 NT |
1103 | if (obj->funcs && obj->funcs->vm_ops) |
1104 | vma->vm_ops = obj->funcs->vm_ops; | |
1105 | else if (dev->driver->gem_vm_ops) | |
1106 | vma->vm_ops = dev->driver->gem_vm_ops; | |
1107 | else | |
1c5aafa6 LP |
1108 | return -EINVAL; |
1109 | ||
1110 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; | |
1c5aafa6 | 1111 | vma->vm_private_data = obj; |
16d2831d | 1112 | vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
95cf9264 | 1113 | vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); |
1c5aafa6 LP |
1114 | |
1115 | /* Take a ref for this mapping of the object, so that the fault | |
1116 | * handler can dereference the mmap offset's pointer to the object. | |
1117 | * This reference is cleaned up by the corresponding vm_close | |
1118 | * (which should happen whether the vma was created by this call, or | |
1119 | * by a vm_open due to mremap or partial unmap or whatever). | |
1120 | */ | |
e6b62714 | 1121 | drm_gem_object_get(obj); |
1c5aafa6 | 1122 | |
1c5aafa6 LP |
1123 | return 0; |
1124 | } | |
1125 | EXPORT_SYMBOL(drm_gem_mmap_obj); | |
ab00b3e5 | 1126 | |
a2c0a97b JB |
1127 | /** |
1128 | * drm_gem_mmap - memory map routine for GEM objects | |
1129 | * @filp: DRM file pointer | |
1130 | * @vma: VMA for the area to be mapped | |
1131 | * | |
1132 | * If a driver supports GEM object mapping, mmap calls on the DRM file | |
1133 | * descriptor will end up here. | |
1134 | * | |
1c5aafa6 | 1135 | * Look up the GEM object based on the offset passed in (vma->vm_pgoff will |
a2c0a97b | 1136 | * contain the fake offset we created when the GTT map ioctl was called on |
1c5aafa6 | 1137 | * the object) and map it with a call to drm_gem_mmap_obj(). |
ca481c9b DH |
1138 | * |
1139 | * If the caller is not granted access to the buffer object, the mmap will fail | |
1140 | * with EACCES. Please see the vma manager for more information. | |
a2c0a97b JB |
1141 | */ |
1142 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |
1143 | { | |
1144 | struct drm_file *priv = filp->private_data; | |
1145 | struct drm_device *dev = priv->minor->dev; | |
2225cfe4 | 1146 | struct drm_gem_object *obj = NULL; |
0de23977 | 1147 | struct drm_vma_offset_node *node; |
a8469aa8 | 1148 | int ret; |
a2c0a97b | 1149 | |
c07dcd61 | 1150 | if (drm_dev_is_unplugged(dev)) |
2c07a21d DA |
1151 | return -ENODEV; |
1152 | ||
2225cfe4 DV |
1153 | drm_vma_offset_lock_lookup(dev->vma_offset_manager); |
1154 | node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, | |
1155 | vma->vm_pgoff, | |
1156 | vma_pages(vma)); | |
1157 | if (likely(node)) { | |
1158 | obj = container_of(node, struct drm_gem_object, vma_node); | |
1159 | /* | |
1160 | * When the object is being freed, after it hits 0-refcnt it | |
1161 | * proceeds to tear down the object. In the process it will | |
1162 | * attempt to remove the VMA offset and so acquire this | |
1163 | * mgr->vm_lock. Therefore if we find an object with a 0-refcnt | |
1164 | * that matches our range, we know it is in the process of being | |
1165 | * destroyed and will be freed as soon as we release the lock - | |
1166 | * so we have to check for the 0-refcnted object and treat it as | |
1167 | * invalid. | |
1168 | */ | |
1169 | if (!kref_get_unless_zero(&obj->refcount)) | |
1170 | obj = NULL; | |
1171 | } | |
1172 | drm_vma_offset_unlock_lookup(dev->vma_offset_manager); | |
a2c0a97b | 1173 | |
2225cfe4 | 1174 | if (!obj) |
197633b9 | 1175 | return -EINVAL; |
2225cfe4 | 1176 | |
d9a1f0b4 | 1177 | if (!drm_vma_node_is_allowed(node, priv)) { |
e6b62714 | 1178 | drm_gem_object_put_unlocked(obj); |
ca481c9b | 1179 | return -EACCES; |
a2c0a97b JB |
1180 | } |
1181 | ||
3e977ac6 CW |
1182 | if (node->readonly) { |
1183 | if (vma->vm_flags & VM_WRITE) { | |
1184 | drm_gem_object_put_unlocked(obj); | |
1185 | return -EINVAL; | |
1186 | } | |
1187 | ||
1188 | vma->vm_flags &= ~VM_MAYWRITE; | |
1189 | } | |
1190 | ||
2225cfe4 DV |
1191 | ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, |
1192 | vma); | |
a2c0a97b | 1193 | |
e6b62714 | 1194 | drm_gem_object_put_unlocked(obj); |
a2c0a97b JB |
1195 | |
1196 | return ret; | |
1197 | } | |
1198 | EXPORT_SYMBOL(drm_gem_mmap); | |
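A hedged sketch of the usual wiring: drm_gem_mmap() goes into the device's file_operations, and the driver supplies the vm_ops that drm_gem_mmap_obj() installs; foo_gem_fault() is a hypothetical driver fault handler, the rest are core DRM helpers:

```c
static const struct vm_operations_struct foo_gem_vm_ops = {
	.fault = foo_gem_fault,		/* hypothetical driver fault handler */
	.open  = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= drm_gem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
};

/* Referenced from struct drm_driver as .fops = &foo_fops and
 * .gem_vm_ops = &foo_gem_vm_ops.
 */
```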
45d58b40 NT |
1199 | |
1200 | void drm_gem_print_info(struct drm_printer *p, unsigned int indent, | |
1201 | const struct drm_gem_object *obj) | |
1202 | { | |
1203 | drm_printf_indent(p, indent, "name=%d\n", obj->name); | |
1204 | drm_printf_indent(p, indent, "refcount=%u\n", | |
1205 | kref_read(&obj->refcount)); | |
1206 | drm_printf_indent(p, indent, "start=%08lx\n", | |
1207 | drm_vma_node_start(&obj->vma_node)); | |
1208 | drm_printf_indent(p, indent, "size=%zu\n", obj->size); | |
1209 | drm_printf_indent(p, indent, "imported=%s\n", | |
1210 | obj->import_attach ? "yes" : "no"); | |
1211 | ||
b39b5394 NT |
1212 | if (obj->funcs && obj->funcs->print_info) |
1213 | obj->funcs->print_info(p, indent, obj); | |
1214 | else if (obj->dev->driver->gem_print_info) | |
45d58b40 NT |
1215 | obj->dev->driver->gem_print_info(p, indent, obj); |
1216 | } | |
b39b5394 | 1217 | |
b39b5394 NT |
1218 | int drm_gem_pin(struct drm_gem_object *obj) |
1219 | { | |
1220 | if (obj->funcs && obj->funcs->pin) | |
1221 | return obj->funcs->pin(obj); | |
1222 | else if (obj->dev->driver->gem_prime_pin) | |
1223 | return obj->dev->driver->gem_prime_pin(obj); | |
1224 | else | |
1225 | return 0; | |
1226 | } | |
b39b5394 | 1227 | |
b39b5394 NT |
1228 | void drm_gem_unpin(struct drm_gem_object *obj) |
1229 | { | |
1230 | if (obj->funcs && obj->funcs->unpin) | |
1231 | obj->funcs->unpin(obj); | |
1232 | else if (obj->dev->driver->gem_prime_unpin) | |
1233 | obj->dev->driver->gem_prime_unpin(obj); | |
1234 | } | |
b39b5394 | 1235 | |
b39b5394 NT |
1236 | void *drm_gem_vmap(struct drm_gem_object *obj) |
1237 | { | |
1238 | void *vaddr; | |
1239 | ||
1240 | if (obj->funcs && obj->funcs->vmap) | |
1241 | vaddr = obj->funcs->vmap(obj); | |
1242 | else if (obj->dev->driver->gem_prime_vmap) | |
1243 | vaddr = obj->dev->driver->gem_prime_vmap(obj); | |
1244 | else | |
1245 | vaddr = ERR_PTR(-EOPNOTSUPP); | |
1246 | ||
1247 | if (!vaddr) | |
1248 | vaddr = ERR_PTR(-ENOMEM); | |
1249 | ||
1250 | return vaddr; | |
1251 | } | |
b39b5394 | 1252 | |
b39b5394 NT |
1253 | void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr) |
1254 | { | |
1255 | if (!vaddr) | |
1256 | return; | |
1257 | ||
1258 | if (obj->funcs && obj->funcs->vunmap) | |
1259 | obj->funcs->vunmap(obj, vaddr); | |
1260 | else if (obj->dev->driver->gem_prime_vunmap) | |
1261 | obj->dev->driver->gem_prime_vunmap(obj, vaddr); | |
1262 | } | |
7edc3e3b EA |
1263 | |
1264 | /** | |
1265 | * drm_gem_lock_reservations - Sets up the ww context and acquires | |
1266 | * the lock on an array of GEM objects. | |
1267 | * | |
1268 | * Once you've locked your reservations, you'll want to set up space | |
1269 | * for your shared fences (if applicable), submit your job, then | |
1270 | * drm_gem_unlock_reservations(). | |
1271 | * | |
1272 | * @objs: drm_gem_objects to lock | |
1273 | * @count: Number of objects in @objs | |
1274 | * @acquire_ctx: struct ww_acquire_ctx that will be initialized as | |
1275 | * part of tracking this set of locked reservations. | |
1276 | */ | |
1277 | int | |
1278 | drm_gem_lock_reservations(struct drm_gem_object **objs, int count, | |
1279 | struct ww_acquire_ctx *acquire_ctx) | |
1280 | { | |
1281 | int contended = -1; | |
1282 | int i, ret; | |
1283 | ||
1284 | ww_acquire_init(acquire_ctx, &reservation_ww_class); | |
1285 | ||
1286 | retry: | |
1287 | if (contended != -1) { | |
1288 | struct drm_gem_object *obj = objs[contended]; | |
1289 | ||
1290 | ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock, | |
1291 | acquire_ctx); | |
1292 | if (ret) { | |
1293 | ww_acquire_done(acquire_ctx); | |
1294 | return ret; | |
1295 | } | |
1296 | } | |
1297 | ||
1298 | for (i = 0; i < count; i++) { | |
1299 | if (i == contended) | |
1300 | continue; | |
1301 | ||
1302 | ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock, | |
1303 | acquire_ctx); | |
1304 | if (ret) { | |
1305 | int j; | |
1306 | ||
1307 | for (j = 0; j < i; j++) | |
1308 | ww_mutex_unlock(&objs[j]->resv->lock); | |
1309 | ||
1310 | if (contended != -1 && contended >= i) | |
1311 | ww_mutex_unlock(&objs[contended]->resv->lock); | |
1312 | ||
1313 | if (ret == -EDEADLK) { | |
1314 | contended = i; | |
1315 | goto retry; | |
1316 | } | |
1317 | ||
1318 | ww_acquire_done(acquire_ctx); | |
1319 | return ret; | |
1320 | } | |
1321 | } | |
1322 | ||
1323 | ww_acquire_done(acquire_ctx); | |
1324 | ||
1325 | return 0; | |
1326 | } | |
1327 | EXPORT_SYMBOL(drm_gem_lock_reservations); | |
1328 | ||
1329 | void | |
1330 | drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, | |
1331 | struct ww_acquire_ctx *acquire_ctx) | |
1332 | { | |
1333 | int i; | |
1334 | ||
1335 | for (i = 0; i < count; i++) | |
1336 | ww_mutex_unlock(&objs[i]->resv->lock); | |
1337 | ||
1338 | ww_acquire_fini(acquire_ctx); | |
1339 | } | |
1340 | EXPORT_SYMBOL(drm_gem_unlock_reservations); | |
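A hedged sketch of the lock/submit/unlock pattern described above, as it might look in a driver's job-submission path; struct foo_job and foo_push_job() are hypothetical:

```c
static int foo_submit_job(struct foo_job *job)
{
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	ret = drm_gem_lock_reservations(job->bos, job->bo_count, &acquire_ctx);
	if (ret)
		return ret;

	/* Reservations held: reserve fence slots, attach fences, push the job. */
	ret = foo_push_job(job);	/* hypothetical */

	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);
	return ret;
}
```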
5d5a179d EA |
1341 | |
1342 | /** | |
1343 | * drm_gem_fence_array_add - Adds the fence to an array of fences to be | |
1344 | * waited on, deduplicating fences from the same context. | |
1345 | * | |
761e473f SP |
1346 | * @fence_array: array of dma_fence * for the job to block on. |
1347 | * @fence: the dma_fence to add to the list of dependencies. | |
5d5a179d EA |
1348 | * |
1349 | * Returns: | |
1350 | * 0 on success, or an error on failing to expand the array. | |
1351 | */ | |
1352 | int drm_gem_fence_array_add(struct xarray *fence_array, | |
1353 | struct dma_fence *fence) | |
1354 | { | |
1355 | struct dma_fence *entry; | |
1356 | unsigned long index; | |
1357 | u32 id = 0; | |
1358 | int ret; | |
1359 | ||
1360 | if (!fence) | |
1361 | return 0; | |
1362 | ||
1363 | /* Deduplicate if we already depend on a fence from the same context. | |
1364 | * This lets the size of the array of deps scale with the number of | |
1365 | * engines involved, rather than the number of BOs. | |
1366 | */ | |
1367 | xa_for_each(fence_array, index, entry) { | |
1368 | if (entry->context != fence->context) | |
1369 | continue; | |
1370 | ||
1371 | if (dma_fence_is_later(fence, entry)) { | |
1372 | dma_fence_put(entry); | |
1373 | xa_store(fence_array, index, fence, GFP_KERNEL); | |
1374 | } else { | |
1375 | dma_fence_put(fence); | |
1376 | } | |
1377 | return 0; | |
1378 | } | |
1379 | ||
1380 | ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL); | |
1381 | if (ret != 0) | |
1382 | dma_fence_put(fence); | |
1383 | ||
1384 | return ret; | |
1385 | } | |
1386 | EXPORT_SYMBOL(drm_gem_fence_array_add); | |
1387 | ||
1388 | /** | |
1389 | * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked | |
1390 | * in the GEM object's reservation object to an array of dma_fences for use in | |
1391 | * scheduling a rendering job. | |
1392 | * | |
1393 | * This should be called after drm_gem_lock_reservations() on your array of | |
1394 | * GEM objects used in the job but before updating the reservations with your | |
1395 | * own fences. | |
1396 | * | |
761e473f SP |
1397 | * @fence_array: array of dma_fence * for the job to block on. |
1398 | * @obj: the gem object to add new dependencies from. | |
1399 | * @write: whether the job might write the object (so we need to depend on | |
5d5a179d EA |
1400 | * shared fences in the reservation object). |
1401 | */ | |
1402 | int drm_gem_fence_array_add_implicit(struct xarray *fence_array, | |
1403 | struct drm_gem_object *obj, | |
1404 | bool write) | |
1405 | { | |
1406 | int ret; | |
1407 | struct dma_fence **fences; | |
1408 | unsigned int i, fence_count; | |
1409 | ||
1410 | if (!write) { | |
1411 | struct dma_fence *fence = | |
1412 | reservation_object_get_excl_rcu(obj->resv); | |
1413 | ||
1414 | return drm_gem_fence_array_add(fence_array, fence); | |
1415 | } | |
1416 | ||
1417 | ret = reservation_object_get_fences_rcu(obj->resv, NULL, | |
1418 | &fence_count, &fences); | |
1419 | if (ret || !fence_count) | |
1420 | return ret; | |
1421 | ||
1422 | for (i = 0; i < fence_count; i++) { | |
1423 | ret = drm_gem_fence_array_add(fence_array, fences[i]); | |
1424 | if (ret) | |
1425 | break; | |
1426 | } | |
1427 | ||
1428 | for (; i < fence_count; i++) | |
1429 | dma_fence_put(fences[i]); | |
1430 | kfree(fences); | |
1431 | return ret; | |
1432 | } | |
1433 | EXPORT_SYMBOL(drm_gem_fence_array_add_implicit); |
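A hedged sketch of collecting implicit dependencies for a job into an xarray using the helper above; the deps array and the write flag handling are illustrative only:

```c
static int foo_job_add_deps(struct xarray *deps,
			    struct drm_gem_object **bos, int count,
			    bool write)
{
	int i, ret;

	/* deps is assumed to have been set up with xa_init_flags(deps, XA_FLAGS_ALLOC),
	 * which drm_gem_fence_array_add() requires for xa_alloc().
	 */
	for (i = 0; i < count; i++) {
		ret = drm_gem_fence_array_add_implicit(deps, bos[i], write);
		if (ret)
			return ret;
	}
	return 0;
}
```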