/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
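
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically embeds struct drm_gem_object in its own buffer type and
 * initializes it with drm_gem_object_init(). The "foo" names below are
 * hypothetical:
 *
 *	struct foo_obj {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_obj *foo_obj_create(struct drm_device *dev,
 *					      size_t size)
 *	{
 *		struct foo_obj *fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		int ret;
 *
 *		if (!fobj)
 *			return ERR_PTR(-ENOMEM);
 *
 *		fobj->base.funcs = &foo_gem_funcs;
 *		ret = drm_gem_object_init(dev, &fobj->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(fobj);
 *			return ERR_PTR(ret);
 *		}
 *		return fobj;
 *	}
 */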

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
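
/*
 * Usage sketch (illustrative only, not part of this file): drivers that
 * manage dumb buffers with GEM can wire these helpers straight into their
 * &drm_driver ("foo" names are hypothetical):
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create = foo_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		.dumb_destroy = drm_gem_dumb_destroy,
 *	};
 */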

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
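
/*
 * Usage sketch (illustrative only, not part of this file): a buffer
 * creation callback ends with drm_gem_handle_create() and then drops its
 * local reference, since the handle now keeps the object alive ("foo"
 * names, including foo_obj_create() from the sketch above, are
 * hypothetical):
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_obj *fobj;
 *		int ret;
 *
 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *		args->size = args->pitch * args->height;
 *
 *		fobj = foo_obj_create(dev, args->size);
 *		if (IS_ERR(fobj))
 *			return PTR_ERR(fobj);
 *
 *		ret = drm_gem_handle_create(file_priv, &fobj->base,
 *					    &args->handle);
 *		drm_gem_object_put(&fobj->base);
 *		return ret;
 *	}
 */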

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
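
/*
 * Usage sketch (illustrative only, not part of this file): the two
 * helpers above are used in pairs, e.g. around building and tearing down
 * a scatter/gather table for a shmem-backed object:
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... use the array, e.g. feed it to drm_prime_pages_to_sg() ...
 *	drm_gem_put_pages(obj, pages, true, true);
 */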

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
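
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * submit ioctl resolves all BO handles of a job in one call; the "args"
 * field names are hypothetical:
 *
 *	struct drm_gem_object **objs;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_handle_count, &objs);
 *	if (ret)
 *		return ret;
 *	... use the objects ...
 *	for (i = 0; i < args->bo_handle_count; i++)
 *		drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */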

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on a GEM object's reservation's shared
 * and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
					true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
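
/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * wait-BO ioctl can be implemented almost entirely with this helper
 * ("foo" names and the args layout are hypothetical):
 *
 *	static int foo_gem_wait_ioctl(struct drm_device *dev, void *data,
 *				      struct drm_file *file_priv)
 *	{
 *		struct drm_foo_wait *args = data;
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle, true,
 *					     msecs_to_jiffies(args->timeout_ms));
 *	}
 */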

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_locked - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put() instead.
 */
void
drm_gem_object_put_locked(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_locked);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
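
/*
 * Usage sketch (illustrative only, not part of this file): drivers with a
 * fault handler pair these two helpers in their vm_ops, which are then
 * hooked up through &drm_gem_object_funcs.vm_ops ("foo_gem_fault" is
 * hypothetical):
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */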

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (obj->funcs->vm_ops)
			vma->vm_ops = obj->funcs->vm_ops;
		else {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
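
/*
 * Usage sketch (illustrative only, not part of this file): mmap calls
 * reach drm_gem_mmap() through the driver's file_operations, e.g. via the
 * DEFINE_DRM_GEM_FOPS() helper from <drm/drm_gem.h> ("foo" names are
 * hypothetical):
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.fops = &foo_fops,
 *	};
 */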

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
}
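
/*
 * Usage sketch (illustrative only, not part of this file): kernel-internal
 * CPU access to a buffer brackets it with the two helpers above:
 *
 *	void *vaddr = drm_gem_vmap(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);	... for example ...
 *	drm_gem_vunmap(obj, vaddr);
 */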

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
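
/*
 * Usage sketch (illustrative only, not part of this file): a job
 * submission path brackets its fence bookkeeping with these two calls:
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *	... collect dependencies and add the job's own fences ...
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */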

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
				      &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
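
/*
 * Usage sketch (illustrative only, not part of this file): with the
 * reservations locked, a scheduler-based driver can gather the implicit
 * dependencies of every BO of a job into a single xarray of fences
 * ("job->deps" and the write flag are hypothetical):
 *
 *	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
 *	for (i = 0; i < count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&job->deps, objs[i],
 *						       args->flags & FOO_WRITE);
 *		if (ret)
 *			goto err_unlock;
 *	}
 */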