/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
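
/*
 * Example (illustrative sketch, not part of the DRM core): a shmem-backed
 * driver typically embeds &struct drm_gem_object in its own object, assigns
 * its &drm_gem_object_funcs and then calls this helper. The "foo_obj"
 * container and "foo_gem_funcs" below are hypothetical names:
 *
 *	struct foo_obj *fobj;
 *	int ret;
 *
 *	fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *	if (!fobj)
 *		return ERR_PTR(-ENOMEM);
 *
 *	fobj->base.funcs = &foo_gem_funcs;
 *	ret = drm_gem_object_init(dev, &fobj->base, round_up(size, PAGE_SIZE));
 *	if (ret) {
 *		kfree(fobj);
 *		return ERR_PTR(ret);
 *	}
 */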

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 u32 handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
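
/*
 * Example (illustrative sketch, not part of the DRM core): a typical
 * buffer-creation path in a hypothetical "foo" driver allocates its object,
 * publishes it to userspace with drm_gem_handle_create() as the very last
 * step, and then drops its own reference so the handle holds the only
 * userspace-visible reference. "foo_gem_object_alloc" is a made-up helper:
 *
 *	static int foo_gem_create(struct drm_device *dev, struct drm_file *file,
 *				  size_t size, u32 *handle)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = foo_gem_object_alloc(dev, size);
 *		if (IS_ERR(obj))
 *			return PTR_ERR(obj);
 *
 *		ret = drm_gem_handle_create(file, obj, handle);
 *		// the handle now owns a reference; drop the creation reference
 *		drm_gem_object_put(obj);
 *
 *		return ret;
 *	}
 */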

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
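
/*
 * Example (illustrative sketch): a shmem-backed driver might populate its
 * backing pages lazily and release them again with drm_gem_put_pages().
 * The "foo_obj" container and its "pages" field are hypothetical:
 *
 *	static int foo_obj_get_pages(struct foo_obj *fobj)
 *	{
 *		struct page **pages = drm_gem_get_pages(&fobj->base);
 *
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		fobj->pages = pages;
 *		return 0;
 *	}
 *
 *	static void foo_obj_put_pages(struct foo_obj *fobj)
 *	{
 *		drm_gem_put_pages(&fobj->base, fobj->pages, true, true);
 *		fobj->pages = NULL;
 *	}
 */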

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
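
/*
 * Example (illustrative sketch): a submit ioctl of a hypothetical driver can
 * resolve the whole handle array passed by userspace in one call. The "args"
 * field names are made up for illustration:
 *
 *	struct drm_gem_object **objs = NULL;
 *	int ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		return ret;
 *	// ... use objs[0..bo_count-1], then drop each reference with
 *	// drm_gem_object_put() and free the array with kvfree(objs).
 */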

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on the shared and/or exclusive fences of a
 * GEM object's reservation object.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
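
/*
 * Example (illustrative sketch): a driver's BO-wait ioctl can reduce to a
 * single call; the "args" fields are hypothetical, and the conversion from an
 * absolute nanosecond timeout uses drm_timeout_abs_to_jiffies():
 *
 *	return drm_gem_dma_resv_wait(file_priv, args->handle, true,
 *				     drm_timeout_abs_to_jiffies(args->timeout_ns));
 */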

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
964
df2e0900
DV
965/**
966 * drm_gem_vm_open - vma->ops->open implementation for GEM
967 * @vma: VM area structure
968 *
969 * This function implements the #vm_operations_struct open() callback for GEM
970 * drivers. This must be used together with drm_gem_vm_close().
971 */
ab00b3e5
JB
972void drm_gem_vm_open(struct vm_area_struct *vma)
973{
974 struct drm_gem_object *obj = vma->vm_private_data;
975
e6b62714 976 drm_gem_object_get(obj);
ab00b3e5
JB
977}
978EXPORT_SYMBOL(drm_gem_vm_open);
979
df2e0900
DV
980/**
981 * drm_gem_vm_close - vma->ops->close implementation for GEM
982 * @vma: VM area structure
983 *
984 * This function implements the #vm_operations_struct close() callback for GEM
985 * drivers. This must be used together with drm_gem_vm_open().
986 */
ab00b3e5
JB
987void drm_gem_vm_close(struct vm_area_struct *vma)
988{
989 struct drm_gem_object *obj = vma->vm_private_data;
ab00b3e5 990
be6ee102 991 drm_gem_object_put(obj);
ab00b3e5
JB
992}
993EXPORT_SYMBOL(drm_gem_vm_close);
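
/*
 * Example (illustrative sketch): a driver wiring these helpers into its
 * &vm_operations_struct; the fault handler "foo_gem_fault" is hypothetical:
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */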

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
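
/*
 * Example (illustrative sketch): GEM drivers typically plug this straight
 * into their &file_operations, either via the DEFINE_DRM_GEM_FOPS() macro or
 * spelled out by hand:
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *		.poll = drm_poll,
 *		.read = drm_read,
 *	};
 */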

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(obj->import_attach));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap_unlocked);

void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
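
/*
 * Example (illustrative sketch): CPU access to a buffer through a kernel
 * mapping, using the unlocked helpers so the reservation lock is taken
 * internally:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap_unlocked(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memset(&map, 0, 0, obj->size);	// e.g. clear the buffer
 *	drm_gem_vunmap_unlocked(obj, &map);
 */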

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
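
/*
 * Example (illustrative sketch): a submission path locking all buffer
 * objects of a job, reserving fence slots and attaching the job's completion
 * fence before unlocking again. "job" and its members are hypothetical:
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < job->bo_count; i++) {
 *		ret = dma_resv_reserve_fences(job->bos[i]->resv, 1);
 *		if (ret)
 *			break;
 *		dma_resv_add_fence(job->bos[i]->resv, job->done_fence,
 *				   DMA_RESV_USAGE_WRITE);
 *	}
 *
 *	drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
 */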

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

static void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail. Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * moves the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it.  So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock.
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!dma_resv_trylock(obj->resv))
			goto tail;

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU.
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU.
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
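
/*
 * Example (illustrative sketch): wiring this helper into a shrinker's
 * scan_objects callback. "foo_device", its "lru" member, its embedded
 * "shrinker" and the "foo_gem_purge" shrink callback are hypothetical:
 *
 *	static unsigned long
 *	foo_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 *	{
 *		struct foo_device *foo =
 *			container_of(shrinker, struct foo_device, shrinker);
 *
 *		return drm_gem_lru_scan(&foo->lru, sc->nr_to_scan, foo_gem_purge);
 *	}
 */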