// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
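
/*
 * For illustration only (a hedged sketch, not part of the original helpers):
 * a driver that needs a custom .free callback but otherwise wants the shmem
 * defaults can mix its own function into the _object_ helpers. foo_gem_free
 * and foo_gem_funcs are hypothetical names.
 *
 *	static const struct drm_gem_object_funcs foo_gem_funcs = {
 *		.free = foo_gem_free,
 *		.print_info = drm_gem_shmem_object_print_info,
 *		.pin = drm_gem_shmem_object_pin,
 *		.unpin = drm_gem_shmem_object_unpin,
 *		.get_sg_table = drm_gem_shmem_object_get_sg_table,
 *		.vmap = drm_gem_shmem_object_vmap,
 *		.vunmap = drm_gem_shmem_object_vunmap,
 *		.mmap = drm_gem_shmem_object_mmap,
 *		.vm_ops = &drm_gem_shmem_vm_ops,
 *	};
 */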

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
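
/*
 * A minimal usage sketch (not part of the original file; error handling
 * shortened). A driver allocates a buffer, works with it through the
 * embedded GEM object, and drops its reference when done:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *	... use shmem->base as the GEM object ...
 *	drm_gem_object_put(&shmem->base);
 */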

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	drm_WARN_ON(obj->dev, shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
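
/*
 * A hedged usage sketch (not from the original file): map the buffer into
 * the kernel address space, access it through the iosys_map accessors, and
 * unmap it again with the same map handle:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *	iosys_map_memset(&map, 0, 0, shmem->base.size);
 *	drm_gem_shmem_vunmap(shmem, &map);
 */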

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle is the id that userspace will see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}

/* Update madvise status, returns true if the object has not been purged,
 * false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
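
/*
 * A hedged sketch (not in the original file) of how these two helpers pair
 * up in a driver: a driver-specific madvise ioctl marks buffers purgeable
 * (a positive madv value), and a memory shrinker later reclaims them. The
 * ioctl and shrinker plumbing are hypothetical; only the helper calls are
 * taken from this file:
 *
 *	... in the driver's madvise ioctl ...
 *	retained = drm_gem_shmem_madvise(shmem, args->madv);
 *
 *	... in the shrinker scan callback, for each candidate object ...
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		drm_gem_shmem_purge(shmem);
 */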

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
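
/*
 * A minimal wiring sketch (not part of this file): drivers without extra
 * pitch restrictions can plug the helper straight into their &drm_driver;
 * foo_driver and the feature flags shown are illustrative only.
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *		...
 *	};
 */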

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	mutex_lock(&shmem->pages_lock);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	mutex_unlock(&shmem->pages_lock);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired. */
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer
 * on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
696
ddddedaa 697static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
2194a63a 698{
a193f3b4 699 struct drm_gem_object *obj = &shmem->base;
2194a63a 700 int ret;
2194a63a
NT
701 struct sg_table *sgt;
702
703 if (shmem->sgt)
704 return shmem->sgt;
705
3f6a1e22 706 drm_WARN_ON(obj->dev, obj->import_attach);
2194a63a 707
ddddedaa 708 ret = drm_gem_shmem_get_pages_locked(shmem);
2194a63a
NT
709 if (ret)
710 return ERR_PTR(ret);
711
a193f3b4 712 sgt = drm_gem_shmem_get_sg_table(shmem);
2194a63a
NT
713 if (IS_ERR(sgt)) {
714 ret = PTR_ERR(sgt);
715 goto err_put_pages;
716 }
717 /* Map the pages for use by the h/w. */
6c6fa39c
MS
718 ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
719 if (ret)
720 goto err_free_sgt;
2194a63a
NT
721
722 shmem->sgt = sgt;
723
724 return sgt;
725
6c6fa39c
MS
726err_free_sgt:
727 sg_free_table(sgt);
728 kfree(sgt);
2194a63a 729err_put_pages:
ddddedaa 730 drm_gem_shmem_put_pages_locked(shmem);
2194a63a
NT
731 return ERR_PTR(ret);
732}
ddddedaa
AL
733
734/**
735 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
736 * scatter/gather table for a shmem GEM object.
737 * @shmem: shmem GEM object
738 *
739 * This function returns a scatter/gather table suitable for driver usage. If
740 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
741 * table created.
742 *
743 * This is the main function for drivers to get at backing storage, and it hides
744 * and difference between dma-buf imported and natively allocated objects.
745 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
746 *
747 * Returns:
748 * A pointer to the scatter/gather table of pinned pages or errno on failure.
749 */
750struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
751{
752 int ret;
753 struct sg_table *sgt;
754
755 ret = mutex_lock_interruptible(&shmem->pages_lock);
756 if (ret)
757 return ERR_PTR(ret);
758 sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
759 mutex_unlock(&shmem->pages_lock);
760
761 return sgt;
762}
047a7545 763EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
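
/*
 * A hedged sketch (not from the original file): fetching the backing storage
 * for hardware that consumes a scatter/gather table. The returned table is
 * owned by the object and remains valid until the object is freed:
 *
 *	struct sg_table *sgt;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	... program the device's DMA engine from sgt's entries ...
 */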

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
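
/*
 * Drivers usually install this import callback together with
 * drm_gem_shmem_dumb_create() in their &drm_driver; the
 * DRM_GEM_SHMEM_DRIVER_OPS convenience macro from
 * <drm/drm_gem_shmem_helper.h> sets both fields. foo_driver below is
 * illustrative only:
 *
 *	static const struct drm_driver foo_driver = {
 *		...
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 */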

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");