// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
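
/*
 * Example: a driver can adopt these helpers wholesale by pointing the
 * relevant &drm_driver callbacks at the shmem implementations. This is an
 * illustrative sketch only; the mydrv_driver name is hypothetical.
 *
 *	static const struct drm_driver mydrv_driver = {
 *		.dumb_create               = drm_gem_shmem_dumb_create,
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *		...
 *	};
 */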

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
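
/*
 * Example: allocating a shmem-backed object. This is an illustrative
 * sketch; the size calculation and surrounding error handling are
 * assumptions, not part of this helper.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, width * height * 4);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */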

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
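
/*
 * Example: pin/unpin calls must be balanced, since they share the backing
 * pages' use count. An illustrative sketch around a hypothetical hardware
 * access:
 *
 *	ret = drm_gem_shmem_pin(shmem);
 *	if (ret)
 *		return ret;
 *	... program the hardware with the pinned pages ...
 *	drm_gem_shmem_unpin(shmem);
 */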

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
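
/*
 * Example: a balanced vmap/vunmap pair. The same &dma_buf_map must be
 * passed back to drm_gem_shmem_vunmap(). Illustrative sketch only.
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *	memset(map.vaddr, 0, shmem->base.size);
 *	drm_gem_shmem_vunmap(shmem, &map);
 */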

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered.
	 * The handle carries the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/**
 * drm_gem_shmem_madvise - Update the madvise status of a shmem GEM object
 * @shmem: shmem GEM object
 * @madv: New madvise value
 *
 * This function updates the madvise status unless the object has already
 * been purged.
 *
 * Returns:
 * True if the object has not been purged, false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
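
/*
 * Example: a driver's madvise ioctl might forward the request and track
 * purgeable objects on a shrinker list via &drm_gem_shmem_object.madv_list.
 * Illustrative sketch; the mydrv pointer and its locking are hypothetical.
 *
 *	drm_gem_shmem_madvise(shmem, args->madv);
 *	if (drm_gem_shmem_is_purgeable(shmem))
 *		list_add_tail(&shmem->madv_list, &mydrv->shrinker_list);
 */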

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/*
	 * Our goal here is to return as much of the memory as possible back
	 * to the system as we are called from OOM. To do this we must
	 * instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
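
/*
 * Example: a shrinker scan callback can reclaim purgeable objects.
 * Illustrative sketch only; the list handling and its locking are
 * driver-specific assumptions.
 *
 *	list_for_each_entry(shmem, &mydrv->shrinker_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(shmem))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */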

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to a
 * whole number of bytes. Drivers for hardware that doesn't have any
 * additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
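
/*
 * Worked example: for a 1366x768 XRGB8888 dumb buffer (bpp = 32), the
 * minimum pitch is DIV_ROUND_UP(1366 * 32, 8) = 5464 bytes, and the size
 * is PAGE_ALIGN(5464 * 768 = 4196352) = 4198400 bytes, assuming 4 KiB
 * pages.
 */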

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it
 * hides any difference between dma-buf imported and natively allocated
 * objects. drm_gem_shmem_get_sg_table() should not be directly called by
 * drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
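
/*
 * Example: fetching the sg table for hardware programming. The table is
 * cached in the object, so repeated calls are cheap. Illustrative sketch;
 * the iteration body is a placeholder.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sgl;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	for_each_sgtable_dma_sg(sgt, sgl, i)
 *		... program sg_dma_address(sgl) / sg_dma_len(sgl) ...
 */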

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_LICENSE("GPL v2");