drm/gem-shmem: Set vm_ops in static initializer
drivers/gpu/drm/drm_gem_shmem_helper.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object_funcs, see the likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */

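/*
 * Example (illustrative sketch, not taken from a specific driver): a minimal
 * driver wiring up these helpers. DRM_GEM_SHMEM_DRIVER_OPS from
 * <drm/drm_gem_shmem_helper.h> fills in the dumb-buffer and PRIME import
 * callbacks; the "foo" names are placeholders.
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.fops		 = &foo_fops,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		.name		 = "foo",
 *		.desc		 = "example shmem-backed driver",
 *		.date		 = "20220101",
 *		.major		 = 1,
 *		.minor		 = 0,
 *	};
 */
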
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
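
/*
 * Example (illustrative): allocating a shmem-backed object from a driver
 * and dropping the local reference once a handle or framebuffer holds one.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	// ... set up the object, create a handle, etc. ...
 *
 *	drm_gem_object_put(&shmem->base);
 */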

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
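
/*
 * Example (illustrative, assuming a hypothetical "foo" driver that embeds
 * struct drm_gem_shmem_object as the first member of its own object struct):
 * a driver with extra bookkeeping can wrap this in its
 * &drm_gem_object_funcs.free callback.
 *
 *	static void foo_gem_free(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *foo = to_foo_gem_object(obj);
 *
 *		list_del(&foo->node);
 *		drm_gem_shmem_free(&foo->base);
 *	}
 */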

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
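
/*
 * Example (illustrative): a driver path that needs the backing pages
 * temporarily. Every successful drm_gem_shmem_get_pages() call must be
 * balanced by drm_gem_shmem_put_pages(); shmem->pages is only valid in
 * between.
 *
 *	int foo_access_pages(struct drm_gem_shmem_object *shmem)
 *	{
 *		int ret;
 *
 *		ret = drm_gem_shmem_get_pages(shmem);
 *		if (ret)
 *			return ret;
 *
 *		// ... use shmem->pages[0 .. (obj->size >> PAGE_SHIFT) - 1] ...
 *
 *		drm_gem_shmem_put_pages(shmem);
 *		return 0;
 *	}
 */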

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
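
/*
 * Example (illustrative): temporary CPU access through a kernel mapping.
 * The same struct dma_buf_map must be passed back to drm_gem_shmem_vunmap().
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *
 *	memset(map.vaddr, 0, shmem->base.size); // never I/O memory here
 *
 *	drm_gem_shmem_vunmap(shmem, &map);
 */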

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/*
 * Update the madvise status; returns true if the object has not been
 * purged, false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
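
/*
 * Example (illustrative sketch of a driver shrinker): userspace marks
 * buffers purgeable through a driver-specific madvise ioctl, and a shrinker
 * later reclaims them. The "foo" shrinker list and its locking are
 * hypothetical.
 *
 *	// madvise ioctl: record what userspace allows us to discard
 *	drm_gem_shmem_madvise(shmem, args->madv);
 *
 *	// shrinker scan callback: purge whatever is still purgeable
 *	list_for_each_entry(shmem, &foo->shrinker_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(shmem))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */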

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
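
/*
 * Worked example (illustrative): for a 640x480 XRGB8888 dumb buffer the
 * ioctl passes width=640, height=480, bpp=32 and leaves pitch/size zero,
 * so this helper computes
 *
 *	pitch = DIV_ROUND_UP(640 * 32, 8) = 2560 bytes
 *	size  = PAGE_ALIGN(2560 * 480)   = 1228800 bytes
 *
 * before allocating the object and returning a handle in args->handle.
 */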

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
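
/*
 * Example (illustrative): a driver that provides its own
 * &drm_gem_object_funcs can still reuse the shmem mmap path by pointing
 * .mmap and .vm_ops at these helpers, just like drm_gem_shmem_funcs above
 * does; the foo_gem_free callback is a placeholder.
 *
 *	static const struct drm_gem_object_funcs foo_gem_funcs = {
 *		.free = foo_gem_free,
 *		.get_sg_table = drm_gem_shmem_object_get_sg_table,
 *		.vmap = drm_gem_shmem_object_vmap,
 *		.vunmap = drm_gem_shmem_object_vunmap,
 *		.mmap = drm_gem_shmem_object_mmap,
 *		.vm_ops = &drm_gem_shmem_vm_ops,
 *	};
 */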

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
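
/*
 * Example (illustrative, userspace side, using libdrm): a dumb buffer is
 * mapped by asking for its fake mmap offset and then calling mmap() on the
 * DRM fd; subsequent faults are served by drm_gem_shmem_fault(). Assumes
 * "create" is a struct drm_mode_create_dumb already filled in by
 * DRM_IOCTL_MODE_CREATE_DUMB.
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */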

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
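
/*
 * Example (illustrative): programming hypothetical hardware with the DMA
 * addresses of the backing pages; foo_hw_program_entry() is a placeholder.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		foo_hw_program_entry(foo, sg_dma_address(sg), sg_dma_len(sg));
 */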

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                                       another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");