// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

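/*
 * A minimal sketch (not part of this file) of how a driver wires these
 * helpers up; the mydrv_* names are hypothetical, while DEFINE_DRM_GEM_FOPS
 * and DRM_GEM_SHMEM_DRIVER_OPS are the stock macros from the DRM headers.
 */
DEFINE_DRM_GEM_FOPS(mydrv_fops);

static const struct drm_driver mydrv_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops			= &mydrv_fops,
	DRM_GEM_SHMEM_DRIVER_OPS,
	.name			= "mydrv",
	.desc			= "Hypothetical shmem-backed driver",
	.date			= "20210101",
	.major			= 1,
	.minor			= 0,
};
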
static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	shmem = to_drm_gem_shmem_obj(obj);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * for why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
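
/*
 * A sketch of typical driver-side allocation through drm_gem_shmem_create();
 * mydrv_create_bo is a hypothetical helper.
 */
static struct drm_gem_shmem_object *mydrv_create_bo(struct drm_device *dev,
						    size_t size)
{
	struct drm_gem_shmem_object *shmem;

	/* the size is page-aligned internally; an ERR_PTR() signals failure */
	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/* further driver-specific setup of shmem->base would go here */
	return shmem;
}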

/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself. It should be used to implement
 * &drm_gem_object_funcs.free.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);
195
196static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
197{
198 struct drm_gem_object *obj = &shmem->base;
199
200 if (WARN_ON_ONCE(!shmem->pages_use_count))
201 return;
202
203 if (--shmem->pages_use_count > 0)
204 return;
205
206 drm_gem_put_pages(obj, shmem->pages,
207 shmem->pages_mark_dirty_on_put,
208 shmem->pages_mark_accessed_on_put);
209 shmem->pages = NULL;
210}
211
212/*
213 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
214 * @shmem: shmem GEM object
215 *
216 * This function decreases the use count and puts the backing pages when use drops to zero.
217 */
218void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
219{
220 mutex_lock(&shmem->pages_lock);
221 drm_gem_shmem_put_pages_locked(shmem);
222 mutex_unlock(&shmem->pages_lock);
223}
224EXPORT_SYMBOL(drm_gem_shmem_put_pages);
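
/*
 * A sketch showing that get/put must be balanced: the pages are allocated on
 * the 0 -> 1 transition and released on the 1 -> 0 transition. mydrv_cpu_prep
 * is a hypothetical helper.
 */
static int mydrv_cpu_prep(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);	/* use count 0 -> 1 allocates */
	if (ret)
		return ret;

	/* ... access shmem->pages here ... */

	drm_gem_shmem_put_pages(shmem);		/* use count 1 -> 0 releases */
	return 0;
}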

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported. It should only be used to implement
 * &drm_gem_object_funcs.pin.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		dma_buf_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			dma_buf_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object.
 *
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct dma_buf_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach)
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	else
		vunmap(shmem->vaddr);

	shmem->vaddr = NULL;
	drm_gem_shmem_put_pages(shmem);
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
 * @obj: GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
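
/*
 * A sketch of CPU access through the vmap/vunmap pair: the struct dma_buf_map
 * out-parameter carries the kernel address. mydrv_clear_bo is a hypothetical
 * helper.
 */
static int mydrv_clear_bo(struct drm_gem_object *obj)
{
	struct dma_buf_map map;
	int ret;

	ret = drm_gem_shmem_vmap(obj, &map);
	if (ret)
		return ret;

	/* the helper guarantees a system-memory mapping here, not I/O memory */
	memset(map.vaddr, 0, obj->size);

	drm_gem_shmem_vunmap(obj, &map);
	return 0;
}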

struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/* Update madvise status; returns true if the backing pages have not been
 * purged, false otherwise.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
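
/*
 * A sketch of an ioctl handler letting userspace mark a BO purgeable, loosely
 * modeled on how drivers such as panfrost use this helper. The args struct
 * and mydrv_ioctl_madvise are hypothetical uAPI; by the helper's convention a
 * madv of 0 means "will need" and a positive value means "won't need".
 */
struct mydrv_madvise_args {
	u32 handle;	/* in: GEM handle */
	u32 madv;	/* in: 0 = will need, > 0 = won't need (purgeable) */
	u32 retained;	/* out: non-zero if the backing store is still there */
};

static int mydrv_ioctl_madvise(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct mydrv_madvise_args *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	args->retained = drm_gem_shmem_madvise(obj, args->madv);

	drm_gem_object_put(obj);
	return 0;
}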

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much memory as possible back to the
	 * system, since we are called from the OOM path. To do this we must
	 * instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
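
/*
 * A sketch of a shrinker scan callback reclaiming purgeable objects, modeled
 * on the panfrost shrinker; mydrv_device and its shrinker_list (objects
 * linked via &drm_gem_shmem_object.madv_list) are hypothetical, and the
 * usual shrinker/list includes are assumed.
 */
struct mydrv_device {
	struct drm_device base;
	struct shrinker shrinker;
	struct list_head shrinker_list;
};

static unsigned long mydrv_shrinker_scan(struct shrinker *shrinker,
					 struct shrink_control *sc)
{
	struct mydrv_device *mydev =
		container_of(shrinker, struct mydrv_device, shrinker);
	struct drm_gem_shmem_object *shmem, *tmp;
	unsigned long freed = 0;

	list_for_each_entry_safe(shmem, tmp, &mydev->shrinker_list, madv_list) {
		if (freed >= sc->nr_to_scan)
			break;
		/* the trylock inside drm_gem_shmem_purge() avoids deadlocks */
		if (drm_gem_shmem_is_purgeable(shmem) &&
		    drm_gem_shmem_purge(&shmem->base))
			freed += shmem->base.size >> PAGE_SHIFT;
	}

	return freed ?: SHRINK_STOP;
}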

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
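
/*
 * A sketch of the "hardware with additional restrictions" case from the
 * kernel-doc above: a device that requires, say, 64-byte-aligned pitches can
 * fix up the userspace-supplied fields before handing off to the generic
 * helper. mydrv_dumb_create and the alignment value are hypothetical.
 */
static int mydrv_dumb_create(struct drm_file *file, struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
	args->size = args->pitch * args->height;

	return drm_gem_shmem_dumb_create(file, dev, args);
}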

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_page(vma, vmf->address, page);
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This implements the &drm_gem_object_funcs.print_info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *				pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers should not call this function
 * directly, instead it should only be used as an implementation for
 * &drm_gem_object_funcs.get_sg_table.
 *
 * Drivers that need to acquire a scatter/gather table for objects need to call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any differences between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
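
/*
 * A sketch of programming device pagetables from the dma-mapped table
 * returned by drm_gem_shmem_get_pages_sgt(); mydrv_map_bo and
 * mydrv_mmu_map are hypothetical.
 */
static int mydrv_map_bo(struct drm_gem_object *obj, u64 iova)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int i;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* walk the DMA-mapped segments, which may be fewer than the pages */
	for_each_sgtable_dma_sg(sgt, sg, i) {
		mydrv_mmu_map(obj->dev, iova, sg_dma_address(sg),
			      sg_dma_len(sg));
		iova += sg_dma_len(sg);
	}

	return 0;
}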

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *					 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);