// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */
#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
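
/*
 * Example (illustrative sketch, not part of the original file): a driver that
 * embeds struct drm_gem_shmem_object typically allocates it from its
 * &drm_driver.gem_create_object hook and leaves obj->funcs NULL, so that
 * __drm_gem_shmem_create() below fills in drm_gem_shmem_funcs. The "mydrv"
 * names are hypothetical.
 *
 *	struct mydrv_bo {
 *		struct drm_gem_shmem_object base;
 *	};
 *
 *	static struct drm_gem_object *
 *	mydrv_gem_create_object(struct drm_device *dev, size_t size)
 *	{
 *		struct mydrv_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *		return &bo->base.base;
 *	}
 */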
static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
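
/*
 * Example (sketch, assuming a driver ioctl or similar context; error handling
 * shortened):
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */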
/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	drm_WARN_ON(obj->dev, shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}
/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);
static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
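
/*
 * Example (sketch): a driver that needs the page array temporarily brackets
 * the access with the get/put pair; mydrv_use_pages() is hypothetical.
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *	mydrv_use_pages(shmem->pages, shmem->base.size >> PAGE_SHIFT);
 *	drm_gem_shmem_put_pages(shmem);
 */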
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}
/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
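
/*
 * Example (sketch): temporary CPU access to a buffer through the vmap/vunmap
 * pair; error handling shortened.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *	iosys_map_memset(&map, 0, 0, shmem->base.size);
 *	drm_gem_shmem_vunmap(shmem, &map);
 */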
static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace uses to refer to it.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);

	return ret;
}
/* Update madvise status, returns true if not purged, else false. */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);
bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
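
/*
 * Example (sketch): a driver shrinker scanning buffers that userspace marked
 * as unneeded (shmem->madv > 0, typically via a driver madvise ioctl) and
 * that the driver keeps on its own list through
 * &drm_gem_shmem_object.madv_list; the "mydrv" list is hypothetical.
 *
 *	list_for_each_entry(shmem, &mydrv->shrinker_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(shmem))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */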
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
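
/*
 * Example (sketch): wiring the helper into a driver. The
 * DRM_GEM_SHMEM_DRIVER_OPS macro from <drm/drm_gem_shmem_helper.h> sets this
 * callback (together with the related PRIME helpers) for you; spelled out by
 * hand it amounts to:
 *
 *	static const struct drm_driver mydrv_driver = {
 *		...
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *	};
 */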
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}
static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_WARN_ON(obj->dev, obj->import_attach);

	mutex_lock(&shmem->pages_lock);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (i.e. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	mutex_unlock(&shmem->pages_lock);

	drm_gem_vm_open(vma);
}
static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ret;

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	if (shmem->base.import_attach)
		return;

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);
/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an error pointer on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	drm_WARN_ON(obj->dev, obj->import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages_locked(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages_locked(shmem);
	return ERR_PTR(ret);
}
/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return sgt;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages_sgt);
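
/*
 * Example (sketch): fetching the dma-mapped scatterlist before programming a
 * DMA engine; mydrv_program_dma() is hypothetical.
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	mydrv_program_dma(mydrv, sgt);
 */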
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
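
/*
 * Example (sketch): the assignment drivers make in their &drm_driver, either
 * directly or via DRM_GEM_SHMEM_DRIVER_OPS:
 *
 *	static const struct drm_driver mydrv_driver = {
 *		...
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *	};
 */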
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");