drm: Add library for shmem backed GEM objects
drivers/gpu/drm/drm_gem_shmem_helper.c (linux-block.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
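
/*
 * A minimal wiring sketch (not taken from this file; the "mydrv" names are
 * hypothetical): a driver typically builds its &file_operations with
 * DEFINE_DRM_GEM_SHMEM_FOPS() and points the relevant &drm_driver callbacks
 * at the helpers below, for example:
 *
 *	DEFINE_DRM_GEM_SHMEM_FOPS(mydrv_fops);
 *
 *	static struct drm_driver mydrv_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_PRIME,
 *		.fops = &mydrv_fops,
 *		.dumb_create = drm_gem_shmem_dumb_create,
 *	};
 *
 * The per-object callbacks (free, pin, vmap and friends) come from
 * drm_gem_shmem_funcs below unless the driver installs its own
 * &drm_gem_object_funcs from a &drm_driver.gem_create_object hook. The PRIME
 * import side is sketched next to drm_gem_shmem_prime_import_sg_table() at
 * the end of this file.
 */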

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);

	/*
	 * Our buffers are kept pinned, so allocating them
	 * from the MOVABLE zone is a really bad idea, and
	 * conflicts with CMA. See comments above new_inode()
	 * why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
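
/*
 * Usage sketch (hedged; not part of this helper): driver code allocating a
 * buffer for internal use would do something like:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 *	// use &shmem->base like any other GEM object
 *
 *	drm_gem_object_put_unlocked(&shmem->base);
 */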

/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		shmem->pages_use_count--;
		drm_prime_gem_destroy(obj, shmem->sgt);
		kvfree(shmem->pages);
	} else {
		if (shmem->sgt) {
			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
				     shmem->sgt->nents, DMA_BIDIRECTIONAL);

			drm_gem_shmem_put_pages(shmem);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
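
/*
 * Usage sketch (hedged): drivers that need the backing struct page array, for
 * example to set up their own device mappings, pair the two calls; the
 * mydrv_map_pages() helper is hypothetical:
 *
 *	int ret = drm_gem_shmem_get_pages(shmem);
 *
 *	if (ret)
 *		return ret;
 *	mydrv_map_pages(mydrv, shmem->pages, shmem->base.size >> PAGE_SHIFT);
 *
 * and later, when the device mapping is torn down:
 *
 *	drm_gem_shmem_put_pages(shmem);
 */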

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		goto err_zero_use;

	if (obj->import_attach)
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	else
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	return shmem->vaddr;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a virtual address exists for the buffer backing
 * the shmem GEM object.
 *
 * Returns:
 * A pointer to the kernel virtual address of the buffer on success or an
 * ERR_PTR()-encoded negative error code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	void *vaddr;
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ERR_PTR(ret);
	vaddr = drm_gem_shmem_vmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach)
		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
	else
		vunmap(shmem->vaddr);

	shmem->vaddr = NULL;
	drm_gem_shmem_put_pages(shmem);
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
 * @obj: GEM object
 * @vaddr: Virtual address of the mapping
 *
 * This function removes the virtual address when the use count drops to zero.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
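
/*
 * Usage sketch (hedged): CPU access to the whole buffer through a temporary
 * kernel mapping; error handling is abbreviated and "src"/"len" are
 * illustrative:
 *
 *	void *vaddr = drm_gem_shmem_vmap(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, src, len);
 *	drm_gem_shmem_vunmap(obj, vaddr);
 */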
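
/**
 * drm_gem_shmem_create_with_handle - Allocate an object with the given size
 * and return a handle to it
 * @file_priv: DRM file structure to register the handle with
 * @dev: DRM device
 * @size: Size of the object to allocate
 * @handle: Return location for the GEM handle
 *
 * This function creates a shmem GEM object and registers a handle for it in
 * @file_priv. The reference taken at creation is dropped before returning, so
 * the handle holds the only reference to the object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */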
struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an ID in the handle IDR where the object is registered;
	 * @handle receives the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to a
 * whole number of bytes per scanline. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
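
/*
 * Hedged sketch of the "additional restrictions" case mentioned above: a
 * driver whose (hypothetical) scanout engine requires a 64-byte aligned pitch
 * could adjust the fields before calling into the helper:
 *
 *	static int mydrv_dumb_create(struct drm_file *file, struct drm_device *dev,
 *				     struct drm_mode_create_dumb *args)
 *	{
 *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *		args->size = args->pitch * args->height;
 *
 *		return drm_gem_shmem_dumb_create(file, dev, args);
 *	}
 */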

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	struct page *page;

	/* pgoff is a page index here, so the last valid value is num_pages - 1 */
	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
		return VM_FAULT_SIGBUS;

	page = shmem->pages[vmf->pgoff];

	return vmf_insert_page(vma, vmf->address, page);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @filp: File object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &file_operations.mmap handler.
 *
 * Instead of directly referencing this function, drivers should use the
 * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	/* VM_PFNMAP was set by drm_gem_mmap() */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	/* Remove the fake offset */
	vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
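
/*
 * Hedged sketch: DEFINE_DRM_GEM_SHMEM_FOPS() in <drm/drm_gem_shmem_helper.h>
 * produces a &file_operations wired to this mmap handler. A driver that
 * cannot use the macro ("mydrv" is hypothetical) can spell it out:
 *
 *	static const struct file_operations mydrv_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.compat_ioctl = drm_compat_ioctl,
 *		.poll = drm_poll,
 *		.read = drm_read,
 *		.llseek = noop_llseek,
 *		.mmap = drm_gem_shmem_mmap,
 *	};
 */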

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 * pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 * scatter/gather table for a shmem GEM object.
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table created.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

	shmem->sgt = sgt;

	return sgt;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
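
/*
 * Usage sketch (hedged): a driver that programs a (hypothetical) MMU or DMA
 * engine from the cached, device-mapped sg table:
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	mydrv_mmu_map(mydrv, sgt);
 *
 * The table stays cached in the object and is released together with the
 * pages in drm_gem_shmem_free_object().
 */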

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 * another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	size_t npages = size >> PAGE_SHIFT;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!shmem->pages) {
		ret = -ENOMEM;
		goto err_free_gem;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
	if (ret < 0)
		goto err_free_array;

	shmem->sgt = sgt;
	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;

err_free_array:
	kvfree(shmem->pages);
err_free_gem:
	drm_gem_object_put_unlocked(&shmem->base);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
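
/*
 * Hedged sketch of the PRIME import wiring described above ("mydrv" is
 * hypothetical). drm_gem_prime_import() calls back into this helper through
 * &drm_driver.gem_prime_import_sg_table when importing a foreign dma-buf:
 *
 *	static struct drm_driver mydrv_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_import = drm_gem_prime_import,
 *		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
 *	};
 */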