/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated, else the buffer is only made
	 * contiguous in device address space through the IOMMU.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
	 * mapping, else a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
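
	/*
	 * Note: with DMA_ATTR_NO_KERNEL_MAPPING the DMA API is not required
	 * to set up a kernel virtual mapping for the buffer; the value
	 * returned by dma_alloc_attrs() below is an opaque cookie only
	 * meant for dma_mmap_attrs()/dma_free_attrs(). Kernel-side access
	 * goes through the pages[] array filled in further down.
	 */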
	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
			GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}
	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);
	return ret;
}
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle returned to userspace is that id.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);
	return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
	return exynos_gem;
}
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}
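
/*
 * A minimal userspace sketch of the create ioctl above (illustrative only,
 * assuming an already opened DRM file descriptor and libdrm's drmIoctl();
 * error handling omitted, use_gem_handle() is a hypothetical helper):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		use_gem_handle(req.handle);
 */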
int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;

	return to_exynos_gem(obj);
}
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put_unlocked(obj);

	return 0;
}
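
/*
 * Final-release path: this is typically wired up as the driver's
 * .gem_free_object_unlocked hook (an assumption about the surrounding
 * driver glue in exynos_drm_drv.c), so it runs once the last handle and
 * reference to the GEM object are gone.
 */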
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback is invoked by a user application
	 *   through the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
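
	/*
	 * Worked example: a 1920x1080 dumb buffer with bpp = 32 gives
	 * pitch = 1920 * ((32 + 7) / 8) = 7680 bytes and
	 * size = 7680 * 1080 = 8294400 bytes; exynos_drm_gem_create()
	 * below rounds the size up to a multiple of PAGE_SIZE.
	 */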
	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}
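
/*
 * Page-fault handler for mmap'ed GEM objects; it is expected to be hooked
 * into the driver's vm_operations_struct as the .fault callback (an
 * assumption about the surrounding driver glue). It resolves the faulting
 * offset to one of the pages collected in exynos_drm_alloc_buf() and
 * inserts it into the user mapping.
 */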
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return VM_FAULT_SIGBUS;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	return vmf_insert_mixed(vma, vmf->address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
}
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}
/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}
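
/*
 * For export, the scatter-gather table is rebuilt from the pages[] array
 * that exynos_drm_alloc_buf() filled in, using drm_prime_pages_to_sg().
 */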
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *),
					   GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_pages;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically continuous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * consider it NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_pages:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}
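
/*
 * Buffers are allocated with DMA_ATTR_NO_KERNEL_MAPPING, so there is no
 * kernel virtual address to hand out here; the vmap helper therefore
 * returns NULL and vunmap has nothing to undo.
 */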
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}