// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

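/*
 * Make sure the object has backing pages and a scatter/gather table,
 * allocating both on first use.  Must be called with etnaviv_obj->lock
 * held.
 */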
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(dev, etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

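/*
 * Apply the BO's caching mode to a userspace mapping: write-combined and
 * uncached objects get matching page protections, while cached objects
 * are redirected to the shmem file's own address_space.
 */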
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     pfn, pfn << PAGE_SHIFT);

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

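/*
 * Create the fake mmap offset for the object so that userspace can map it
 * through the DRM device node.
 */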
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

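/*
 * Look up an existing mapping of the object in the given MMU context.
 * Called with etnaviv_obj->lock held.
 */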
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

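/*
 * Get a GPU VA mapping for the object in the given MMU context, reusing a
 * still-present or reaped mapping where possible and creating a new one
 * otherwise.  On success the mapping use count is raised and a reference
 * is taken on the GEM object; both are dropped by
 * etnaviv_gem_mapping_unreference().
 */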
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context) {
				if (va && mapping->iova != va) {
					etnaviv_iommu_reap_mapping(mapping);
					mapping = NULL;
				} else {
					mapping->use += 1;
				}
			} else {
				mapping = NULL;
			}
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

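/*
 * Return a kernel virtual mapping of the object, creating it on first use.
 * The result is cached in etnaviv_obj->vaddr, so repeated calls are cheap.
 */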
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	op &= ETNA_PREP_READ | ETNA_PREP_WRITE;

	if (op == ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op == ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;

	return DMA_BIDIRECTIONAL;
}

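/*
 * Prepare the object for CPU access: wait for outstanding GPU work (unless
 * ETNA_PREP_NOSYNC is set) and, for cached BOs, sync the scatterlist for
 * the CPU.  The prep op is remembered so that etnaviv_gem_cpu_fini() can
 * sync back to the device with the matching DMA direction.
 */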
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv,
					    dma_resv_usage_rw(write)))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
					    true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	unsigned long off = drm_vma_node_start(&obj->vma_node);
	int r;

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	r = dma_resv_lock(robj, NULL);
	if (r)
		return;

	dma_resv_describe(robj, m);
	dma_resv_unlock(robj);
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context)
			etnaviv_iommu_unmap_gem(context, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

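/* Add the BO to the device-wide object list used by the debugfs code. */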
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.mmap = etnaviv_gem_mmap,
	.vm_ops = &vm_ops,
};

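/*
 * Common allocation path for all etnaviv BO types: validate the cache flags,
 * allocate and initialise the etnaviv_gem_object and hook up the GEM object
 * functions.  Used by the shmem, userptr and imported dma-buf constructors.
 */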
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		return ret;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

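/*
 * Pin the pages backing a userptr BO with pin_user_pages_fast(), looping
 * until the whole range is pinned.  Only the mm that created the BO may
 * populate it, and read-only userptrs are pinned without FOLL_WRITE.
 */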
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	unsigned int gup_flags = FOLL_LONGTERM;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!userptr->ro)
		gup_flags |= FOLL_WRITE;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages, gup_flags, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);

	return ret;
}