// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really the GPU cache is out of scope here (handled on the
 * cmdstream) and all we need to do is invalidate newly allocated pages
 * before mapping them to the CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_map_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, msm_obj->sgt->sgl,
			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}
}
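
/*
 * Note: in the dma-direct case above, dma_map_sg()/dma_unmap_sg() are
 * used purely for their cache-maintenance side effect; the dma
 * addresses they produce are not consumed, since the GPU programs its
 * own IOMMU mappings.  This is exactly the abstraction mismatch the
 * cautionary tale above refers to.
 */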

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}
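
/*
 * Note that backing pages are allocated lazily: get_pages() runs on
 * first use (pinning, CPU fault, or vmap), so a freshly created object
 * costs very little until it is actually touched.
 */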

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);

	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj.  So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/* get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock held */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);
	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/*
 * Get an iova and pin it.  Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}
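
/*
 * Usage sketch (illustrative only): a typical caller pins an iova
 * around GPU use and drops the pin afterwards:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... hand 'iova' to the GPU ...
 *	msm_gem_unpin_iova(obj, aspace);
 */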

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
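
/*
 * For example (illustrative numbers, assuming align_pitch() leaves an
 * already GPU-aligned 1920-pixel row untouched): a 1920x1080 dumb
 * buffer at 32 bpp gets pitch = 1920 * 4 = 7680 bytes and
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, i.e. 2025 pages.
 */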

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
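
/*
 * Usage sketch (illustrative only): CPU access through the kernel
 * mapping is bracketed by get/put, so the shrinker can tell when the
 * vmap may be torn down:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 *
 * Note the put only drops vmap_count; the actual vunmap() happens later
 * via msm_gem_vunmap(), e.g. under shrinker pressure.
 */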

/* Update madvise status, returns true if not purged, else false. */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
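
/*
 * The return value matters to userspace: if the shrinker purged the
 * object while it was marked MSM_MADV_DONTNEED, a later
 * MSM_MADV_WILLNEED returns false and the caller must assume the
 * buffer contents were lost and recreate them.
 */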

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		dma_resv_add_excl_fence(obj->resv, fence);
	else
		dma_resv_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
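
/*
 * Usage sketch (illustrative only): allocating a small kernel-visible
 * buffer, e.g. for a ringbuffer or memptrs, and releasing it again:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC,
 *			gpu->aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, gpu->aspace, false);
 */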

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}