/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

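/*
 * Cache handling overview: buffers created with any caching mode in
 * ETNA_BO_CACHE_MASK have their scatterlist DMA-mapped when pages are
 * attached (etnaviv_gem_scatter_map()) and unmapped again when the pages
 * are released (etnaviv_gem_scatterlist_unmap()).  See the warning in the
 * latter about concurrent CPU and GPU access under a BIDIRECTIONAL mapping.
 */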
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

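/*
 * Lazily attach backing pages and build the scatterlist for a BO.  Must be
 * called with etnaviv_obj->lock held (enforced by the lockdep assertion
 * below); returns the page array or an ERR_PTR() on failure.
 */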
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return etnaviv_gem_mmap_obj(&obj->base, vma);
}

int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);

out:
	switch (ret) {
	case 0:
	case -EAGAIN:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

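/*
 * Look up (or create) the GPU virtual address of a BO on the given GPU's MMU
 * and take a use reference on the mapping.  An illustrative call pattern
 * (names of the surrounding submit code are hypothetical):
 *
 *	u32 iova;
 *
 *	ret = etnaviv_gem_get_iova(gpu, obj, &iova);
 *	if (ret)
 *		return ret;
 *	... queue GPU work that uses iova ...
 *	etnaviv_gem_put_iova(gpu, obj);
 *
 * Each successful call takes a reference on the GEM object, which
 * etnaviv_gem_put_iova() drops again.
 */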
int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
	struct drm_gem_object *obj, u32 *iova)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret == 0)
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (!ret) {
		/* Take a reference on the object */
		drm_gem_object_reference(obj);
		*iova = mapping->iova;
	}

	return ret;
}

void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);

	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(obj);
}

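/*
 * Return a kernel virtual mapping of the object.  The mapping is created
 * lazily on first use, is write-combined, and is cached in
 * etnaviv_obj->vaddr until the object is released; NULL is returned if the
 * backing pages cannot be obtained.
 */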
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&etnaviv_obj->lock);
	if (!etnaviv_obj->vaddr) {
		struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);

		if (IS_ERR(pages)) {
			mutex_unlock(&etnaviv_obj->lock);
			return NULL;
		}

		etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

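/*
 * CPU access to a BO is bracketed by etnaviv_gem_cpu_prep() and
 * etnaviv_gem_cpu_fini().  prep waits for pending GPU access (or merely
 * tests for it with ETNA_PREP_NOSYNC) and, for ETNA_BO_CACHED objects,
 * syncs the scatterlist for CPU access; fini hands ownership back to the
 * device using the direction recorded in last_cpu_prep_op.  An illustrative
 * sequence (the caller and timeout handling are hypothetical):
 *
 *	ret = etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU writes to the buffer ...
 *	etnaviv_gem_cpu_fini(obj);
 */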
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->vaddr)
		vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
};

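/*
 * Final teardown of a GEM object: the object must already be idle.  Any
 * remaining MMU mappings are unmapped and freed, then the per-backend
 * release hook drops the backing store (shmem pages or pinned userptr
 * pages) before the core GEM object is released.
 */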
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

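/*
 * Allocate a shmem-backed GEM object.  Because these buffers get pinned for
 * GPU use, the shmem mapping's gfp mask is switched to GFP_HIGHUSER (i.e.
 * without __GFP_MOVABLE) right after creation, so the pages never land in
 * the movable zone.
 */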
static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

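/*
 * Userptr objects pin the user's own pages instead of allocating shmem.
 * When get_pages is invoked from the task that created the object, a fast
 * GUP attempt is made directly; if that cannot pin everything, or the
 * caller is a different process, the pinning is handed off to a worker
 * (get_pages_work) which uses the full get_user_pages() path and reports
 * completion through etnaviv_obj->userptr.work, with -EAGAIN returned to
 * the caller in the meantime.
 */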
static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm,
	struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages(task, mm, ptr, npages - pinned,
				     !etnaviv_obj->userptr.ro, 0,
				     pvec + pinned, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm,
						work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
};

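/*
 * Create a userptr BO and a userspace handle for it.  Userptr objects are
 * always treated as ETNA_BO_CACHED, remember the creating task (so the
 * worker can pin against the right mm later), and pin the pages read-only
 * unless ETNA_USERPTR_WRITE is set.
 */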
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret) {
		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
		return ret;
	}

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}