drm/etnaviv: call correct function when trying to vmap a DMABUF
drivers/gpu/drm/etnaviv/etnaviv_gem.c
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

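/* Map the object's scatterlist for DMA, cleaning CPU caches as needed. */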
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

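/* Release the object's scatterlist and drop its shmem backing pages. */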
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

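/*
 * Return the object's backing pages, creating them and the scatterlist
 * on first use.  Must be called with etnaviv_obj->lock held.
 */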
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

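/*
 * Apply the object's caching mode to a userspace mapping: WC and
 * uncached objects get their page protection adjusted, while cached
 * objects are redirected to the shmem file.
 */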
static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return etnaviv_gem_mmap_obj(vma->vm_private_data, vma);
}

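/*
 * Page fault handler: pin the object's backing pages and insert the
 * faulting page into the VMA.
 */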
int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

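/* Create (if necessary) and return the fake mmap offset for this object. */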
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

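/*
 * Pin the object into GPU address space, reusing or creating a vram
 * mapping for the given MMU, and return its GPU address in *iova.
 * Each successful call takes a reference on the object, dropped by
 * etnaviv_gem_put_iova().  Typical use (sketch):
 *
 *	etnaviv_gem_get_iova(gpu, obj, &iova);
 *	... submit GPU commands referencing iova ...
 *	etnaviv_gem_put_iova(gpu, obj);
 */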
int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
	struct drm_gem_object *obj, u32 *iova)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the MMU lock and re-check
		 * that the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (!ret) {
		/* Take a reference on the object */
		drm_gem_object_reference(obj);
		*iova = mapping->iova;
	}

	return ret;
}

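/* Drop a use count and reference obtained with etnaviv_gem_get_iova(). */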
void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);

	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(obj);
}

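/*
 * Return a kernel virtual mapping of the object, creating it via the
 * object's vmap op on first use.  The mapping is cached for the
 * lifetime of the object.
 */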
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

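/*
 * vmap implementation for shmem and userptr objects: map the backing
 * pages write-combined into kernel address space.
 */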
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

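/* Translate an ETNA_PREP_* op mask into a DMA API direction. */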
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

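/*
 * Prepare the object for CPU access: wait for outstanding GPU work
 * (unless ETNA_PREP_NOSYNC is set) and, for cached buffers, sync the
 * pages for the CPU.
 */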
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			struct page **pages;

			mutex_lock(&etnaviv_obj->lock);
			pages = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(pages))
				return PTR_ERR(pages);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

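/*
 * End a CPU access section: for cached buffers, hand ownership of the
 * pages back to the device, using the direction of the matching
 * cpu_prep call.
 */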
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->vaddr)
		vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

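/* Allocate and initialise the common part of a new GEM object. */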
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

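/*
 * Pin the pages of a userptr object with get_user_pages(), repeating
 * the call until the whole range has been pinned.
 */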
static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages(task, mm, ptr, npages - pinned,
				     !etnaviv_obj->userptr.ro, 0,
				     pvec + pinned, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

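/*
 * Get the pages for a userptr object.  Try the fast path for the
 * current mm first; otherwise defer the pinning to a workqueue and
 * return -EAGAIN so the caller retries once the worker has finished.
 */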
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
};

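/*
 * Create a userptr GEM object wrapping the given user address range
 * and return a handle to it.  Userptr objects are always cached.
 */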
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret) {
		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
		return ret;
	}

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}