dma-buf: rename reservation_object to dma_resv
[linux-block.git] drivers/gpu/drm/etnaviv/etnaviv_gem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

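/* Drop the object's scatter/gather table and release its backing pages. */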
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

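/*
 * Pin the backing pages and build the scatter/gather table on first use.
 * Must be called with etnaviv_obj->lock held.
 */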
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

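/*
 * Set up a userspace mapping: pick the page protection from the BO cache
 * flags and, for cached objects, back the vma with the object's shmem file.
 */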
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

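/*
 * Fault handler for userspace mappings: make sure the object has backing
 * pages, then insert the faulting page into the vma.
 */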
vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

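/* Find an existing mapping of the object in the given MMU context, if any. */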
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_get(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put_unlocked(&etnaviv_obj->base);
}

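/*
 * Look up or create a mapping of the object in the GPU's MMU context and
 * return it with a use count and object reference held.
 */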
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

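/*
 * Map the object into kernel address space, lazily on first call; the
 * result is cached in etnaviv_obj->vaddr.
 */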
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

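/*
 * Prepare the object for CPU access: wait for (or, with ETNA_PREP_NOSYNC,
 * just test) the object's dma_resv fences and sync cached objects for the CPU.
 */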
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled_rcu(obj->resv, write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout_rcu(obj->resv, write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

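/*
 * End a CPU access window opened by etnaviv_gem_cpu_prep(): sync cached
 * objects back for device access.
 */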
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
		   etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
		   obj->name, kref_read(&obj->refcount),
		   off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, " ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

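/*
 * Release everything attached to the object: MMU mappings, backing pages,
 * the mmap offset, and finally the object itself.
 */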
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

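/* Common allocation path: validate the cache flags and set up the object. */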
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
			     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

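/*
 * Construct a GEM object without shmem backing, for buffers whose pages
 * come from elsewhere (such as the userptr objects below).
 */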
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

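/*
 * Pin the user pages of a userptr object with get_user_pages_fast().
 * Only the mm that created the object is allowed to populate it.
 */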
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_sem);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = get_user_pages_fast(ptr, num_pages,
					  !userptr->ro ? FOLL_WRITE : 0, pages);
		if (ret < 0) {
			release_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

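/*
 * Create a handle for a userptr object wrapping the given user address
 * range; its pages are pinned lazily when first needed.
 */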
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&etnaviv_obj->base);
	return ret;
}