dma-buf: rename and cleanup dma_resv_get_list v2
[linux-2.6-block.git] drivers/gpu/drm/etnaviv/etnaviv_gem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

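/*
 * etnaviv_gem_get_pages() populates etnaviv_obj->pages on first use and
 * builds the scatter/gather table for it; both stay cached on the object
 * until the backing storage is released. Callers must hold the object
 * lock and balance the call with etnaviv_gem_put_pages(), which is a
 * no-op until a pin count is tracked (see below).
 */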
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

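/*
 * Pages are inserted one at a time from the fault handler via
 * vmf_insert_page(), so the VMA is switched from VM_PFNMAP to
 * VM_MIXEDMAP here and the page protection is derived from the
 * object's caching flags (write-combined, uncached or cached).
 */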
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

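/*
 * drm_gem_mmap() resolves the fake mmap offset to the GEM object and
 * stores it in vma->vm_private_data; the per-object mmap hook then
 * applies the caching attributes (or rejects the mapping, as for
 * userptr objects).
 */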
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

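/*
 * Look up (or create) the mapping of a BO in the given MMU context.
 * On success the mapping's use count is incremented and an additional
 * reference is taken on the GEM object; etnaviv_gem_mapping_unreference()
 * drops both again.
 */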
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	etnaviv_iommu_context_get(mmu_context);
	mapping->context = mmu_context;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0) {
		etnaviv_iommu_context_put(mmu_context);
		kfree(mapping);
	} else {
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
	}

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

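/*
 * Return a kernel mapping of the BO, creating it on first use. The
 * unlocked fast path is safe because vaddr is written once under the
 * object lock and only torn down together with the object itself.
 */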
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

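/*
 * CPU access to a BO is bracketed by a prep/fini pair, roughly
 * (a sketch of the in-kernel calling convention, driven by the
 * DRM_ETNAVIV_GEM_CPU_PREP/DRM_ETNAVIV_GEM_CPU_FINI ioctls):
 *
 *	etnaviv_gem_cpu_prep(obj, ETNA_PREP_WRITE, &timeout);
 *	... CPU reads/writes through a mapping ...
 *	etnaviv_gem_cpu_fini(obj);
 *
 * prep waits for pending GPU work on the reservation object and, for
 * cached BOs, syncs the pages for CPU access; fini hands them back to
 * the device.
 */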
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled_rcu(obj->resv, write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout_rcu(obj->resv, write, true,
						remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %llu\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = dma_resv_shared_list(robj);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = dma_resv_excl_fence(robj);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, " ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

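/*
 * Called when the last reference to the object is dropped: tears down
 * any remaining GPU VA mappings, then releases the backing storage via
 * the per-object release hook.
 */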
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context) {
			etnaviv_iommu_unmap_gem(context, mapping);
			etnaviv_iommu_context_put(context);
		}

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.vm_ops = &vm_ops,
};

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

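/*
 * Pin the user pages backing a userptr BO. pin_user_pages_fast() may
 * pin fewer pages than requested, so keep calling it until the whole
 * range is covered, and unpin everything again on error.
 */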
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages,
					  FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
					  pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);

	return ret;
}