// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

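/*
 * Shmem and userptr objects have inverse dependencies between the object
 * lock and mmap_lock: the userptr path may take mmap_lock while pinning
 * pages under the object lock, while shmem faults take the object lock with
 * mmap_lock already held. Separate lockdep classes (applied via
 * lockdep_set_class() at object creation) keep this from being flagged as a
 * false-positive deadlock cycle.
 */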
static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

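/*
 * Called with etnaviv_obj->lock held. Lazily populates the page array via
 * the per-object ops and, on first use, builds the sg_table and maps it for
 * DMA, so repeated callers just get the cached page array back.
 */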
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

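/*
 * Note that the VMA flags below are updated through vm_flags_set() rather
 * than by writing vma->vm_flags directly, as the flags field is no longer
 * openly writable by drivers. VM_PFNMAP matches the vmf_insert_pfn() used in
 * the fault handler below, while VM_DONTEXPAND and VM_DONTDUMP keep mremap()
 * from growing the mapping and exclude it from core dumps.
 */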
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

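/*
 * The fault handler pins down the backing pages under the object lock and
 * inserts the faulting PFN directly (the mapping is VM_PFNMAP). If the lock
 * cannot be taken because a signal is pending, VM_FAULT_NOPAGE is returned
 * and the fault is simply retried.
 */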
static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     pfn, pfn << PAGE_SHIFT);

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

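/*
 * The offset handed back here is a "fake" offset into the DRM file,
 * allocated from the device's vma offset manager; userspace passes it to
 * mmap(2) on the DRM fd to map the object.
 */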
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

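/*
 * On success this returns a mapping with its use count incremented and a
 * reference held on the underlying GEM object; both are dropped again by
 * etnaviv_gem_mapping_unreference() above.
 */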
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context) {
				if (va && mapping->iova != va) {
					etnaviv_iommu_reap_mapping(mapping);
					mapping = NULL;
				} else {
					mapping->use += 1;
				}
			} else {
				mapping = NULL;
			}
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

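/*
 * Return (and lazily create) the kernel virtual mapping of the object. The
 * vaddr is cached on the object and only torn down on release, so the
 * double-checked locking below only guards the first-use creation.
 */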
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

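/*
 * etnaviv_gem_cpu_prep() brackets CPU access to a BO: it waits for any
 * conflicting GPU work via the reservation object's fences (or just tests
 * them when ETNA_PREP_NOSYNC is set) and, for cached BOs, syncs the pages
 * for CPU access. A matching etnaviv_gem_cpu_fini() hands ownership back to
 * the device.
 */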
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv,
					    dma_resv_usage_rw(write)))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
					    true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	unsigned long off = drm_vma_node_start(&obj->vma_node);
	int r;

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	r = dma_resv_lock(robj, NULL);
	if (r)
		return;

	dma_resv_describe(robj, m);
	dma_resv_unlock(robj);
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context)
			etnaviv_iommu_unmap_gem(context, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.mmap = etnaviv_gem_mmap,
	.vm_ops = &vm_ops,
};

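/*
 * The cache flags validated below select the CPU mapping attributes used at
 * mmap time: ETNA_BO_WC maps write-combined, ETNA_BO_UNCACHED maps
 * non-cached, and ETNA_BO_CACHED keeps normal cached mappings at the cost of
 * explicit cpu_prep()/cpu_fini() cache synchronisation.
 */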
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
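/*
 * Illustrative only: userspace typically reaches this through the
 * ETNAVIV_GEM_NEW ioctl, along the lines of:
 *
 *	struct drm_etnaviv_gem_new req = { .size = 4096, .flags = ETNA_BO_WC };
 *
 *	ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &req);
 *	// req.handle now names the BO; the mmap offset is obtained
 *	// separately via DRM_IOCTL_ETNAVIV_GEM_INFO.
 */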
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

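/*
 * Userptr objects wrap anonymous user memory instead of shmem pages. The
 * pages are pinned with FOLL_LONGTERM since the GPU may hold on to them
 * indefinitely, and only the mm that created the object may populate it.
 */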
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	unsigned int gup_flags = FOLL_LONGTERM;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!userptr->ro)
		gup_flags |= FOLL_WRITE;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages, gup_flags, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

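/*
 * mmap of a userptr object through the DRM fd is refused: the memory is
 * already mapped in the owning process at the address originally passed in.
 */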
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}