/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

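/*
 * Undo the effect of etnaviv_gem_get_pages(): unmap the scatterlist
 * from the device, free the sg_table, and hand the pages back to shmem,
 * marking them dirty so their contents are written back.
 */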
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

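/*
 * Pin the backing pages of an object and, on first use, build the
 * scatter/gather table and map it for the device.  Must be called with
 * etnaviv_obj->lock held; returns the page array or an ERR_PTR.
 */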
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

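/*
 * Set up a userspace mapping according to the buffer's caching mode:
 * write-combined and uncached buffers are inserted page by page from
 * the fault handler, while fully cached buffers are redirected to the
 * shmem file so they use its address_space.
 */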
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.  Note that vm_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

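/*
 * Look up an existing mapping of this object on the given MMU.  A NULL
 * mmu matches a mapping whose MMU has been torn down, i.e. a reaped
 * mapping that may be re-used.
 */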
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_reference(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
}

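/*
 * Find or create a mapping of this object into the GPU's MMU, taking a
 * use count on the mapping and a reference on the object.  A reaped
 * mapping is re-used when available, otherwise a fresh one is allocated
 * and mapped.
 */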
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_reference(obj);
	return mapping;
}

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
			VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

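/*
 * Translate a userspace ETNA_PREP_* access mask into the DMA direction
 * used for the cache maintenance calls below.
 */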
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

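/*
 * Prepare a buffer for CPU access: wait for (or, with ETNA_PREP_NOSYNC,
 * poll) pending GPU access via the reservation object, then, for cached
 * buffers, sync the scatterlist for the CPU and record the access mode
 * so that etnaviv_gem_cpu_fini() can sync it back for the device.
 */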
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		if (!etnaviv_obj->sgt) {
			void *ret;

			mutex_lock(&etnaviv_obj->lock);
			ret = etnaviv_gem_get_pages(etnaviv_obj);
			mutex_unlock(&etnaviv_obj->lock);
			if (IS_ERR(ret))
				return PTR_ERR(ret);
		}

		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, obj->refcount.refcount.counter,
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->vaddr)
		vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	list_del(&etnaviv_obj->gem_node);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);

	return 0;
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
		u32 size, u32 flags)
{
	struct drm_gem_object *obj;
	int ret;

	obj = __etnaviv_gem_new(dev, size, flags);
	if (IS_ERR(obj))
		return obj;

	ret = etnaviv_gem_obj_add(dev, obj);
	if (ret < 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return obj;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

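/*
 * Deferred work used to pin userptr pages from process context when the
 * fast path in etnaviv_gem_userptr_get_pages() cannot pin them all.
 */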
struct get_pages_work {
	struct work_struct work;
	struct mm_struct *mm;
	struct task_struct *task;
	struct etnaviv_gem_object *etnaviv_obj;
};

static struct page **etnaviv_gem_userptr_do_get_pages(
	struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
{
	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	uintptr_t ptr;

	pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pvec)
		return ERR_PTR(-ENOMEM);

	pinned = 0;
	ptr = etnaviv_obj->userptr.ptr;

	down_read(&mm->mmap_sem);
	while (pinned < npages) {
		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
					    !etnaviv_obj->userptr.ro, 0,
					    pvec + pinned, NULL);
		if (ret < 0)
			break;

		ptr += ret * PAGE_SIZE;
		pinned += ret;
	}
	up_read(&mm->mmap_sem);

	if (ret < 0) {
		release_pages(pvec, pinned, 0);
		drm_free_large(pvec);
		return ERR_PTR(ret);
	}

	return pvec;
}

static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
	struct page **pvec;

	pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);

	mutex_lock(&etnaviv_obj->lock);
	if (IS_ERR(pvec)) {
		etnaviv_obj->userptr.work = ERR_CAST(pvec);
	} else {
		etnaviv_obj->userptr.work = NULL;
		etnaviv_obj->pages = pvec;
	}

	mutex_unlock(&etnaviv_obj->lock);
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	mmput(work->mm);
	put_task_struct(work->task);
	kfree(work);
}

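/*
 * Called with etnaviv_obj->lock held.  Pin all userptr pages via the
 * fast GUP path when the target mm is the caller's own; otherwise queue
 * the slow path worker and return -EAGAIN so the caller retries after
 * the worker has finished.
 */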
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct get_pages_work *work;
	struct mm_struct *mm;
	int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	if (etnaviv_obj->userptr.work) {
		if (IS_ERR(etnaviv_obj->userptr.work)) {
			ret = PTR_ERR(etnaviv_obj->userptr.work);
			etnaviv_obj->userptr.work = NULL;
		} else {
			ret = -EAGAIN;
		}
		return ret;
	}

	mm = get_task_mm(etnaviv_obj->userptr.task);
	pinned = 0;
	if (mm == current->mm) {
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
		if (!pvec) {
			mmput(mm);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
					       !etnaviv_obj->userptr.ro, pvec);
		if (pinned < 0) {
			drm_free_large(pvec);
			mmput(mm);
			return pinned;
		}

		if (pinned == npages) {
			etnaviv_obj->pages = pvec;
			mmput(mm);
			return 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		mmput(mm);
		return -ENOMEM;
	}

	get_task_struct(current);
	drm_gem_object_reference(&etnaviv_obj->base);

	work->mm = mm;
	work->task = current;
	work->etnaviv_obj = etnaviv_obj;

	etnaviv_obj->userptr.work = &work->work;
	INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);

	etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);

	return -EAGAIN;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages, 0);
		drm_free_large(etnaviv_obj->pages);
	}
	put_task_struct(etnaviv_obj->userptr.task);
}

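/*
 * userptr objects are backed by the creating process's memory, not by a
 * shmem file, so they cannot be mapped through the GEM mmap offset.
 */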
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.task = current;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
	get_task_struct(current);

	ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
	if (ret) {
		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
		return ret;
	}

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);

	return ret;
}