/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* DOC: VC4 GEM BO management support.
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

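/* A rough sketch of the bookkeeping this file manipulates (the
 * authoritative definitions live in vc4_drv.h):
 *
 *	bo_cache.size_list: array of list heads indexed by BO size in
 *		pages, each linking the cached BOs of exactly that size.
 *	bo_cache.time_list: every cached BO, most recently freed at the
 *		head, so stale entries can be reaped from the tail.
 *	bo_stats: the allocation counters dumped through debugfs.
 */
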
#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	DRM_INFO("num bos allocated: %d\n",
		 vc4->bo_stats.num_allocated);
	DRM_INFO("size bos allocated: %dkb\n",
		 vc4->bo_stats.size_allocated / 1024);
	DRM_INFO("num bos used: %d\n",
		 vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
	DRM_INFO("size bos used: %dkb\n",
		 (vc4->bo_stats.size_allocated -
		  vc4->bo_stats.size_cached) / 1024);
	DRM_INFO("num bos cached: %d\n",
		 vc4->bo_stats.num_cached);
	DRM_INFO("size bos cached: %dkb\n",
		 vc4->bo_stats.size_cached / 1024);
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo_stats stats;

	/* Take a snapshot of the current stats with the lock held. */
	mutex_lock(&vc4->bo_lock);
	stats = vc4->bo_stats;
	mutex_unlock(&vc4->bo_lock);

	seq_printf(m, "num bos allocated: %d\n",
		   stats.num_allocated);
	seq_printf(m, "size bos allocated: %dkb\n",
		   stats.size_allocated / 1024);
	seq_printf(m, "num bos used: %d\n",
		   stats.num_allocated - stats.num_cached);
	seq_printf(m, "size bos used: %dkb\n",
		   (stats.size_allocated - stats.size_cached) / 1024);
	seq_printf(m, "num bos cached: %d\n",
		   stats.num_cached);
	seq_printf(m, "size bos cached: %dkb\n",
		   stats.size_cached / 1024);

	return 0;
}
#endif

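/* Maps a page-aligned BO size to its index in the size_list array:
 * e.g. a 4096-byte (one page) BO lands at index 0, a 16384-byte BO at
 * index 3. Callers are expected to pass a non-zero, page-aligned size.
 */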
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

/* Must be called with bo_lock held. */
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	vc4->bo_stats.num_allocated--;
	vc4->bo_stats.size_allocated -= obj->size;
	drm_gem_cma_free_object(obj);
}

/* Must be called with bo_lock held. */
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	vc4->bo_stats.num_cached--;
	vc4->bo_stats.size_cached -= obj->size;

	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

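/* The size_list array below is grown by doubling. One subtlety: an
 * empty list_head points at itself, so the old heads can't simply be
 * copied to their new location. list_replace() rewrites the
 * neighbours' pointers for non-empty lists, and empty lists are
 * re-initialized at their new address instead.
 */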
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
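	/* A cached BO dropped to a refcount of zero when it was freed,
	 * so give it a fresh reference for its new owner.
	 */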
	kref_init(&bo->base.base.refcount);

out:
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&vc4->bo_lock);
	vc4->bo_stats.num_allocated++;
	vc4->bo_stats.size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	return &bo->base.base;
}

struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool from_cache)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	if (from_cache) {
		struct vc4_bo *bo = vc4_bo_get_from_cache(dev, size);

		if (bo)
			return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);

		cma_obj = drm_gem_cma_create(dev, size);
		if (IS_ERR(cma_obj)) {
			DRM_ERROR("Failed to allocate from CMA:\n");
			vc4_bo_stats_dump(vc4);
			return ERR_PTR(-ENOMEM);
		}
	}

	return to_vc4_bo(&cma_obj->base);
}

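/* Example of the math below: a 1920x1080, 32bpp dumb buffer gets
 * pitch = 1920 * 32 / 8 = 7680 bytes and size = 7680 * 1080 =
 * 8294400 bytes, which vc4_bo_create() then rounds up to a whole
 * number of pages.
 */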
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

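/* Expire cached BOs that have been unused for at least a second. If
 * the oldest entry is still fresh, re-arm the timer to try again
 * roughly a second later rather than polling.
 */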
/* Must be called with bo_lock held. */
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 *
 * Note that this is called with the struct_mutex held.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4->bo_stats.num_cached++;
	vc4->bo_stats.size_cached += gem_bo->size;

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

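/* The timer fires in atomic context, where bo_lock (a mutex) can't be
 * taken, so the actual reaping is punted to the workqueue above.
 */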
static void vc4_bo_cache_time_timer(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	schedule_work(&vc4->bo_cache.time_work);
}

struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_ERROR("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_prime_export(dev, obj, flags);
}

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
				    bo->base.vaddr, bo->base.paddr,
				    vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_ERROR("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

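/* A rough sketch of how userspace reaches this, assuming the uapi
 * definitions in uapi/drm/vc4_drm.h:
 *
 *	struct drm_vc4_create_bo create = { .size = bo_size };
 *
 *	ioctl(drm_fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *	(on success, create.handle names the new BO)
 */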
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

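/* The usual userspace sequence, sketched under the same uapi
 * assumptions as above: create a BO, look up its fake mmap offset
 * here, then mmap() the DRM fd at that offset:
 *
 *	struct drm_vc4_mmap_bo map = { .handle = create.handle };
 *
 *	ioctl(drm_fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, map.offset);
 */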
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_unreference_unlocked(gem_obj);
	return 0;
}

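/* Shader BOs are written once here at creation time and validated
 * before a handle exists; the export/mmap/vmap hooks above refuse
 * writable mappings and exports, so the validated contents can't be
 * changed behind the validator's back afterwards.
 */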
int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory: the BO may have come from the
	 * cache and could still hold another user's data.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races where users could mmap the BO before it's validated.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_unreference_unlocked(&bo->base.base);

	return ret;
}

void vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	setup_timer(&vc4->bo_cache.time_timer,
		    vc4_bo_cache_time_timer,
		    (unsigned long)dev);
}

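/* Teardown order matters below: the timer can queue the work item, so
 * the timer is deleted first, any queued work is cancelled and waited
 * for, and only then is the cache purged for good.
 */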
void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	if (vc4->bo_stats.num_allocated) {
		DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
		vc4_bo_stats_dump(vc4);
	}
}