drivers/gpu/drm/armada/armada_gem.c

/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

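/*
 * Fault handler for CPU mmap of linear (phys_addr-backed) objects:
 * translate the faulting address into a page frame number and insert
 * it directly, since there is no struct page behind these mappings.
 */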
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vma, addr, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault = armada_gem_vm_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

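/*
 * Release an object: drop its mmap offset, free whichever backing
 * store it has (order-N page allocation, node in the linear pool, or
 * an imported dma-buf), then free the GEM object itself.
 */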
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

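/*
 * Back an object with memory the hardware can use: small objects
 * (typically cursors) come straight from the page allocator, anything
 * larger from the driver's linear memory pool.
 */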
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may not be valid, depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; there
	 * may be some kind of remapping between the device and system
	 * RAM, which makes the device address equally unsafe to re-use
	 * as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node(&priv->linear, node, size, align,
					 DRM_MM_SEARCH_DEFAULT);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

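/*
 * Map an object into kernel space on demand; only linear objects need
 * an ioremap, page-backed objects already have a kernel address.
 */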
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

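/*
 * Allocate a GEM object with no shmem backing; the storage is supplied
 * later, either from the linear pool or from an imported dma-buf.
 */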
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);
	obj->dev_addr = DMA_ERROR_CODE;

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

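/*
 * Allocate a shmem-backed GEM object. Unlike private objects these
 * have a struct file behind them, so they can be mapped into userspace
 * through the GEM_MMAP ioctl and exported page-by-page via prime.
 */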
struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	obj->dev_addr = DMA_ERROR_CODE;

	mapping = file_inode(obj->obj.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
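/*
 * For reference, a minimal userspace sketch of the dumb-buffer flow
 * these callbacks serve (core DRM UAPI only; error handling omitted):
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 64, .height = 32, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *
 *	void *fb = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, mreq.offset);
 *
 * The final mmap goes through armada_gem_vm_ops above, since dumb
 * buffers here are linear-backed and have no struct page.
 */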
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
	uint32_t handle, uint64_t *offset)
{
	struct armada_gem_object *obj;
	int ret = 0;

	obj = armada_gem_object_lookup(dev, file, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		return -EINVAL;
	}

	/* Don't allow imported objects to be mapped */
	if (obj->obj.import_attach) {
		ret = -EINVAL;
		goto err_unref;
	}

	ret = drm_gem_create_mmap_offset(&obj->obj);
	if (ret == 0) {
		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
	}

 err_unref:
	drm_gem_object_unreference_unlocked(&obj->obj);

	return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
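/*
 * Flow for the ioctls below: GEM_CREATE allocates a shmem-backed
 * object and returns a handle, GEM_MMAP maps such an object into the
 * process through its shmem file, and GEM_PWRITE copies user data into
 * an object that already has a kernel mapping (see
 * armada_gem_map_object()).
 */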
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(dev, file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_unreference_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(dev, file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object; drop the lookup reference if not */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
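/*
 * Build an sg_table for an exported object. Three backing cases:
 * shmem-backed (pin each page), a single contiguous page allocation,
 * or a linear region with no struct page, where the dma address and
 * length are filled in directly.
 */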
struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = file_inode(dobj->obj.filp)->i_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf = armada_gem_prime_map_dma_buf,
	.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap_atomic = armada_gem_dmabuf_no_kmap,
	.kunmap_atomic = armada_gem_dmabuf_no_kunmap,
	.kmap = armada_gem_dmabuf_no_kmap,
	.kunmap = armada_gem_dmabuf_no_kunmap,
	.mmap = armada_gem_dmabuf_mmap,
};

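/*
 * Export an object as a dma-buf. kmap and userspace mmap of the
 * exported buffer are left unimplemented (the stub ops above); only
 * DMA mapping through map_dma_buf is supported.
 */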
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return dma_buf_export(&exp_info);
}

struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

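/*
 * Map a previously imported dma-buf for DMA. The display hardware can
 * only scan out contiguous memory, so a mapping that yields more than
 * one scatterlist entry, or less than the object size, is rejected.
 */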
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (!dobj->sgt) {
		DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
		return -EINVAL;
	}
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	return 0;
}