/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

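/*
 * CPU fault handler: our objects are backed by physically contiguous
 * memory, so the faulting PFN is computed directly from the object's
 * physical base address and inserted with vm_insert_pfn().
 */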
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vma, vmf->address, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

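/*
 * Tear down an object: drop its mmap offset, then release whichever
 * backing store it has (high-order pages, a node in the linear region,
 * or an imported dma-buf attachment) before freeing the object itself.
 */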
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

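/*
 * Attach physically contiguous backing to an object.  Small objects
 * (up to 8K, typically cursors) are taken from the page allocator;
 * anything larger gets a node carved out of the linear memory region,
 * which is then cleared through a temporary write-combining mapping.
 */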
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may be invalid depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node(&priv->linear, node, size, align,
					 DRM_MM_SEARCH_DEFAULT);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

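/* Return a CPU mapping of the object, ioremapping linear memory on demand. */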
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

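/*
 * Allocate a GEM object with no shmem backing; callers attach backing
 * store separately (linear memory, pages or an imported dma-buf).
 */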
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);
	obj->dev_addr = DMA_ERROR_CODE;

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

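/* Allocate a shmem-backed GEM object, rounded up to a whole page. */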
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	obj->dev_addr = DMA_ERROR_CODE;

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

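/*
 * Report the fake mmap offset for a dumb buffer handle; imported
 * objects are refused since they may not be CPU-mapped this way.
 */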
int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
	uint32_t handle, uint64_t *offset)
{
	struct armada_gem_object *obj;
	int ret = 0;

	obj = armada_gem_object_lookup(file, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		return -EINVAL;
	}

	/* Don't allow imported objects to be mapped */
	if (obj->obj.import_attach) {
		ret = -EINVAL;
		goto err_unref;
	}

	ret = drm_gem_create_mmap_offset(&obj->obj);
	if (ret == 0) {
		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
	}

 err_unref:
	drm_gem_object_unreference_unlocked(&obj->obj);

	return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
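/*
 * A minimal userspace sketch (error handling omitted) of how these
 * ioctls fit together, assuming the DRM_IOCTL_ARMADA_GEM_* definitions
 * and structures from the <drm/armada_drm.h> UAPI header:
 *
 *	struct drm_armada_gem_create create = { .size = len };
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &create);
 *
 *	struct drm_armada_gem_mmap map = {
 *		.handle = create.handle,
 *		.size = len,
 *	};
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_MMAP, &map);
 *
 * On success, map.addr holds the user virtual address of the buffer.
 */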
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_unreference_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

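/*
 * Copy user data into a kernel-mapped object at the given offset.  A
 * hedged sketch of the corresponding userspace call, assuming the
 * drm_armada_gem_pwrite structure from <drm/armada_drm.h>:
 *
 *	struct drm_armada_gem_pwrite pw = {
 *		.ptr = (uintptr_t)data,
 *		.handle = handle,
 *		.offset = 0,
 *		.size = len,
 *	};
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_PWRITE, &pw);
 *
 * The object's optional update callback is invoked after a successful
 * copy so the owner can react to the new contents.
 */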
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
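/*
 * Build the scatterlist for an exported object.  There are three
 * backing cases: shmem objects have their pages pinned and DMA-mapped
 * one by one, page-backed objects map their single contiguous
 * allocation, and linear objects carry no struct pages so the DMA
 * address and length are filled in directly.
 */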
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

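/*
 * Undo armada_gem_prime_map_dma_buf(): DMA-unmap (linear objects were
 * never DMA-mapped), release any pinned shmem pages, and free the table.
 */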
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.kmap		= armada_gem_dmabuf_no_kmap,
	.kunmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

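/* Export a GEM object as a dma-buf using the ops above. */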
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

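/*
 * Import a dma-buf.  Re-imports of our own exports short-circuit to a
 * new reference on the underlying GEM object; foreign buffers get a
 * private object wrapping the attachment, mapped later on demand.
 */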
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

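/*
 * DMA-map an imported buffer and verify that the result is a single
 * contiguous region big enough to back the whole object, since the
 * display hardware cannot scan out a scattered buffer.
 */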
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	return 0;
}