/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
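
/*
 * Fault handler for userspace mappings: all armada objects are physically
 * contiguous, so resolve the faulting address to a PFN within the object's
 * allocation and insert it directly into the page tables.
 */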
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vma, addr, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

/* dev->struct_mutex is held here */
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		drm_mm_remove_node(dobj->linear);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}
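
/*
 * Attach backing store to an object: small objects (eg, cursors) come
 * straight from the page allocator, anything larger comes from the
 * driver's linear memory pool.
 */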
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may be invalid, depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; there
	 * may be some kind of remapping between the device and system
	 * RAM, which makes the device address equally unsafe to re-use
	 * as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */
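
	/*
	 * Purely illustrative (and hypothetical - this driver does not do
	 * this): the generic pattern being avoided would look roughly like
	 *
	 *	cpu = dma_alloc_coherent(dev->dev, size, &dma_handle,
	 *				 GFP_KERNEL);
	 *	obj->phys_addr = virt_to_phys(cpu);	(bogus if remapped)
	 *	obj->dev_addr = dma_handle;		(may not be physical)
	 *
	 * Neither derived address is reliable on all architectures, hence
	 * the private linear pool used below.
	 */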

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&dev->struct_mutex);
		ret = drm_mm_insert_node(&priv->linear, node, size, align,
					 DRM_MM_SEARCH_DEFAULT);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&dev->struct_mutex);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&dev->struct_mutex);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}
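
/*
 * Allocate a GEM object with no shmem backing store; the caller attaches
 * backing separately, either via armada_gem_linear_back() or by importing
 * a dma-buf.
 */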
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);
	obj->dev_addr = DMA_ERROR_CODE;

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}
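
/* Allocate a shmem-backed object, as used by the ARMADA_GEM_CREATE ioctl. */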
struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	obj->dev_addr = DMA_ERROR_CODE;

	mapping = file_inode(obj->obj.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
	uint32_t handle, uint64_t *offset)
{
	struct armada_gem_object *obj;
	int ret = 0;

	obj = armada_gem_object_lookup(dev, file, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		return -EINVAL;
	}

	/* Don't allow imported objects to be mapped */
	if (obj->obj.import_attach) {
		ret = -EINVAL;
		goto err_unref;
	}

	ret = drm_gem_create_mmap_offset(&obj->obj);
	if (ret == 0) {
		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
	}

 err_unref:
	drm_gem_object_unreference_unlocked(&obj->obj);

	return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(dev, file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_unreference_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}
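
/*
 * Copy data from userspace into a kernel-mapped object.  The optional
 * update callback notifies the object's owner that the contents changed.
 */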
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(dev, file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
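/*
 * Provide the importer with a scatterlist for our backing store: shmem
 * pages are pinned and DMA-mapped, while page- and linear-backed objects
 * are described as a single contiguous entry.
 */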
struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = file_inode(dobj->obj.filp)->i_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		page_cache_release(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			page_cache_release(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}
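
/*
 * kmap and userspace mmap of exported buffers are not supported, but the
 * dma-buf core expects these ops to be populated at export time, so
 * provide stubs which return NULL or fail.
 */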
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.kmap		= armada_gem_dmabuf_no_kmap,
	.kunmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};
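
/*
 * Export a GEM object as a dma-buf using our ops; note that the exported
 * buffer is always created read-write (the flags argument is not used).
 */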
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return dma_buf_export(&exp_info);
}

struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}
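
/*
 * Map an imported dma-buf for DMA and validate it: the hardware needs a
 * single contiguous region covering the whole object, so reject scattered
 * or undersized buffers.
 */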
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (!dobj->sgt) {
		DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
		return -EINVAL;
	}
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	return 0;
}