| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Copyright (C) 2014-2018 Etnaviv Project |
| 4 | */ |
| 5 | |
| 6 | #include <drm/drm_prime.h> |
| 7 | #include <linux/dma-buf.h> |
| 8 | |
| 9 | #include "etnaviv_drv.h" |
| 10 | #include "etnaviv_gem.h" |
| 11 | |
/* Dedicated lockdep class for the object lock of PRIME-imported BOs
 * (applied via lockdep_set_class() in etnaviv_gem_prime_import_sg_table);
 * presumably this keeps lock nesting against regular etnaviv GEM object
 * locks from raising false lockdep reports — confirm against lock users.
 */
static struct lock_class_key etnaviv_prime_lock_class;
| 13 | |
| 14 | struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) |
| 15 | { |
| 16 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); |
| 17 | int npages = obj->size >> PAGE_SHIFT; |
| 18 | |
| 19 | if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */ |
| 20 | return ERR_PTR(-EINVAL); |
| 21 | |
| 22 | return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages); |
| 23 | } |
| 24 | |
/*
 * PRIME vmap entry point: delegate to the driver's generic vmap helper.
 * Returns the kernel virtual address of the mapping, or NULL on failure.
 */
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
{
	return etnaviv_gem_vmap(obj);
}
| 29 | |
/*
 * PRIME vunmap entry point. Intentionally a no-op for now: the mapping
 * is kept for the object's lifetime and torn down on release.
 */
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* TODO msm_gem_vunmap() */
}
| 34 | |
| 35 | int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, |
| 36 | struct vm_area_struct *vma) |
| 37 | { |
| 38 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); |
| 39 | int ret; |
| 40 | |
| 41 | ret = drm_gem_mmap_obj(obj, obj->size, vma); |
| 42 | if (ret < 0) |
| 43 | return ret; |
| 44 | |
| 45 | return etnaviv_obj->ops->mmap(etnaviv_obj, vma); |
| 46 | } |
| 47 | |
| 48 | int etnaviv_gem_prime_pin(struct drm_gem_object *obj) |
| 49 | { |
| 50 | if (!obj->import_attach) { |
| 51 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); |
| 52 | |
| 53 | mutex_lock(&etnaviv_obj->lock); |
| 54 | etnaviv_gem_get_pages(etnaviv_obj); |
| 55 | mutex_unlock(&etnaviv_obj->lock); |
| 56 | } |
| 57 | return 0; |
| 58 | } |
| 59 | |
| 60 | void etnaviv_gem_prime_unpin(struct drm_gem_object *obj) |
| 61 | { |
| 62 | if (!obj->import_attach) { |
| 63 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); |
| 64 | |
| 65 | mutex_lock(&etnaviv_obj->lock); |
| 66 | etnaviv_gem_put_pages(to_etnaviv_bo(obj)); |
| 67 | mutex_unlock(&etnaviv_obj->lock); |
| 68 | } |
| 69 | } |
| 70 | |
| 71 | static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) |
| 72 | { |
| 73 | if (etnaviv_obj->vaddr) |
| 74 | dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, |
| 75 | etnaviv_obj->vaddr); |
| 76 | |
| 77 | /* Don't drop the pages for imported dmabuf, as they are not |
| 78 | * ours, just free the array we allocated: |
| 79 | */ |
| 80 | if (etnaviv_obj->pages) |
| 81 | kvfree(etnaviv_obj->pages); |
| 82 | |
| 83 | drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt); |
| 84 | } |
| 85 | |
| 86 | static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) |
| 87 | { |
| 88 | struct dma_buf_map map; |
| 89 | int ret; |
| 90 | |
| 91 | lockdep_assert_held(&etnaviv_obj->lock); |
| 92 | |
| 93 | ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map); |
| 94 | if (ret) |
| 95 | return NULL; |
| 96 | return map.vaddr; |
| 97 | } |
| 98 | |
| 99 | static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, |
| 100 | struct vm_area_struct *vma) |
| 101 | { |
| 102 | return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0); |
| 103 | } |
| 104 | |
/* Object ops used for PRIME-imported BOs: all backing-storage operations
 * are forwarded to the exporting dmabuf rather than handled locally.
 */
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
	/* .get_pages should never be called */
	.release = etnaviv_gem_prime_release,
	.vmap = etnaviv_gem_prime_vmap_impl,
	.mmap = etnaviv_gem_prime_mmap_obj,
};
| 111 | |
/*
 * Import a dmabuf into an etnaviv GEM object from its sg_table.
 *
 * Creates a private GEM object backed by the exporter's pages, records
 * the sg_table, and builds a page-pointer array from it so the rest of
 * the driver can treat the object like a normal (write-combined) BO.
 *
 * Returns the new GEM object or an ERR_PTR on failure. On the failure
 * path the drm_gem_object_put() drops the last reference, which ends up
 * in etnaviv_gem_prime_release() — that is why ->sgt and ->pages are
 * assigned before anything can fail, so release sees consistent state.
 */
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
	struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	struct etnaviv_gem_object *etnaviv_obj;
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	int ret, npages;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
				      &etnaviv_gem_prime_ops, &etnaviv_obj);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Separate lock class: imported BOs have different lock nesting. */
	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_prime_lock_class);

	npages = size / PAGE_SIZE;

	etnaviv_obj->sgt = sgt;
	etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!etnaviv_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Flatten the sg_table into the page array (no dma addrs needed). */
	ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
					       NULL, npages);
	if (ret)
		goto fail;

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	return &etnaviv_obj->base;

fail:
	drm_gem_object_put(&etnaviv_obj->base);

	return ERR_PTR(ret);
}