/*
 * drivers/dma-buf/udmabuf.c — scraped from the linux-2.6-block.git web
 * viewer, commit fbb0de79 by GH (Gerd Hoffmann).
 * NOTE(review): the scraped title "drm/virtio: add iommu support." names
 * a different commit; this file is the udmabuf misc-device driver.
 */
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/init.h>
3#include <linux/module.h>
4#include <linux/device.h>
5#include <linux/kernel.h>
6#include <linux/slab.h>
7#include <linux/miscdevice.h>
8#include <linux/dma-buf.h>
9#include <linux/highmem.h>
10#include <linux/cred.h>
11#include <linux/shmem_fs.h>
12#include <linux/memfd.h>
13
14#include <uapi/linux/udmabuf.h>
15
/* A udmabuf: a dma-buf whose backing store is pinned shmem (memfd) pages. */
struct udmabuf {
	u32 pagecount;		/* number of entries in pages[] */
	struct page **pages;	/* backing pages; each holds a reference
				 * taken at create time, dropped in
				 * release_udmabuf() */
};
20
21static int udmabuf_vm_fault(struct vm_fault *vmf)
22{
23 struct vm_area_struct *vma = vmf->vma;
24 struct udmabuf *ubuf = vma->vm_private_data;
25
26 if (WARN_ON(vmf->pgoff >= ubuf->pagecount))
27 return VM_FAULT_SIGBUS;
28
29 vmf->page = ubuf->pages[vmf->pgoff];
30 get_page(vmf->page);
31 return 0;
32}
33
/* vm operations for CPU mappings; pages are supplied lazily on fault. */
static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};
37
38static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
39{
40 struct udmabuf *ubuf = buf->priv;
41
42 if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
43 return -EINVAL;
44
45 vma->vm_ops = &udmabuf_vm_ops;
46 vma->vm_private_data = ubuf;
47 return 0;
48}
49
50static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
51 enum dma_data_direction direction)
52{
53 struct udmabuf *ubuf = at->dmabuf->priv;
54 struct sg_table *sg;
55
56 sg = kzalloc(sizeof(*sg), GFP_KERNEL);
57 if (!sg)
58 goto err1;
59 if (sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
60 0, ubuf->pagecount << PAGE_SHIFT,
61 GFP_KERNEL) < 0)
62 goto err2;
63 if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction))
64 goto err3;
65
66 return sg;
67
68err3:
69 sg_free_table(sg);
70err2:
71 kfree(sg);
72err1:
73 return ERR_PTR(-ENOMEM);
74}
75
76static void unmap_udmabuf(struct dma_buf_attachment *at,
77 struct sg_table *sg,
78 enum dma_data_direction direction)
79{
80 sg_free_table(sg);
81 kfree(sg);
82}
83
84static void release_udmabuf(struct dma_buf *buf)
85{
86 struct udmabuf *ubuf = buf->priv;
87 pgoff_t pg;
88
89 for (pg = 0; pg < ubuf->pagecount; pg++)
90 put_page(ubuf->pages[pg]);
91 kfree(ubuf->pages);
92 kfree(ubuf);
93}
94
95static void *kmap_udmabuf(struct dma_buf *buf, unsigned long page_num)
96{
97 struct udmabuf *ubuf = buf->priv;
98 struct page *page = ubuf->pages[page_num];
99
100 return kmap(page);
101}
102
103static void kunmap_udmabuf(struct dma_buf *buf, unsigned long page_num,
104 void *vaddr)
105{
106 kunmap(vaddr);
107}
108
109static struct dma_buf_ops udmabuf_ops = {
110 .map_dma_buf = map_udmabuf,
111 .unmap_dma_buf = unmap_udmabuf,
112 .release = release_udmabuf,
113 .map = kmap_udmabuf,
114 .unmap = kunmap_udmabuf,
115 .mmap = mmap_udmabuf,
116};
117
/* The memfd must be sealed against shrinking (the pinned pages must stay
 * valid) and must NOT be write-sealed, since the buffer is mapped
 * writable. */
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)
120
121static long udmabuf_create(struct udmabuf_create_list *head,
122 struct udmabuf_create_item *list)
123{
124 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
125 struct file *memfd = NULL;
126 struct udmabuf *ubuf;
127 struct dma_buf *buf;
128 pgoff_t pgoff, pgcnt, pgidx, pgbuf;
129 struct page *page;
130 int seals, ret = -EINVAL;
131 u32 i, flags;
132
133 ubuf = kzalloc(sizeof(struct udmabuf), GFP_KERNEL);
134 if (!ubuf)
135 return -ENOMEM;
136
137 for (i = 0; i < head->count; i++) {
138 if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
139 goto err_free_ubuf;
140 if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
141 goto err_free_ubuf;
142 ubuf->pagecount += list[i].size >> PAGE_SHIFT;
143 }
144 ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(struct page *),
145 GFP_KERNEL);
146 if (!ubuf->pages) {
147 ret = -ENOMEM;
148 goto err_free_ubuf;
149 }
150
151 pgbuf = 0;
152 for (i = 0; i < head->count; i++) {
153 memfd = fget(list[i].memfd);
154 if (!memfd)
155 goto err_put_pages;
156 if (!shmem_mapping(file_inode(memfd)->i_mapping))
157 goto err_put_pages;
158 seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
159 if (seals == -EINVAL ||
160 (seals & SEALS_WANTED) != SEALS_WANTED ||
161 (seals & SEALS_DENIED) != 0)
162 goto err_put_pages;
163 pgoff = list[i].offset >> PAGE_SHIFT;
164 pgcnt = list[i].size >> PAGE_SHIFT;
165 for (pgidx = 0; pgidx < pgcnt; pgidx++) {
166 page = shmem_read_mapping_page(
167 file_inode(memfd)->i_mapping, pgoff + pgidx);
168 if (IS_ERR(page)) {
169 ret = PTR_ERR(page);
170 goto err_put_pages;
171 }
172 ubuf->pages[pgbuf++] = page;
173 }
174 fput(memfd);
175 }
176 memfd = NULL;
177
178 exp_info.ops = &udmabuf_ops;
179 exp_info.size = ubuf->pagecount << PAGE_SHIFT;
180 exp_info.priv = ubuf;
181
182 buf = dma_buf_export(&exp_info);
183 if (IS_ERR(buf)) {
184 ret = PTR_ERR(buf);
185 goto err_put_pages;
186 }
187
188 flags = 0;
189 if (head->flags & UDMABUF_FLAGS_CLOEXEC)
190 flags |= O_CLOEXEC;
191 return dma_buf_fd(buf, flags);
192
193err_put_pages:
194 while (pgbuf > 0)
195 put_page(ubuf->pages[--pgbuf]);
196err_free_ubuf:
197 fput(memfd);
198 kfree(ubuf->pages);
199 kfree(ubuf);
200 return ret;
201}
202
203static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
204{
205 struct udmabuf_create create;
206 struct udmabuf_create_list head;
207 struct udmabuf_create_item list;
208
209 if (copy_from_user(&create, (void __user *)arg,
210 sizeof(struct udmabuf_create)))
211 return -EFAULT;
212
213 head.flags = create.flags;
214 head.count = 1;
215 list.memfd = create.memfd;
216 list.offset = create.offset;
217 list.size = create.size;
218
219 return udmabuf_create(&head, &list);
220}
221
222static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
223{
224 struct udmabuf_create_list head;
225 struct udmabuf_create_item *list;
226 int ret = -EINVAL;
227 u32 lsize;
228
229 if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
230 return -EFAULT;
231 if (head.count > 1024)
232 return -EINVAL;
233 lsize = sizeof(struct udmabuf_create_item) * head.count;
234 list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
235 if (IS_ERR(list))
236 return PTR_ERR(list);
237
238 ret = udmabuf_create(&head, list);
239 kfree(list);
240 return ret;
241}
242
243static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
244 unsigned long arg)
245{
246 long ret;
247
248 switch (ioctl) {
249 case UDMABUF_CREATE:
250 ret = udmabuf_ioctl_create(filp, arg);
251 break;
252 case UDMABUF_CREATE_LIST:
253 ret = udmabuf_ioctl_create_list(filp, arg);
254 break;
255 default:
256 ret = -EINVAL;
257 break;
258 }
259 return ret;
260}
261
/* file_operations for /dev/udmabuf: ioctl-only interface. */
static const struct file_operations udmabuf_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
};
266
/* Misc-device node /dev/udmabuf with a dynamically assigned minor. */
static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "udmabuf",
	.fops  = &udmabuf_fops,
};
272
/* Module init: register the /dev/udmabuf misc device. */
static int __init udmabuf_dev_init(void)
{
	return misc_register(&udmabuf_misc);
}
277
/* Module exit: unregister the misc device. */
static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}
282
/* Module entry/exit hooks and metadata. */
module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");