// SPDX-License-Identifier: GPL-2.0
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>

static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");

struct udmabuf {
        pgoff_t pagecount;              /* total number of pinned pages */
        struct page **pages;            /* shmem pages pinned at create time */
        struct sg_table *sg;            /* cached mapping for CPU access */
        struct miscdevice *device;      /* owning /dev/udmabuf misc device */
};

static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct udmabuf *ubuf = vma->vm_private_data;
        pgoff_t pgoff = vmf->pgoff;

        if (pgoff >= ubuf->pagecount)
                return VM_FAULT_SIGBUS;
        vmf->page = ubuf->pages[pgoff];
        get_page(vmf->page);
        return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
        .fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
        struct udmabuf *ubuf = buf->priv;

        dma_resv_assert_held(buf->resv);

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        vma->vm_ops = &udmabuf_vm_ops;
        vma->vm_private_data = ubuf;
        return 0;
}

static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
        struct udmabuf *ubuf = buf->priv;
        void *vaddr;

        dma_resv_assert_held(buf->resv);

        vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
        if (!vaddr)
                return -EINVAL;

        iosys_map_set_vaddr(map, vaddr);
        return 0;
}

static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
        struct udmabuf *ubuf = buf->priv;

        dma_resv_assert_held(buf->resv);

        vm_unmap_ram(map->vaddr, ubuf->pagecount);
}

static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
                                     enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct sg_table *sg;
        int ret;

        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
                                        0, ubuf->pagecount << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret < 0)
                goto err;
        ret = dma_map_sgtable(dev, sg, direction, 0);
        if (ret < 0)
                goto err;
        return sg;

err:
        sg_free_table(sg);
        kfree(sg);
        return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
                         enum dma_data_direction direction)
{
        dma_unmap_sgtable(dev, sg, direction, 0);
        sg_free_table(sg);
        kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
                                    enum dma_data_direction direction)
{
        return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
                          struct sg_table *sg,
                          enum dma_data_direction direction)
{
        return put_sg_table(at->dev, sg, direction);
}

static void release_udmabuf(struct dma_buf *buf)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;
        pgoff_t pg;

        if (ubuf->sg)
                put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

        for (pg = 0; pg < ubuf->pagecount; pg++)
                put_page(ubuf->pages[pg]);
        kfree(ubuf->pages);
        kfree(ubuf);
}

static int begin_cpu_udmabuf(struct dma_buf *buf,
                             enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;
        int ret = 0;

        /*
         * Map the pages against the udmabuf misc device on first use and
         * cache the table; later calls only need cache maintenance.
         */
        if (!ubuf->sg) {
                ubuf->sg = get_sg_table(dev, buf, direction);
                if (IS_ERR(ubuf->sg)) {
                        ret = PTR_ERR(ubuf->sg);
                        ubuf->sg = NULL;
                }
        } else {
                dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
                                    direction);
        }

        return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
                           enum dma_data_direction direction)
{
        struct udmabuf *ubuf = buf->priv;
        struct device *dev = ubuf->device->this_device;

        if (!ubuf->sg)
                return -EINVAL;

        dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
        return 0;
}

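/*
 * Importer-side sketch (illustrative only, not part of this driver): an
 * importer reaches the two callbacks above through the generic dma-buf API,
 * bracketing any CPU reads or writes of the buffer. The variable names below
 * are assumptions made for the example.
 *
 *      struct dma_buf *dmabuf = dma_buf_get(dmabuf_fd);
 *
 *      dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *      // ... CPU reads the buffer contents ...
 *      dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *
 *      dma_buf_put(dmabuf);
 */
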
static const struct dma_buf_ops udmabuf_ops = {
        .cache_sgt_mapping = true,
        .map_dma_buf       = map_udmabuf,
        .unmap_dma_buf     = unmap_udmabuf,
        .release           = release_udmabuf,
        .mmap              = mmap_udmabuf,
        .vmap              = vmap_udmabuf,
        .vunmap            = vunmap_udmabuf,
        .begin_cpu_access  = begin_cpu_udmabuf,
        .end_cpu_access    = end_cpu_udmabuf,
};

/* A backing memfd must be sealed against shrinking and must not be write-sealed. */
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

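/*
 * Userspace sketch (illustrative only): the typical flow is to create a
 * memfd, seal it against shrinking, and hand a page-aligned range of it to
 * /dev/udmabuf. Field names follow struct udmabuf_create as used in
 * udmabuf_ioctl_create() below; the memfd setup is an assumed, minimal
 * example.
 *
 *      int devfd = open("/dev/udmabuf", O_RDWR);
 *      int memfd = memfd_create("buffer", MFD_ALLOW_SEALING);
 *
 *      ftruncate(memfd, size);                         // size must be page aligned
 *      fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);       // SEALS_WANTED
 *
 *      struct udmabuf_create create = {
 *              .memfd  = memfd,
 *              .flags  = UDMABUF_FLAGS_CLOEXEC,
 *              .offset = 0,
 *              .size   = size,
 *      };
 *      int dmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
 */
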
static long udmabuf_create(struct miscdevice *device,
                           struct udmabuf_create_list *head,
                           struct udmabuf_create_item *list)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct file *memfd = NULL;
        struct address_space *mapping = NULL;
        struct udmabuf *ubuf;
        struct dma_buf *buf;
        pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
        struct page *page;
        int seals, ret = -EINVAL;
        u32 i, flags;

        ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
        if (!ubuf)
                return -ENOMEM;

        pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
        for (i = 0; i < head->count; i++) {
                if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
                        goto err;
                if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
                        goto err;
                ubuf->pagecount += list[i].size >> PAGE_SHIFT;
                if (ubuf->pagecount > pglimit)
                        goto err;
        }

        if (!ubuf->pagecount)
                goto err;

        ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
                                    GFP_KERNEL);
        if (!ubuf->pages) {
                ret = -ENOMEM;
                goto err;
        }

        pgbuf = 0;
        for (i = 0; i < head->count; i++) {
                ret = -EBADFD;
                memfd = fget(list[i].memfd);
                if (!memfd)
                        goto err;
                mapping = memfd->f_mapping;
                if (!shmem_mapping(mapping))
                        goto err;
                seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
                if (seals == -EINVAL)
                        goto err;
                ret = -EINVAL;
                if ((seals & SEALS_WANTED) != SEALS_WANTED ||
                    (seals & SEALS_DENIED) != 0)
                        goto err;
                pgoff = list[i].offset >> PAGE_SHIFT;
                pgcnt = list[i].size   >> PAGE_SHIFT;
                for (pgidx = 0; pgidx < pgcnt; pgidx++) {
                        page = shmem_read_mapping_page(mapping, pgoff + pgidx);
                        if (IS_ERR(page)) {
                                ret = PTR_ERR(page);
                                goto err;
                        }
                        ubuf->pages[pgbuf++] = page;
                }
                fput(memfd);
                memfd = NULL;
        }

        exp_info.ops  = &udmabuf_ops;
        exp_info.size = ubuf->pagecount << PAGE_SHIFT;
        exp_info.priv = ubuf;
        exp_info.flags = O_RDWR;

        ubuf->device = device;
        buf = dma_buf_export(&exp_info);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto err;
        }

        flags = 0;
        if (head->flags & UDMABUF_FLAGS_CLOEXEC)
                flags |= O_CLOEXEC;
        return dma_buf_fd(buf, flags);

err:
        while (pgbuf > 0)
                put_page(ubuf->pages[--pgbuf]);
        if (memfd)
                fput(memfd);
        kfree(ubuf->pages);
        kfree(ubuf);
        return ret;
}

static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
        struct udmabuf_create create;
        struct udmabuf_create_list head;
        struct udmabuf_create_item list;

        if (copy_from_user(&create, (void __user *)arg,
                           sizeof(create)))
                return -EFAULT;

        head.flags  = create.flags;
        head.count  = 1;
        list.memfd  = create.memfd;
        list.offset = create.offset;
        list.size   = create.size;

        return udmabuf_create(filp->private_data, &head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
        struct udmabuf_create_list head;
        struct udmabuf_create_item *list;
        int ret = -EINVAL;
        u32 lsize;

        if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
                return -EFAULT;
        if (head.count > list_limit)
                return -EINVAL;
        lsize = sizeof(struct udmabuf_create_item) * head.count;
        list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
        if (IS_ERR(list))
                return PTR_ERR(list);

        ret = udmabuf_create(filp->private_data, &head, list);
        kfree(list);
        return ret;
}

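/*
 * UDMABUF_CREATE_LIST sketch (illustrative only): userspace passes a
 * struct udmabuf_create_list header immediately followed by head.count
 * struct udmabuf_create_item entries, which is what memdup_user() above
 * reads from arg + sizeof(head). The combined allocation below is an
 * assumed example, based on the uapi header struct ending in a flexible
 * array of items.
 *
 *      struct udmabuf_create_list *req;
 *
 *      req = calloc(1, sizeof(*req) + 2 * sizeof(struct udmabuf_create_item));
 *      req->flags = UDMABUF_FLAGS_CLOEXEC;
 *      req->count = 2;
 *      req->list[0] = (struct udmabuf_create_item){ .memfd = fd0, .offset = 0, .size = sz0 };
 *      req->list[1] = (struct udmabuf_create_item){ .memfd = fd1, .offset = 0, .size = sz1 };
 *
 *      int dmabuf_fd = ioctl(devfd, UDMABUF_CREATE_LIST, req);
 */
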
static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
                          unsigned long arg)
{
        long ret;

        switch (ioctl) {
        case UDMABUF_CREATE:
                ret = udmabuf_ioctl_create(filp, arg);
                break;
        case UDMABUF_CREATE_LIST:
                ret = udmabuf_ioctl_create_list(filp, arg);
                break;
        default:
                ret = -ENOTTY;
                break;
        }
        return ret;
}

static const struct file_operations udmabuf_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "udmabuf",
        .fops           = &udmabuf_fops,
};

static int __init udmabuf_dev_init(void)
{
        int ret;

        ret = misc_register(&udmabuf_misc);
        if (ret < 0) {
                pr_err("Could not initialize udmabuf device\n");
                return ret;
        }

        ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
                                           DMA_BIT_MASK(64));
        if (ret < 0) {
                pr_err("Could not setup DMA mask for udmabuf device\n");
                misc_deregister(&udmabuf_misc);
                return ret;
        }

        return 0;
}

static void __exit udmabuf_dev_exit(void)
{
        misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");