drivers/dma-buf/heaps/system_heap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *      Andrew F. Davis <afd@ti.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;

struct system_heap_buffer {
        struct dma_heap *heap;
        struct list_head attachments;
        struct mutex lock;
        unsigned long len;
        struct sg_table sg_table;
        int vmap_cnt;
        void *vaddr;
};

struct dma_heap_attachment {
        struct device *dev;
        struct sg_table *table;
        struct list_head list;
        bool mapped;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
                                | __GFP_NORETRY) & ~__GFP_RECLAIM) \
                                | __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

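/*
 * Duplicate the buffer's pristine sg_table so that each attachment gets its
 * own copy, which can be DMA-mapped and unmapped independently of the others.
 */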
static struct sg_table *dup_sg_table(struct sg_table *table)
{
        struct sg_table *new_table;
        int ret, i;
        struct scatterlist *sg, *new_sg;

        new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(new_table);
                return ERR_PTR(-ENOMEM);
        }

        new_sg = new_table->sgl;
        for_each_sgtable_sg(table, sg, i) {
                sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
                new_sg = sg_next(new_sg);
        }

        return new_table;
}

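/*
 * Give the new attachment a private copy of the buffer's sg_table and add it
 * to the buffer's attachment list so CPU-access syncs can find it later.
 */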
static int system_heap_attach(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attachment)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;
        struct sg_table *table;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        table = dup_sg_table(&buffer->sg_table);
        if (IS_ERR(table)) {
                kfree(a);
                return -ENOMEM;
        }

        a->table = table;
        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);
        a->mapped = false;

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

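/* Remove the attachment from the buffer's list and free its private sg_table. */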
static void system_heap_detach(struct dma_buf *dmabuf,
                               struct dma_buf_attachment *attachment)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a = attachment->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        sg_free_table(a->table);
        kfree(a->table);
        kfree(a);
}

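/*
 * DMA-map the attachment's private sg_table for the attached device and mark
 * the attachment as mapped so CPU-access syncs include it.
 */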
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
                                                enum dma_data_direction direction)
{
        struct dma_heap_attachment *a = attachment->priv;
        struct sg_table *table = a->table;
        int ret;

        ret = dma_map_sgtable(attachment->dev, table, direction, 0);
        if (ret)
                return ERR_PTR(ret);

        a->mapped = true;
        return table;
}

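/* Reverse of system_heap_map_dma_buf(): clear the mapped flag and unmap. */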
static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                      struct sg_table *table,
                                      enum dma_data_direction direction)
{
        struct dma_heap_attachment *a = attachment->priv;

        a->mapped = false;
        dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

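/*
 * Prepare the buffer for CPU access: invalidate any kernel vmap range and
 * sync every currently mapped attachment for the CPU.
 */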
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                                enum dma_data_direction direction)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

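/*
 * Finish CPU access: flush any kernel vmap range and hand the buffer back to
 * the device side of every mapped attachment.
 */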
static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                              enum dma_data_direction direction)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct dma_heap_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                flush_kernel_vmap_range(buffer->vaddr, buffer->len);

        list_for_each_entry(a, &buffer->attachments, list) {
                if (!a->mapped)
                        continue;
                dma_sync_sgtable_for_device(a->dev, a->table, direction);
        }
        mutex_unlock(&buffer->lock);

        return 0;
}

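/*
 * Map the buffer's pages into userspace one page at a time, starting at the
 * page offset given by vma->vm_pgoff.
 */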
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct sg_table *table = &buffer->sg_table;
        unsigned long addr = vma->vm_start;
        struct sg_page_iter piter;
        int ret;

        dma_resv_assert_held(dmabuf->resv);

        for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
                struct page *page = sg_page_iter_page(&piter);

                ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += PAGE_SIZE;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

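/* Build a contiguous kernel mapping of the buffer's pages with vmap(). */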
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
        struct sg_table *table = &buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;
        struct sg_page_iter piter;
        void *vaddr;

        if (!pages)
                return ERR_PTR(-ENOMEM);

        for_each_sgtable_page(table, &piter, 0) {
                WARN_ON(tmp - pages >= npages);
                *tmp++ = sg_page_iter_page(&piter);
        }

        vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

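/*
 * Hand out the kernel mapping, creating it on first use and refcounting it
 * via vmap_cnt so repeated vmap calls share one mapping.
 */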
static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        void *vaddr;
        int ret = 0;

        mutex_lock(&buffer->lock);
        if (buffer->vmap_cnt) {
                buffer->vmap_cnt++;
                iosys_map_set_vaddr(map, buffer->vaddr);
                goto out;
        }

        vaddr = system_heap_do_vmap(buffer);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                goto out;
        }

        buffer->vaddr = vaddr;
        buffer->vmap_cnt++;
        iosys_map_set_vaddr(map, buffer->vaddr);
out:
        mutex_unlock(&buffer->lock);

        return ret;
}

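/* Drop a vmap reference; tear down the kernel mapping when the count hits zero. */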
static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
        struct system_heap_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        if (!--buffer->vmap_cnt) {
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }
        mutex_unlock(&buffer->lock);
        iosys_map_clear(map);
}

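/*
 * Called when the last reference to the dma-buf is dropped: free every page
 * backing the buffer, then the sg_table and the buffer itself.
 */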
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
        struct system_heap_buffer *buffer = dmabuf->priv;
        struct sg_table *table;
        struct scatterlist *sg;
        int i;

        table = &buffer->sg_table;
        for_each_sgtable_sg(table, sg, i) {
                struct page *page = sg_page(sg);

                __free_pages(page, compound_order(page));
        }
        sg_free_table(table);
        kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
        .attach = system_heap_attach,
        .detach = system_heap_detach,
        .map_dma_buf = system_heap_map_dma_buf,
        .unmap_dma_buf = system_heap_unmap_dma_buf,
        .begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
        .end_cpu_access = system_heap_dma_buf_end_cpu_access,
        .mmap = system_heap_mmap,
        .vmap = system_heap_vmap,
        .vunmap = system_heap_vunmap,
        .release = system_heap_dma_buf_release,
};

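/*
 * Allocate the largest chunk (order 8, 4 or 0) that still fits within the
 * remaining size and does not exceed max_order, falling back to smaller
 * orders if the higher-order allocation fails.
 */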
static struct page *alloc_largest_available(unsigned long size,
                                            unsigned int max_order)
{
        struct page *page;
        int i;

        for (i = 0; i < NUM_ORDERS; i++) {
                if (size <  (PAGE_SIZE << orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_pages(order_flags[i], orders[i]);
                if (!page)
                        continue;
                return page;
        }
        return NULL;
}

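/*
 * Allocate a buffer of @len bytes as a list of large-order pages, assemble
 * the pages into the buffer's sg_table and export the result as a dma-buf.
 */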
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
                                            unsigned long len,
                                            unsigned long fd_flags,
                                            unsigned long heap_flags)
{
        struct system_heap_buffer *buffer;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        unsigned long size_remaining = len;
        unsigned int max_order = orders[0];
        struct dma_buf *dmabuf;
        struct sg_table *table;
        struct scatterlist *sg;
        struct list_head pages;
        struct page *page, *tmp_page;
        int i, ret = -ENOMEM;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&buffer->attachments);
        mutex_init(&buffer->lock);
        buffer->heap = heap;
        buffer->len = len;

        INIT_LIST_HEAD(&pages);
        i = 0;
        while (size_remaining > 0) {
                /*
                 * Avoid trying to allocate memory if the process
                 * has been killed by SIGKILL
                 */
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        goto free_buffer;
                }

                page = alloc_largest_available(size_remaining, max_order);
                if (!page)
                        goto free_buffer;

                list_add_tail(&page->lru, &pages);
                size_remaining -= page_size(page);
                max_order = compound_order(page);
                i++;
        }

        table = &buffer->sg_table;
        if (sg_alloc_table(table, i, GFP_KERNEL))
                goto free_buffer;

        sg = table->sgl;
        list_for_each_entry_safe(page, tmp_page, &pages, lru) {
                sg_set_page(sg, page, page_size(page), 0);
                sg = sg_next(sg);
                list_del(&page->lru);
        }

        /* create the dmabuf */
        exp_info.exp_name = dma_heap_get_name(heap);
        exp_info.ops = &system_heap_buf_ops;
        exp_info.size = buffer->len;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;
        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf)) {
                ret = PTR_ERR(dmabuf);
                goto free_pages;
        }
        return dmabuf;

free_pages:
        for_each_sgtable_sg(table, sg, i) {
                struct page *p = sg_page(sg);

                __free_pages(p, compound_order(p));
        }
        sg_free_table(table);
free_buffer:
        list_for_each_entry_safe(page, tmp_page, &pages, lru)
                __free_pages(page, compound_order(page));
        kfree(buffer);

        return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
        .allocate = system_heap_allocate,
};

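/* Register the "system" heap with the dma-heap framework at module init. */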
static int system_heap_create(void)
{
        struct dma_heap_export_info exp_info;

        exp_info.name = "system";
        exp_info.ops = &system_heap_ops;
        exp_info.priv = NULL;

        sys_heap = dma_heap_add(&exp_info);
        if (IS_ERR(sys_heap))
                return PTR_ERR(sys_heap);

        return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");