drivers/dma-buf/heaps/system_heap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;

struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

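/*
 * Duplicate the buffer's scatterlist so each attachment can carry its own
 * copy: the page entries are shared, while DMA addresses are filled in
 * per device at map time.
 */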
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

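/*
 * Attach a device: give it its own copy of the scatterlist and track it
 * on the buffer's attachment list so the CPU-access callbacks can sync
 * every active mapping.
 */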
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

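/*
 * Bracket CPU access: before the CPU touches the buffer, invalidate any
 * kernel vmap alias and sync each mapped attachment for the CPU; the
 * end_cpu_access callback below does the corresponding flush back toward
 * the device.
 */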
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

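/*
 * Map the buffer's pages into userspace one page at a time. vm_pgoff
 * selects the starting page within the scatterlist, and the walk stops
 * once the VMA has been filled.
 */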
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	dma_resv_assert_held(dmabuf->resv);

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

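/*
 * Build a contiguous kernel mapping of the buffer: collect every page
 * from the scatterlist into a temporary array and hand it to vmap().
 */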
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

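/*
 * Called when the last reference to the dma-buf is dropped: return each
 * chunk to the page allocator at its original order and free the
 * bookkeeping.
 */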
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

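/*
 * Allocate the largest chunk from orders[] that still fits in the
 * remaining size and does not exceed max_order, falling back to smaller
 * orders when a high-order allocation fails. For example, with 4K pages
 * a 2MB + 20K request is filled as two order-8 (1MB) chunks followed by
 * five order-0 (4K) pages; order 4 (64K) is skipped because only 20K
 * remains by the time it is considered.
 */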
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size <  (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

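/*
 * Heap allocate callback: gather pages largest-first until the request
 * is satisfied, build the sg_table from them and export the result as a
 * dma-buf. max_order tracks the order of the last chunk so orders the
 * page allocator has already failed to provide are not retried.
 */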
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);
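
/*
 * A minimal userspace sketch (not part of this driver, shown only for
 * illustration) of allocating from this heap through the dma-heap UAPI
 * in <linux/dma-heap.h>: open the heap's character device, issue
 * DMA_HEAP_IOCTL_ALLOC and then mmap() the returned dma-buf fd. The
 * helper name is made up and error handling is trimmed for brevity.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/dma-heap.h>
 *
 *	int alloc_from_system_heap(size_t len, void **vaddr)
 *	{
 *		struct dma_heap_allocation_data data = {
 *			.len = len,
 *			.fd_flags = O_RDWR | O_CLOEXEC,
 *		};
 *		int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
 *
 *		if (heap_fd < 0 || ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0)
 *			return -1;
 *		close(heap_fd);
 *
 *		*vaddr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			      data.fd, 0);
 *		return data.fd;
 *	}
 */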