// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;

struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

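/*
 * Duplicate the buffer's scatterlist table so each attachment can hold (and
 * DMA-map) its own copy. Only the table entries are copied; the underlying
 * pages stay shared with the original table.
 */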
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

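/*
 * Give the attaching device a private copy of the buffer's sg_table and
 * track it on the buffer's attachment list so the CPU-access hooks can
 * find and sync it.
 */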
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

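/* Undo system_heap_attach(): unlink the attachment and free its table copy. */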
static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

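/*
 * DMA-map the attachment's private sg_table for the attaching device and
 * mark it mapped so the CPU-access hooks know to sync it.
 */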
static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

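/* Reverse of system_heap_map_dma_buf(). */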
static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

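/*
 * Make the buffer visible to the CPU: invalidate any kernel vmap alias and
 * sync each attachment that is currently DMA-mapped.
 */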
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

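/*
 * Hand the buffer back to the device: flush any kernel vmap alias and sync
 * each currently DMA-mapped attachment for device access.
 */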
static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

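/*
 * Map the buffer into userspace. The pages are not physically contiguous,
 * so each one is mapped with its own remap_pfn_range() call, starting at
 * the page given by vma->vm_pgoff. The caller must hold the dma-buf's
 * reservation lock.
 */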
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	dma_resv_assert_held(dmabuf->resv);

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

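/*
 * Create a contiguous kernel mapping of the buffer: collect every page
 * pointer from the sg_table and hand the array to vmap().
 */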
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

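/*
 * Kernel mappings are refcounted via vmap_cnt: only the first caller builds
 * the mapping, subsequent callers reuse buffer->vaddr.
 */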
static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

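/* Drop one vmap reference; the final one unmaps buffer->vaddr. */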
static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

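/*
 * Runs when the last dma-buf reference is dropped: free every page backing
 * the buffer (each sg entry holds one allocation of compound_order size)
 * and then the buffer metadata itself.
 */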
static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

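/*
 * Return the largest-order page that fits in @size, trying each order in
 * orders[] no higher than @max_order. Returns NULL if even an order-0
 * allocation fails.
 */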
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

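/*
 * Allocate the backing pages largest-order first, link them into an
 * sg_table and export it as a dma-buf. max_order shrinks to the order of
 * the last successful allocation, so a failed high-order allocation is
 * never retried for the remainder of the buffer.
 */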
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

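/* Register the "system" heap with the dma-heap framework at module init. */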
static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);