// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF System heap exporter
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019, 2020 Linaro Ltd.
 *
 * Portions based off of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct dma_heap *sys_heap;

struct system_heap_buffer {
	struct dma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct sg_table sg_table;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
	bool mapped;
};

#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
				| __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
/*
 * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
 * to match with the sizes often found in IOMMUs. Using order 4 pages instead
 * of order 0 pages can significantly improve the performance of many IOMMUs
 * by reducing TLB pressure and time spent updating page tables.
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)

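/*
 * Duplicate the buffer's scatterlist so each attachment gets its own copy
 * that can be DMA-mapped independently of other attached devices.
 */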
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sgtable_sg(table, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

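/*
 * Attach/detach: track each importer on the buffer's attachment list (under
 * the buffer lock) so CPU-access hooks can sync every mapped device.
 */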
static int system_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(&buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void system_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(ret);

	a->mapped = true;
	return table;
}

static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

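/*
 * CPU access bracketing: invalidate/flush any kernel vmap of the buffer and
 * sync only those attachments that are currently DMA-mapped.
 */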
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction direction)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

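/* Map the buffer into userspace one page at a time via remap_pfn_range(). */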
static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table = &buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct sg_page_iter piter;
	int ret;

	dma_resv_assert_held(dmabuf->resv);

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

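/*
 * Build a contiguous kernel mapping of the buffer by expanding its (possibly
 * higher-order) pages into a page array and handing that to vmap().
 */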
static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
{
	struct sg_table *table = &buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;
	struct sg_page_iter piter;
	void *vaddr;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sgtable_page(table, &piter, 0) {
		WARN_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = system_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct system_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct system_heap_buffer *buffer = dmabuf->priv;
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = &buffer->sg_table;
	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		__free_pages(page, compound_order(page));
	}
	sg_free_table(table);
	kfree(buffer);
}

static const struct dma_buf_ops system_heap_buf_ops = {
	.attach = system_heap_attach,
	.detach = system_heap_detach,
	.map_dma_buf = system_heap_map_dma_buf,
	.unmap_dma_buf = system_heap_unmap_dma_buf,
	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
	.mmap = system_heap_mmap,
	.vmap = system_heap_vmap,
	.vunmap = system_heap_vunmap,
	.release = system_heap_dma_buf_release,
};

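/*
 * Try the biggest order that still fits the remaining size and the caller's
 * max_order, falling back to smaller orders when allocation fails.
 */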
static struct page *alloc_largest_available(unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(order_flags[i], orders[i]);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

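/*
 * Allocate the backing pages largest-order first, assemble them into the
 * buffer's sg_table, then export the result as a dma-buf.
 */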
static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags)
{
	struct system_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	unsigned long size_remaining = len;
	unsigned int max_order = orders[0];
	struct dma_buf *dmabuf;
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i, ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->heap = heap;
	buffer->len = len;

	INIT_LIST_HEAD(&pages);
	i = 0;
	while (size_remaining > 0) {
		/*
		 * Avoid trying to allocate memory if the process
		 * has been killed by SIGKILL
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free_buffer;
		}

		page = alloc_largest_available(size_remaining, max_order);
		if (!page)
			goto free_buffer;

		list_add_tail(&page->lru, &pages);
		size_remaining -= page_size(page);
		max_order = compound_order(page);
		i++;
	}

	table = &buffer->sg_table;
	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_buffer;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, page_size(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &system_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	for_each_sgtable_sg(table, sg, i) {
		struct page *p = sg_page(sg);

		__free_pages(p, compound_order(p));
	}
	sg_free_table(table);
free_buffer:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		__free_pages(page, compound_order(page));
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops system_heap_ops = {
	.allocate = system_heap_allocate,
};

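/*
 * Rough userspace sketch (not part of this driver): buffers from this heap
 * are normally allocated through the dma-heap character device ioctl. The
 * path, sizes, and error handling below are illustrative only.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-heap.h>
 *
 *	int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
 *	struct dma_heap_allocation_data alloc = {
 *		.len = 4 * 1024 * 1024,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	if (heap_fd >= 0 && ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) == 0) {
 *		// alloc.fd now holds the exported dma-buf file descriptor
 *	}
 */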
static int system_heap_create(void)
{
	struct dma_heap_export_info exp_info;

	exp_info.name = "system";
	exp_info.ops = &system_heap_ops;
	exp_info.priv = NULL;

	sys_heap = dma_heap_add(&exp_info);
	if (IS_ERR(sys_heap))
		return PTR_ERR(sys_heap);

	return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");