drivers/dma-buf/heaps/cma_heap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

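/*
 * Per-buffer state: the backing CMA allocation (cma_pages), a flat array
 * of page pointers used by the scatterlist, mmap and vmap paths, the list
 * of device attachments, and a reference count for the shared kernel
 * mapping. 'lock' protects the attachment list and the vmap state.
 */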
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

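/*
 * Per-attachment state: each attaching device gets its own sg_table built
 * from the buffer's page array; 'mapped' records whether that table is
 * currently DMA-mapped, so CPU-access syncs only touch live mappings.
 */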
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

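/*
 * Attach/detach: build a scatterlist covering the whole buffer for the
 * attaching device and keep it on the buffer's attachment list (under
 * buffer->lock) so cache maintenance can find it; tear both down on detach.
 */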
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

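/*
 * DMA map/unmap: map the attachment's scatterlist into the attaching
 * device's address space. Since CMA memory is physically contiguous, the
 * table typically collapses into a single DMA segment.
 */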
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

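/*
 * CPU access bracketing: before the CPU touches the buffer, invalidate any
 * kernel vmap range and sync every currently mapped attachment for the
 * CPU; when CPU access ends, flush the vmap range and sync back for the
 * devices.
 */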
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

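/*
 * Userspace mapping: mmap installs a fault handler rather than mapping the
 * whole buffer up front, so pages are inserted into the VMA as they are
 * first touched.
 */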
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

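/*
 * Kernel mapping: the first vmap call builds a contiguous kernel virtual
 * mapping over the page array; later callers share it via vmap_cnt, and
 * the mapping is torn down when the count drops back to zero.
 */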
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		dma_buf_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	dma_buf_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	dma_buf_map_clear(map);
}

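/*
 * Final release: runs when the last dma-buf reference is dropped; returns
 * the pages to the CMA area and frees the per-buffer bookkeeping.
 */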
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

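/*
 * Allocation flow: reserve physically contiguous pages from the heap's CMA
 * area, zero them (page by page through kmap_atomic for highmem, so a
 * fatal signal can abort the clear early), build the flat page array, then
 * export the result as a dma-buf and return an fd to the caller.
 */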
static int cma_heap_allocate(struct dma_heap *heap,
			     unsigned long len,
			     unsigned long fd_flags,
			     unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	ret = dma_buf_fd(dmabuf, fd_flags);
	if (ret < 0) {
		dma_buf_put(dmabuf);
		/* just return, as put will call release and that will free */
		return ret;
	}

	return ret;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ret;
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

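/*
 * Registration: wrap a CMA area in a dma_heap named after that area. Only
 * the default CMA area (dev_get_cma_area(NULL)) is exposed at module init.
 */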
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");
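
/*
 * Usage sketch (illustrative only, not part of this driver): userspace
 * allocates from this heap through the DMA-BUF heaps ioctl interface in
 * <linux/dma-heap.h>. The device node name matches the name passed to
 * dma_heap_add() above, i.e. the CMA area name; "reserved" below is an
 * assumption that holds for the common default CMA area and may differ on
 * a given platform.
 */
#if 0	/* userspace example, builds against the uapi headers, not this file */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dma-heap.h>

/* Allocate 'len' bytes from the CMA heap; returns a dma-buf fd or -1. */
static int cma_heap_alloc_fd(size_t len)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	int heap_fd, ret;

	/* heap node name is an assumption, see comment above */
	heap_fd = open("/dev/dma_heap/reserved", O_RDWR | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);

	return ret < 0 ? -1 : (int)data.fd;
}
#endif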