// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

#define DEFAULT_GFP \
	(GFP_KERNEL | \
	 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
	 __GFP_NOWARN)	/* no stack trace print - this call is non-critical */

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
#endif

static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return NULL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
 *	type and direction
 * @type: the DMA buffer type
 * @device: the device pointer
 * @dir: DMA direction
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_dir_pages(int type, struct device *device,
			    enum dma_data_direction dir, size_t size,
			    struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->dev.dir = dir;
	dmab->bytes = 0;
	dmab->addr = 0;
	dmab->private_data = NULL;
	dmab->area = __snd_dma_alloc_pages(dmab, size);
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
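
/*
 * Usage sketch (illustration only, not part of the ALSA core): a driver
 * with a hypothetical PCI device "pci" could allocate and later release
 * a 64 kB coherent DMA buffer roughly like this:
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
 *				      DMA_BIDIRECTIONAL, 64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	...
 *	snd_dma_free_pages(&buf);
 */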

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type. When no space is left, this function reduces the size and
 * tries to allocate again. The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
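
/*
 * Usage sketch (illustration only, hypothetical "pci" device): because the
 * allocation may have been shrunk, the caller checks dmab.bytes for the
 * size that was actually obtained instead of assuming the requested size:
 *
 *	struct snd_dma_buffer buf;
 *
 *	if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, &pci->dev,
 *					 512 * 1024, &buf) < 0)
 *		return -ENOMEM;
 *	pr_debug("allocated %zu bytes\n", buf.bytes);
 */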

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/* called by devres */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}

/**
 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
 * @dev: the device pointer
 * @type: the DMA buffer type
 * @dir: DMA direction
 * @size: the buffer size to allocate
 *
 * Allocate buffer pages depending on the given type and manage using devres.
 * The pages will be released automatically at the device removal.
 *
 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
 * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
 * SNDRV_DMA_TYPE_VMALLOC type.
 *
 * Return: the snd_dma_buffer object on success, or NULL on failure
 */
struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type,
			 enum dma_data_direction dir, size_t size)
{
	struct snd_dma_buffer *dmab;
	int err;

	if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
		    type == SNDRV_DMA_TYPE_VMALLOC))
		return NULL;

	dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
	if (!dmab)
		return NULL;

	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
	if (err < 0) {
		devres_free(dmab);
		return NULL;
	}

	devres_add(dev, dmab);
	return dmab;
}
EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
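
/*
 * Usage sketch (illustration only, hypothetical "pdev" platform device):
 * a devres-managed buffer needs no explicit free; it is released when the
 * device is unbound:
 *
 *	struct snd_dma_buffer *buf;
 *
 *	buf = snd_devm_alloc_dir_pages(&pdev->dev, SNDRV_DMA_TYPE_DEV,
 *				       DMA_TO_DEVICE, 128 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 */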

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops;

	if (!dmab)
		return -ENOENT;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
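
/*
 * Usage sketch (illustration only): most drivers do not call this directly;
 * the PCM core typically maps the runtime DMA buffer itself. A custom
 * .mmap callback could forward to this helper roughly like this, with
 * my_pcm_mmap() being a hypothetical driver function:
 *
 *	static int my_pcm_mmap(struct snd_pcm_substream *substream,
 *			       struct vm_area_struct *area)
 *	{
 *		return snd_dma_buffer_mmap(snd_pcm_get_dma_buf(substream), area);
 *	}
 */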

#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
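
/*
 * Usage sketch (illustration only): for a buffer that needs syncing, a
 * driver brackets its CPU accesses with the two sync modes; both calls are
 * no-ops when dmab->dev.need_sync is false:
 *
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
 *	... CPU reads or writes dmab->area ...
 *	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
 */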

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
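
/*
 * Usage sketch (illustration only): given a buffer "dmab" of "bytes" bytes,
 * a driver that needs physically contiguous runs can walk an SG buffer
 * chunk by chunk; program_hw() stands in for the hardware-specific
 * descriptor setup:
 *
 *	unsigned int ofs = 0, rest = bytes, chunk;
 *
 *	while (rest > 0) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, rest);
 *		program_hw(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *		rest -= chunk;
 *	}
 */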

/*
 * Continuous pages allocator
 */
static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
			    bool wc)
{
	void *p;
	gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

 again:
	p = alloc_pages_exact(size, gfp);
	if (!p)
		return NULL;
	*addr = page_to_phys(virt_to_page(p));
	if (!dev)
		return p;
	if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}
#ifdef CONFIG_X86
	if (wc)
		set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	return p;
}

static void do_free_pages(void *p, size_t size, bool wc)
{
#ifdef CONFIG_X86
	if (wc)
		set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
#endif
	free_pages_exact(p, size);
}


static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, false);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return vmalloc(size);
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

#define get_vmalloc_page_addr(dmab, offset) \
	page_to_phys(vmalloc_to_page((dmab)->area + (offset)))

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	/* check page continuity */
	addr = get_vmalloc_page_addr(dmab, start);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (get_vmalloc_page_addr(dmab, start) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;
	void *p;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
		if (p)
			return p;
	}

	/* Internal memory might have limited size and not enough space,
	 * so if the allocation fails, try to fetch memory traditionally.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};

/*
 * Write-combined pages
 */
/* x86-specific allocations */
#ifdef CONFIG_SND_DMA_SGBUF
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	do_free_pages(dmab->area, dmab->bytes, true);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return snd_dma_continuous_mmap(dmab, area);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
}

static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
	dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
			   struct vm_area_struct *area)
{
	return dma_mmap_wc(dmab->dev.dev, area,
			   dmab->area, dmab->addr, dmab->bytes);
}
#endif /* CONFIG_SND_DMA_SGBUF */

static const struct snd_malloc_ops snd_dma_wc_ops = {
	.alloc = snd_dma_wc_alloc,
	.free = snd_dma_wc_free,
	.mmap = snd_dma_wc_mmap,
};

/*
 * Non-contiguous pages allocator
 */
static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct sg_table *sgt;
	void *p;

#ifdef CONFIG_SND_DMA_SGBUF
	if (cpu_feature_enabled(X86_FEATURE_XENPV))
		return snd_dma_sg_fallback_alloc(dmab, size);
#endif
	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
				      DEFAULT_GFP, 0);
#ifdef CONFIG_SND_DMA_SGBUF
	if (!sgt && !get_dma_ops(dmab->dev.dev))
		return snd_dma_sg_fallback_alloc(dmab, size);
#endif
	if (!sgt)
		return NULL;

	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
					    sg_dma_address(sgt->sgl));
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (p) {
		dmab->private_data = sgt;
		/* store the first page address for convenience */
		dmab->addr = snd_sgbuf_get_addr(dmab, 0);
	} else {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
	}
	return p;
}

static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
{
	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
			       dmab->dev.dir);
}

static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
				  struct vm_area_struct *area)
{
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
				   enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir == DMA_TO_DEVICE)
			return;
		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
					 dmab->dev.dir);
	} else {
		if (dmab->dev.dir == DMA_FROM_DEVICE)
			return;
		flush_kernel_vmap_range(dmab->area, dmab->bytes);
		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
					    dmab->dev.dir);
	}
}

static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
					      struct sg_page_iter *piter,
					      size_t offset)
{
	struct sg_table *sgt = dmab->private_data;

	__sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
			     offset >> PAGE_SHIFT);
}

static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	struct sg_dma_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
	__sg_page_iter_dma_next(&iter);
	return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
}

static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct sg_page_iter iter;

	snd_dma_noncontig_iter_set(dmab, &iter, offset);
	__sg_page_iter_next(&iter);
	return sg_page_iter_page(&iter);
}

static unsigned int
snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
				 unsigned int ofs, unsigned int size)
{
	struct sg_dma_page_iter iter;
	unsigned int start, end;
	unsigned long addr;

	start = ALIGN_DOWN(ofs, PAGE_SIZE);
	end = ofs + size - 1; /* the last byte address */
	snd_dma_noncontig_iter_set(dmab, &iter.base, start);
	if (!__sg_page_iter_dma_next(&iter))
		return 0;
	/* check page continuity */
	addr = sg_page_iter_dma_address(&iter);
	for (;;) {
		start += PAGE_SIZE;
		if (start > end)
			break;
		addr += PAGE_SIZE;
		if (!__sg_page_iter_dma_next(&iter) ||
		    sg_page_iter_dma_address(&iter) != addr)
			return start - ofs;
	}
	/* ok, all on continuous pages */
	return size;
}

static const struct snd_malloc_ops snd_dma_noncontig_ops = {
	.alloc = snd_dma_noncontig_alloc,
	.free = snd_dma_noncontig_free,
	.mmap = snd_dma_noncontig_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))

static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p = snd_dma_noncontig_alloc(dmab, size);
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	if (!p)
		return NULL;
	if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
		return p;
	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wc(sg_wc_address(&iter), 1);
	return p;
}

static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
{
	struct sg_table *sgt = dmab->private_data;
	struct sg_page_iter iter;

	for_each_sgtable_page(sgt, &iter, 0)
		set_memory_wb(sg_wc_address(&iter), 1);
	snd_dma_noncontig_free(dmab);
}

static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
			      struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return dma_mmap_noncontiguous(dmab->dev.dev, area,
				      dmab->bytes, dmab->private_data);
}

static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
	.alloc = snd_dma_sg_wc_alloc,
	.free = snd_dma_sg_wc_free,
	.mmap = snd_dma_sg_wc_mmap,
	.sync = snd_dma_noncontig_sync,
	.get_addr = snd_dma_noncontig_get_addr,
	.get_page = snd_dma_noncontig_get_page,
	.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};

/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	bool use_dma_alloc_coherent;
	size_t count;
	struct page **pages;
	/* DMA address array; the first page contains #pages in ~PAGE_MASK */
	dma_addr_t *addrs;
};
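
/*
 * Illustration of the addrs[] encoding above (assuming 4 KiB pages): for a
 * three-page chunk whose DMA address is 0x12340000, the allocator stores
 *
 *	addrs[i]     = 0x12340003   (address | number of pages in the chunk)
 *	addrs[i + 1] = 0x12341000
 *	addrs[i + 2] = 0x12342000
 *
 * so the per-chunk size can be recovered from the low bits at free time.
 */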

static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
				       struct snd_dma_sg_fallback *sgbuf)
{
	size_t i, size;

	if (sgbuf->pages && sgbuf->addrs) {
		i = 0;
		while (i < sgbuf->count) {
			if (!sgbuf->pages[i] || !sgbuf->addrs[i])
				break;
			size = sgbuf->addrs[i] & ~PAGE_MASK;
			if (WARN_ON(!size))
				break;
			if (sgbuf->use_dma_alloc_coherent)
				dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
						  page_address(sgbuf->pages[i]),
						  sgbuf->addrs[i] & PAGE_MASK);
			else
				do_free_pages(page_address(sgbuf->pages[i]),
					      size << PAGE_SHIFT, false);
			i += size;
		}
	}
	kvfree(sgbuf->pages);
	kvfree(sgbuf->addrs);
	kfree(sgbuf);
}

static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct snd_dma_sg_fallback *sgbuf;
	struct page **pagep, *curp;
	size_t chunk, npages;
	dma_addr_t *addrp;
	dma_addr_t addr;
	void *p;

	/* correct the type */
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
	else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;

	sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
	size = PAGE_ALIGN(size);
	sgbuf->count = size >> PAGE_SHIFT;
	sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
	sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
	if (!sgbuf->pages || !sgbuf->addrs)
		goto error;

	pagep = sgbuf->pages;
	addrp = sgbuf->addrs;
	chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
	while (size > 0) {
		chunk = min(size, chunk);
		if (sgbuf->use_dma_alloc_coherent)
			p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
		else
			p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				goto error;
			chunk >>= 1;
			chunk = PAGE_SIZE << get_order(chunk);
			continue;
		}

		size -= chunk;
		/* fill pages */
		npages = chunk >> PAGE_SHIFT;
		*addrp = npages; /* store in lower bits */
		curp = virt_to_page(p);
		while (npages--) {
			*pagep++ = curp++;
			*addrp++ |= addr;
			addr += PAGE_SIZE;
		}
	}

	p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
	if (!p)
		goto error;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wc(sgbuf->pages, sgbuf->count);

	dmab->private_data = sgbuf;
	/* store the first page address for convenience */
	dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
	return p;

 error:
	__snd_dma_sg_fallback_free(dmab, sgbuf);
	return NULL;
}

static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		set_pages_array_wb(sgbuf->pages, sgbuf->count);
	vunmap(dmab->area);
	__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}

static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
					       size_t offset)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
	size_t index = offset >> PAGE_SHIFT;

	return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
}

static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	struct snd_dma_sg_fallback *sgbuf = dmab->private_data;

	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}

static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
	.alloc = snd_dma_sg_fallback_alloc,
	.free = snd_dma_sg_fallback_free,
	.mmap = snd_dma_sg_fallback_mmap,
	.get_addr = snd_dma_sg_fallback_get_addr,
	/* reuse vmalloc helpers */
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
#endif /* CONFIG_SND_DMA_SGBUF */

/*
 * Non-coherent pages allocator
 */
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	void *p;

	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
				  dmab->dev.dir, DEFAULT_GFP);
	if (p)
		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
	return p;
}

static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
{
	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
			     dmab->addr, dmab->dev.dir);
}

static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
				    struct vm_area_struct *area)
{
	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
	return dma_mmap_pages(dmab->dev.dev, area,
			      area->vm_end - area->vm_start,
			      virt_to_page(dmab->area));
}

static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
				     enum snd_dma_sync_mode mode)
{
	if (mode == SNDRV_DMA_SYNC_CPU) {
		if (dmab->dev.dir != DMA_TO_DEVICE)
			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
						dmab->bytes, dmab->dev.dir);
	} else {
		if (dmab->dev.dir != DMA_FROM_DEVICE)
			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
						   dmab->bytes, dmab->dev.dir);
	}
}

static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
	.alloc = snd_dma_noncoherent_alloc,
	.free = snd_dma_noncoherent_free,
	.mmap = snd_dma_noncoherent_mmap,
	.sync = snd_dma_noncoherent_sync,
};

#endif /* CONFIG_HAS_DMA */

/*
 * Entry points
 */
static const struct snd_malloc_ops *snd_dma_ops[] = {
	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
#ifdef CONFIG_HAS_DMA
	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifdef CONFIG_SND_DMA_SGBUF
	[SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
	[SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
#endif
#endif /* CONFIG_HAS_DMA */
};

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
		return NULL;
	return snd_dma_ops[dmab->dev.type];
}