Commit | Line | Data |
---|---|---|
1a59d1b8 | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
1da177e4 | 2 | /* |
c1017a4c | 3 | * Copyright (c) by Jaroslav Kysela <perex@perex.cz> |
1da177e4 LT | 4 | * Takashi Iwai <tiwai@suse.de> |
5 | * | |
6 | * Generic memory allocators | |
1da177e4 LT | 7 | */ |
8 | ||
1da177e4 LT | 9 | #include <linux/slab.h> |
10 | #include <linux/mm.h> | |
11 | #include <linux/dma-mapping.h> | |
05503214 | 12 | #include <linux/genalloc.h> |
a25684a9 | 13 | #include <linux/highmem.h> |
1fe7f397 | 14 | #include <linux/vmalloc.h> |
42e748a0 TI | 15 | #ifdef CONFIG_X86 |
16 | #include <asm/set_memory.h> | |
17 | #endif | |
1da177e4 | 18 | #include <sound/memalloc.h> |
37af81c5 | 19 | #include "memalloc_local.h" |
1da177e4 | 20 | |
37af81c5 | 21 | static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab); |
1da177e4 | 22 | |
37af81c5 TI | 23 | /* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */ |
24 | static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab, | |
25 | gfp_t default_gfp) | |
1da177e4 | 26 | { |
37af81c5 TI | 27 | if (!dmab->dev.dev) |
28 | return default_gfp; | |
29 | else | |
30 | return (__force gfp_t)(unsigned long)dmab->dev.dev; | |
1da177e4 | 31 | } |
05503214 | 32 | |
723c1252 | 33 | static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size) |
05503214 | 34 | { |
37af81c5 | 35 | const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab); |
05503214 | 36 | |
37af81c5 | 37 | if (WARN_ON_ONCE(!ops || !ops->alloc)) |
723c1252 | 38 | return NULL; |
37af81c5 | 39 | return ops->alloc(dmab, size); |
08422d2c | 40 | } |
1da177e4 LT | 41 | |
42 | /** | |
a25684a9 TI | 43 | * snd_dma_alloc_dir_pages - allocate the buffer area according to the given |
44 | * type and direction | |
1da177e4 LT | 45 | * @type: the DMA buffer type |
46 | * @device: the device pointer | |
a25684a9 | 47 | * @dir: DMA direction |
1da177e4 LT | 48 | * @size: the buffer size to allocate |
49 | * @dmab: buffer allocation record to store the allocated data | |
50 | * | |
51 | * Calls the memory-allocator function for the corresponding | |
52 | * buffer type. | |
eb7c06e8 YB | 53 | * |
54 | * Return: Zero if the buffer with the given size is allocated successfully, | |
55 | * otherwise a negative value on error. | |
1da177e4 | 56 | */ |
a25684a9 TI | 57 | int snd_dma_alloc_dir_pages(int type, struct device *device, |
58 | enum dma_data_direction dir, size_t size, | |
59 | struct snd_dma_buffer *dmab) | |
1da177e4 | 60 | { |
7eaa943c TI | 61 | if (WARN_ON(!size)) |
62 | return -ENXIO; | |
63 | if (WARN_ON(!dmab)) | |
64 | return -ENXIO; | |
1da177e4 | 65 | |
5c1733e3 | 66 | size = PAGE_ALIGN(size); |
1da177e4 LT | 67 | dmab->dev.type = type; |
68 | dmab->dev.dev = device; | |
a25684a9 | 69 | dmab->dev.dir = dir; |
1da177e4 | 70 | dmab->bytes = 0; |
28e60dbb TI | 71 | dmab->addr = 0; |
72 | dmab->private_data = NULL; | |
723c1252 | 73 | dmab->area = __snd_dma_alloc_pages(dmab, size); |
37af81c5 | 74 | if (!dmab->area) |
1da177e4 LT | 75 | return -ENOMEM; |
76 | dmab->bytes = size; | |
77 | return 0; | |
78 | } | |
a25684a9 | 79 | EXPORT_SYMBOL(snd_dma_alloc_dir_pages); |
1da177e4 LT | 80 | |
81 | /** | |
82 | * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback | |
83 | * @type: the DMA buffer type | |
84 | * @device: the device pointer | |
85 | * @size: the buffer size to allocate | |
86 | * @dmab: buffer allocation record to store the allocated data | |
87 | * | |
88 | * Calls the memory-allocator function for the corresponding | |
89 | * buffer type. When no space is left, this function reduces the size and | |
90 | * tries to allocate again. The size actually allocated is stored in | |
91 | * res_size argument. | |
eb7c06e8 YB | 92 | * |
93 | * Return: Zero if the buffer with the given size is allocated successfully, | |
94 | * otherwise a negative value on error. | |
1da177e4 LT | 95 | */ |
96 | int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size, | |
97 | struct snd_dma_buffer *dmab) | |
98 | { | |
99 | int err; | |
100 | ||
1da177e4 LT | 101 | while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) { |
102 | if (err != -ENOMEM) | |
103 | return err; | |
1da177e4 LT | 104 | if (size <= PAGE_SIZE) |
105 | return -ENOMEM; | |
dfef01e1 TI | 106 | size >>= 1; |
107 | size = PAGE_SIZE << get_order(size); | |
1da177e4 LT | 108 | } |
109 | if (! dmab->area) | |
110 | return -ENOMEM; | |
111 | return 0; | |
112 | } | |
35f80014 | 113 | EXPORT_SYMBOL(snd_dma_alloc_pages_fallback); |
1da177e4 | 114 | |
1da177e4 LT | 115 | /** |
116 | * snd_dma_free_pages - release the allocated buffer | |
117 | * @dmab: the buffer allocation record to release | |
118 | * | |
119 | * Releases the allocated buffer via snd_dma_alloc_pages(). | |
120 | */ | |
121 | void snd_dma_free_pages(struct snd_dma_buffer *dmab) | |
122 | { | |
37af81c5 TI | 123 | const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab); |
124 | ||
125 | if (ops && ops->free) | |
126 | ops->free(dmab); | |
127 | } | |
128 | EXPORT_SYMBOL(snd_dma_free_pages); | |
129 | ||
427ae268 TI | 130 | /* called by devres */ |
131 | static void __snd_release_pages(struct device *dev, void *res) | |
132 | { | |
133 | snd_dma_free_pages(res); | |
134 | } | |
135 | ||
136 | /** | |
a25684a9 | 137 | * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres |
427ae268 TI | 138 | * @dev: the device pointer |
139 | * @type: the DMA buffer type | |
a25684a9 | 140 | * @dir: DMA direction |
427ae268 TI | 141 | * @size: the buffer size to allocate |
142 | * | |
143 | * Allocate buffer pages depending on the given type and manage using devres. | |
144 | * The pages will be released automatically at the device removal. | |
145 | * | |
146 | * Unlike snd_dma_alloc_pages(), this function requires the real device pointer, | |
147 | * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or | |
148 | * SNDRV_DMA_TYPE_VMALLOC type. | |
149 | * | |
150 | * The function returns the snd_dma_buffer object at success, or NULL if failed. | |
151 | */ | |
152 | struct snd_dma_buffer * | |
a25684a9 TI | 153 | snd_devm_alloc_dir_pages(struct device *dev, int type, |
154 | enum dma_data_direction dir, size_t size) | |
427ae268 TI | 155 | { |
156 | struct snd_dma_buffer *dmab; | |
157 | int err; | |
158 | ||
159 | if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS || | |
160 | type == SNDRV_DMA_TYPE_VMALLOC)) | |
161 | return NULL; | |
162 | ||
163 | dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL); | |
164 | if (!dmab) | |
165 | return NULL; | |
166 | ||
a25684a9 | 167 | err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab); |
427ae268 TI | 168 | if (err < 0) { |
169 | devres_free(dmab); | |
170 | return NULL; | |
171 | } | |
172 | ||
173 | devres_add(dev, dmab); | |
174 | return dmab; | |
175 | } | |
a25684a9 | 176 | EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages); |
427ae268 | 177 | |
a202bd1a TI | 178 | /** |
179 | * snd_dma_buffer_mmap - perform mmap of the given DMA buffer | |
180 | * @dmab: buffer allocation information | |
181 | * @area: VM area information | |
182 | */ | |
183 | int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab, | |
184 | struct vm_area_struct *area) | |
185 | { | |
8e537d5d | 186 | const struct snd_malloc_ops *ops; |
a202bd1a | 187 | |
8e537d5d TI | 188 | if (!dmab) |
189 | return -ENOENT; | |
190 | ops = snd_dma_get_ops(dmab); | |
a202bd1a TI | 191 | if (ops && ops->mmap) |
192 | return ops->mmap(dmab, area); | |
193 | else | |
194 | return -ENOENT; | |
195 | } | |
196 | EXPORT_SYMBOL(snd_dma_buffer_mmap); | |
197 | ||
a25684a9 TI | 198 | #ifdef CONFIG_HAS_DMA |
199 | /** | |
200 | * snd_dma_buffer_sync - sync DMA buffer between CPU and device | |
201 | * @dmab: buffer allocation information | |
f917c04f | 202 | * @mode: sync mode |
a25684a9 TI | 203 | */ |
204 | void snd_dma_buffer_sync(struct snd_dma_buffer *dmab, | |
205 | enum snd_dma_sync_mode mode) | |
206 | { | |
207 | const struct snd_malloc_ops *ops; | |
208 | ||
209 | if (!dmab || !dmab->dev.need_sync) | |
210 | return; | |
211 | ops = snd_dma_get_ops(dmab); | |
212 | if (ops && ops->sync) | |
213 | ops->sync(dmab, mode); | |
214 | } | |
215 | EXPORT_SYMBOL_GPL(snd_dma_buffer_sync); | |
216 | #endif /* CONFIG_HAS_DMA */ | |
217 | ||
37af81c5 TI | 218 | /** |
219 | * snd_sgbuf_get_addr - return the physical address at the corresponding offset | |
220 | * @dmab: buffer allocation information | |
221 | * @offset: offset in the ring buffer | |
222 | */ | |
223 | dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset) | |
224 | { | |
225 | const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab); | |
226 | ||
227 | if (ops && ops->get_addr) | |
228 | return ops->get_addr(dmab, offset); | |
229 | else | |
230 | return dmab->addr + offset; | |
231 | } | |
232 | EXPORT_SYMBOL(snd_sgbuf_get_addr); | |
233 | ||
234 | /** | |
235 | * snd_sgbuf_get_page - return the physical page at the corresponding offset | |
236 | * @dmab: buffer allocation information | |
237 | * @offset: offset in the ring buffer | |
238 | */ | |
239 | struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset) | |
240 | { | |
241 | const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab); | |
242 | ||
243 | if (ops && ops->get_page) | |
244 | return ops->get_page(dmab, offset); | |
245 | else | |
246 | return virt_to_page(dmab->area + offset); | |
247 | } | |
248 | EXPORT_SYMBOL(snd_sgbuf_get_page); | |
249 | ||
250 | /** | |
251 | * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages | |
252 | * on sg-buffer | |
253 | * @dmab: buffer allocation information | |
254 | * @ofs: offset in the ring buffer | |
255 | * @size: the requested size | |
256 | */ | |
257 | unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab, | |
258 | unsigned int ofs, unsigned int size) | |
259 | { | |
260 | const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab); | |
261 | ||
262 | if (ops && ops->get_chunk_size) | |
263 | return ops->get_chunk_size(dmab, ofs, size); | |
264 | else | |
265 | return size; | |
266 | } | |
267 | EXPORT_SYMBOL(snd_sgbuf_get_chunk_size); | |
268 | ||
269 | /* | |
270 | * Continuous pages allocator | |
271 | */ | |
723c1252 | 272 | static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size) |
37af81c5 TI | 273 | { |
274 | gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL); | |
f84ba106 | 275 | void *p = alloc_pages_exact(size, gfp); |
37af81c5 | 276 | |
f84ba106 TI |
277 | if (p) |
278 | dmab->addr = page_to_phys(virt_to_page(p)); | |
279 | return p; | |
37af81c5 TI | 280 | } |
281 | ||
282 | static void snd_dma_continuous_free(struct snd_dma_buffer *dmab) | |
283 | { | |
284 | free_pages_exact(dmab->area, dmab->bytes); | |
285 | } | |
286 | ||
30b7ba69 TI | 287 | static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab, |
288 | struct vm_area_struct *area) | |
289 | { | |
290 | return remap_pfn_range(area, area->vm_start, | |
f84ba106 | 291 | dmab->addr >> PAGE_SHIFT, |
30b7ba69 TI | 292 | area->vm_end - area->vm_start, |
293 | area->vm_page_prot); | |
294 | } | |
295 | ||
37af81c5 TI | 296 | static const struct snd_malloc_ops snd_dma_continuous_ops = { |
297 | .alloc = snd_dma_continuous_alloc, | |
298 | .free = snd_dma_continuous_free, | |
30b7ba69 | 299 | .mmap = snd_dma_continuous_mmap, |
37af81c5 TI | 300 | }; |
301 | ||
302 | /* | |
303 | * VMALLOC allocator | |
304 | */ | |
723c1252 | 305 | static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size) |
37af81c5 TI | 306 | { |
307 | gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM); | |
308 | ||
723c1252 | 309 | return __vmalloc(size, gfp); |
37af81c5 TI | 310 | } |
311 | ||
312 | static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab) | |
313 | { | |
314 | vfree(dmab->area); | |
315 | } | |
316 | ||
30b7ba69 TI | 317 | static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab, |
318 | struct vm_area_struct *area) | |
319 | { | |
320 | return remap_vmalloc_range(area, dmab->area, 0); | |
321 | } | |
322 | ||
bda36b0f TI | 323 | #define get_vmalloc_page_addr(dmab, offset) \ |
324 | page_to_phys(vmalloc_to_page((dmab)->area + (offset))) | |
325 | ||
37af81c5 TI | 326 | static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab, |
327 | size_t offset) | |
328 | { | |
bda36b0f | 329 | return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE; |
37af81c5 TI | 330 | } |
331 | ||
332 | static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab, | |
333 | size_t offset) | |
334 | { | |
335 | return vmalloc_to_page(dmab->area + offset); | |
336 | } | |
337 | ||
338 | static unsigned int | |
339 | snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab, | |
340 | unsigned int ofs, unsigned int size) | |
341 | { | |
bda36b0f TI | 342 | unsigned int start, end; |
343 | unsigned long addr; | |
344 | ||
345 | start = ALIGN_DOWN(ofs, PAGE_SIZE); | |
346 | end = ofs + size - 1; /* the last byte address */ | |
347 | /* check page continuity */ | |
348 | addr = get_vmalloc_page_addr(dmab, start); | |
349 | for (;;) { | |
350 | start += PAGE_SIZE; | |
351 | if (start > end) | |
352 | break; | |
353 | addr += PAGE_SIZE; | |
354 | if (get_vmalloc_page_addr(dmab, start) != addr) | |
355 | return start - ofs; | |
356 | } | |
357 | /* ok, all on continuous pages */ | |
358 | return size; | |
37af81c5 TI | 359 | } |
360 | ||
361 | static const struct snd_malloc_ops snd_dma_vmalloc_ops = { | |
362 | .alloc = snd_dma_vmalloc_alloc, | |
363 | .free = snd_dma_vmalloc_free, | |
30b7ba69 | 364 | .mmap = snd_dma_vmalloc_mmap, |
37af81c5 TI | 365 | .get_addr = snd_dma_vmalloc_get_addr, |
366 | .get_page = snd_dma_vmalloc_get_page, | |
367 | .get_chunk_size = snd_dma_vmalloc_get_chunk_size, | |
368 | }; | |
369 | ||
8f11551b | 370 | #ifdef CONFIG_HAS_DMA |
37af81c5 TI | 371 | /* |
372 | * IRAM allocator | |
373 | */ | |
a5606f85 | 374 | #ifdef CONFIG_GENERIC_ALLOCATOR |
723c1252 | 375 | static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size) |
37af81c5 TI | 376 | { |
377 | struct device *dev = dmab->dev.dev; | |
378 | struct gen_pool *pool; | |
723c1252 | 379 | void *p; |
37af81c5 TI | 380 | |
381 | if (dev->of_node) { | |
382 | pool = of_gen_pool_get(dev->of_node, "iram", 0); | |
383 | /* Assign the pool into private_data field */ | |
384 | dmab->private_data = pool; | |
385 | ||
723c1252 TI | 386 | p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE); |
387 | if (p) | |
388 | return p; | |
37af81c5 TI | 389 | } |
390 | ||
391 | /* Internal memory might have limited size and no enough space, | |
392 | * so if we fail to malloc, try to fetch memory traditionally. | |
393 | */ | |
394 | dmab->dev.type = SNDRV_DMA_TYPE_DEV; | |
395 | return __snd_dma_alloc_pages(dmab, size); | |
396 | } | |
397 | ||
398 | static void snd_dma_iram_free(struct snd_dma_buffer *dmab) | |
399 | { | |
400 | struct gen_pool *pool = dmab->private_data; | |
401 | ||
402 | if (pool && dmab->area) | |
403 | gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes); | |
404 | } | |
405 | ||
a202bd1a TI | 406 | static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab, |
407 | struct vm_area_struct *area) | |
408 | { | |
409 | area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); | |
410 | return remap_pfn_range(area, area->vm_start, | |
411 | dmab->addr >> PAGE_SHIFT, | |
412 | area->vm_end - area->vm_start, | |
413 | area->vm_page_prot); | |
414 | } | |
415 | ||
37af81c5 TI | 416 | static const struct snd_malloc_ops snd_dma_iram_ops = { |
417 | .alloc = snd_dma_iram_alloc, | |
418 | .free = snd_dma_iram_free, | |
a202bd1a | 419 | .mmap = snd_dma_iram_mmap, |
37af81c5 | 420 | }; |
a5606f85 | 421 | #endif /* CONFIG_GENERIC_ALLOCATOR */ |
37af81c5 | 422 | |
d5c50558 TI | 423 | #define DEFAULT_GFP \ |
424 | (GFP_KERNEL | \ | |
425 | __GFP_COMP | /* compound page lets parts be mapped */ \ | |
426 | __GFP_NORETRY | /* don't trigger OOM-killer */ \ | |
427 | __GFP_NOWARN) /* no stack trace print - this call is non-critical */ | |
428 | ||
37af81c5 TI | 429 | /* |
430 | * Coherent device pages allocator | |
431 | */ | |
723c1252 | 432 | static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size) |
37af81c5 | 433 | { |
723c1252 | 434 | void *p; |
37af81c5 | 435 | |
d5c50558 | 436 | p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); |
37af81c5 | 437 | #ifdef CONFIG_X86 |
58a95dfa | 438 | if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC) |
723c1252 | 439 | set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT); |
cc6a8acd | 440 | #endif |
723c1252 | 441 | return p; |
37af81c5 TI | 442 | } |
443 | ||
444 | static void snd_dma_dev_free(struct snd_dma_buffer *dmab) | |
445 | { | |
446 | #ifdef CONFIG_X86 | |
58a95dfa | 447 | if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC) |
37af81c5 TI | 448 | set_memory_wb((unsigned long)dmab->area, |
449 | PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT); | |
450 | #endif | |
451 | dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); | |
452 | } | |
453 | ||
a202bd1a TI | 454 | static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab, |
455 | struct vm_area_struct *area) | |
456 | { | |
623c1010 TI | 457 | #ifdef CONFIG_X86 |
458 | if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC) | |
459 | area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); | |
460 | #endif | |
a202bd1a TI | 461 | return dma_mmap_coherent(dmab->dev.dev, area, |
462 | dmab->area, dmab->addr, dmab->bytes); | |
463 | } | |
464 | ||
37af81c5 TI | 465 | static const struct snd_malloc_ops snd_dma_dev_ops = { |
466 | .alloc = snd_dma_dev_alloc, | |
467 | .free = snd_dma_dev_free, | |
a202bd1a | 468 | .mmap = snd_dma_dev_mmap, |
37af81c5 | 469 | }; |
d5c50558 TI | 470 | |
471 | /* | |
472 | * Write-combined pages | |
473 | */ | |
474 | #ifdef CONFIG_X86 | |
475 | /* On x86, share the same ops as the standard dev ops */ | |
476 | #define snd_dma_wc_ops snd_dma_dev_ops | |
477 | #else /* CONFIG_X86 */ | |
478 | static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size) | |
479 | { | |
480 | return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP); | |
481 | } | |
482 | ||
483 | static void snd_dma_wc_free(struct snd_dma_buffer *dmab) | |
484 | { | |
485 | dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); | |
486 | } | |
487 | ||
488 | static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab, | |
489 | struct vm_area_struct *area) | |
490 | { | |
491 | return dma_mmap_wc(dmab->dev.dev, area, | |
492 | dmab->area, dmab->addr, dmab->bytes); | |
493 | } | |
494 | ||
495 | static const struct snd_malloc_ops snd_dma_wc_ops = { | |
496 | .alloc = snd_dma_wc_alloc, | |
497 | .free = snd_dma_wc_free, | |
498 | .mmap = snd_dma_wc_mmap, | |
499 | }; | |
500 | #endif /* CONFIG_X86 */ | |
a25684a9 | 501 | |
925ca893 TI | 502 | #ifdef CONFIG_SND_DMA_SGBUF |
503 | static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size); | |
504 | #endif | |
505 | ||
a25684a9 TI | 506 | /* |
507 | * Non-contiguous pages allocator | |
508 | */ | |
509 | static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size) | |
510 | { | |
511 | struct sg_table *sgt; | |
512 | void *p; | |
513 | ||
514 | sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir, | |
515 | DEFAULT_GFP, 0); | |
925ca893 TI | 516 | if (!sgt) { |
517 | #ifdef CONFIG_SND_DMA_SGBUF | |
518 | if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) | |
519 | dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK; | |
520 | else | |
521 | dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK; | |
522 | return snd_dma_sg_fallback_alloc(dmab, size); | |
523 | #else | |
a25684a9 | 524 | return NULL; |
925ca893 TI | 525 | #endif |
526 | } | |
527 | ||
8e1741c6 TI | 528 | dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, |
529 | sg_dma_address(sgt->sgl)); | |
a25684a9 TI | 530 | p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt); |
531 | if (p) | |
532 | dmab->private_data = sgt; | |
533 | else | |
534 | dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir); | |
535 | return p; | |
536 | } | |
537 | ||
538 | static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab) | |
539 | { | |
540 | dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area); | |
541 | dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data, | |
542 | dmab->dev.dir); | |
543 | } | |
544 | ||
545 | static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab, | |
546 | struct vm_area_struct *area) | |
547 | { | |
548 | return dma_mmap_noncontiguous(dmab->dev.dev, area, | |
549 | dmab->bytes, dmab->private_data); | |
550 | } | |
551 | ||
552 | static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab, | |
553 | enum snd_dma_sync_mode mode) | |
554 | { | |
555 | if (mode == SNDRV_DMA_SYNC_CPU) { | |
556 | if (dmab->dev.dir == DMA_TO_DEVICE) | |
557 | return; | |
3e16dc50 | 558 | invalidate_kernel_vmap_range(dmab->area, dmab->bytes); |
a25684a9 TI | 559 | dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data, |
560 | dmab->dev.dir); | |
a25684a9 TI | 561 | } else { |
562 | if (dmab->dev.dir == DMA_FROM_DEVICE) | |
563 | return; | |
564 | flush_kernel_vmap_range(dmab->area, dmab->bytes); | |
565 | dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data, | |
566 | dmab->dev.dir); | |
567 | } | |
568 | } | |
569 | ||
ad4f93ca TI | 570 | static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab, |
571 | struct sg_page_iter *piter, | |
572 | size_t offset) | |
573 | { | |
574 | struct sg_table *sgt = dmab->private_data; | |
575 | ||
576 | __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents, | |
577 | offset >> PAGE_SHIFT); | |
578 | } | |
579 | ||
580 | static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab, | |
581 | size_t offset) | |
582 | { | |
583 | struct sg_dma_page_iter iter; | |
584 | ||
585 | snd_dma_noncontig_iter_set(dmab, &iter.base, offset); | |
586 | __sg_page_iter_dma_next(&iter); | |
587 | return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE; | |
588 | } | |
589 | ||
590 | static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab, | |
591 | size_t offset) | |
592 | { | |
593 | struct sg_page_iter iter; | |
594 | ||
595 | snd_dma_noncontig_iter_set(dmab, &iter, offset); | |
596 | __sg_page_iter_next(&iter); | |
597 | return sg_page_iter_page(&iter); | |
598 | } | |
599 | ||
600 | static unsigned int | |
601 | snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab, | |
602 | unsigned int ofs, unsigned int size) | |
603 | { | |
604 | struct sg_dma_page_iter iter; | |
605 | unsigned int start, end; | |
606 | unsigned long addr; | |
607 | ||
608 | start = ALIGN_DOWN(ofs, PAGE_SIZE); | |
609 | end = ofs + size - 1; /* the last byte address */ | |
610 | snd_dma_noncontig_iter_set(dmab, &iter.base, start); | |
611 | if (!__sg_page_iter_dma_next(&iter)) | |
612 | return 0; | |
613 | /* check page continuity */ | |
614 | addr = sg_page_iter_dma_address(&iter); | |
615 | for (;;) { | |
616 | start += PAGE_SIZE; | |
617 | if (start > end) | |
618 | break; | |
619 | addr += PAGE_SIZE; | |
620 | if (!__sg_page_iter_dma_next(&iter) || | |
621 | sg_page_iter_dma_address(&iter) != addr) | |
622 | return start - ofs; | |
623 | } | |
624 | /* ok, all on continuous pages */ | |
625 | return size; | |
626 | } | |
627 | ||
a25684a9 TI | 628 | static const struct snd_malloc_ops snd_dma_noncontig_ops = { |
629 | .alloc = snd_dma_noncontig_alloc, | |
630 | .free = snd_dma_noncontig_free, | |
631 | .mmap = snd_dma_noncontig_mmap, | |
632 | .sync = snd_dma_noncontig_sync, | |
ad4f93ca TI | 633 | .get_addr = snd_dma_noncontig_get_addr, |
634 | .get_page = snd_dma_noncontig_get_page, | |
635 | .get_chunk_size = snd_dma_noncontig_get_chunk_size, | |
a25684a9 TI | 636 | }; |
637 | ||
2c95b92e TI | 638 | /* x86-specific SG-buffer with WC pages */ |
639 | #ifdef CONFIG_SND_DMA_SGBUF | |
640 | #define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it))) | |
641 | ||
642 | static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size) | |
643 | { | |
644 | void *p = snd_dma_noncontig_alloc(dmab, size); | |
645 | struct sg_table *sgt = dmab->private_data; | |
646 | struct sg_page_iter iter; | |
647 | ||
648 | if (!p) | |
649 | return NULL; | |
925ca893 TI | 650 | if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG) |
651 | return p; | |
2c95b92e TI | 652 | for_each_sgtable_page(sgt, &iter, 0) |
653 | set_memory_wc(sg_wc_address(&iter), 1); | |
654 | return p; | |
655 | } | |
656 | ||
657 | static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab) | |
658 | { | |
659 | struct sg_table *sgt = dmab->private_data; | |
660 | struct sg_page_iter iter; | |
661 | ||
662 | for_each_sgtable_page(sgt, &iter, 0) | |
663 | set_memory_wb(sg_wc_address(&iter), 1); | |
664 | snd_dma_noncontig_free(dmab); | |
665 | } | |
666 | ||
667 | static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab, | |
668 | struct vm_area_struct *area) | |
669 | { | |
670 | area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); | |
671 | return dma_mmap_noncontiguous(dmab->dev.dev, area, | |
672 | dmab->bytes, dmab->private_data); | |
673 | } | |
674 | ||
675 | static const struct snd_malloc_ops snd_dma_sg_wc_ops = { | |
676 | .alloc = snd_dma_sg_wc_alloc, | |
677 | .free = snd_dma_sg_wc_free, | |
678 | .mmap = snd_dma_sg_wc_mmap, | |
679 | .sync = snd_dma_noncontig_sync, | |
680 | .get_addr = snd_dma_noncontig_get_addr, | |
681 | .get_page = snd_dma_noncontig_get_page, | |
682 | .get_chunk_size = snd_dma_noncontig_get_chunk_size, | |
683 | }; | |
925ca893 TI | 684 | |
685 | /* Fallback SG-buffer allocations for x86 */ | |
686 | struct snd_dma_sg_fallback { | |
687 | size_t count; | |
688 | struct page **pages; | |
689 | dma_addr_t *addrs; | |
690 | }; | |
691 | ||
692 | static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab, | |
693 | struct snd_dma_sg_fallback *sgbuf) | |
694 | { | |
695 | size_t i; | |
696 | ||
697 | if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK) | |
698 | set_pages_array_wb(sgbuf->pages, sgbuf->count); | |
699 | for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++) | |
700 | dma_free_coherent(dmab->dev.dev, PAGE_SIZE, | |
701 | page_address(sgbuf->pages[i]), | |
702 | sgbuf->addrs[i]); | |
703 | kvfree(sgbuf->pages); | |
704 | kvfree(sgbuf->addrs); | |
705 | kfree(sgbuf); | |
706 | } | |
707 | ||
708 | static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size) | |
709 | { | |
710 | struct snd_dma_sg_fallback *sgbuf; | |
711 | struct page **pages; | |
712 | size_t i, count; | |
713 | void *p; | |
714 | ||
715 | sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL); | |
716 | if (!sgbuf) | |
717 | return NULL; | |
718 | count = PAGE_ALIGN(size) >> PAGE_SHIFT; | |
719 | pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL); | |
720 | if (!pages) | |
721 | goto error; | |
722 | sgbuf->pages = pages; | |
723 | sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL); | |
724 | if (!sgbuf->addrs) | |
725 | goto error; | |
726 | ||
727 | for (i = 0; i < count; sgbuf->count++, i++) { | |
728 | p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE, | |
729 | &sgbuf->addrs[i], DEFAULT_GFP); | |
730 | if (!p) | |
731 | goto error; | |
732 | sgbuf->pages[i] = virt_to_page(p); | |
733 | } | |
734 | ||
735 | if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK) | |
736 | set_pages_array_wc(pages, count); | |
737 | p = vmap(pages, count, VM_MAP, PAGE_KERNEL); | |
738 | if (!p) | |
739 | goto error; | |
740 | dmab->private_data = sgbuf; | |
741 | return p; | |
742 | ||
743 | error: | |
744 | __snd_dma_sg_fallback_free(dmab, sgbuf); | |
745 | return NULL; | |
746 | } | |
747 | ||
748 | static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab) | |
749 | { | |
750 | vunmap(dmab->area); | |
751 | __snd_dma_sg_fallback_free(dmab, dmab->private_data); | |
752 | } | |
753 | ||
754 | static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab, | |
755 | struct vm_area_struct *area) | |
756 | { | |
757 | struct snd_dma_sg_fallback *sgbuf = dmab->private_data; | |
758 | ||
759 | if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK) | |
760 | area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); | |
761 | return vm_map_pages(area, sgbuf->pages, sgbuf->count); | |
762 | } | |
763 | ||
764 | static const struct snd_malloc_ops snd_dma_sg_fallback_ops = { | |
765 | .alloc = snd_dma_sg_fallback_alloc, | |
766 | .free = snd_dma_sg_fallback_free, | |
767 | .mmap = snd_dma_sg_fallback_mmap, | |
768 | /* reuse vmalloc helpers */ | |
769 | .get_addr = snd_dma_vmalloc_get_addr, | |
770 | .get_page = snd_dma_vmalloc_get_page, | |
771 | .get_chunk_size = snd_dma_vmalloc_get_chunk_size, | |
772 | }; | |
2c95b92e TI | 773 | #endif /* CONFIG_SND_DMA_SGBUF */ |
774 | ||
73325f60 TI | 775 | /* |
776 | * Non-coherent pages allocator | |
777 | */ | |
778 | static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size) | |
779 | { | |
8e1741c6 TI | 780 | void *p; |
781 | ||
782 | p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr, | |
783 | dmab->dev.dir, DEFAULT_GFP); | |
784 | if (p) | |
785 | dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr); | |
786 | return p; | |
73325f60 TI | 787 | } |
788 | ||
789 | static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab) | |
790 | { | |
791 | dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area, | |
792 | dmab->addr, dmab->dev.dir); | |
793 | } | |
794 | ||
795 | static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab, | |
796 | struct vm_area_struct *area) | |
797 | { | |
798 | area->vm_page_prot = vm_get_page_prot(area->vm_flags); | |
799 | return dma_mmap_pages(dmab->dev.dev, area, | |
800 | area->vm_end - area->vm_start, | |
801 | virt_to_page(dmab->area)); | |
802 | } | |
803 | ||
804 | static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab, | |
805 | enum snd_dma_sync_mode mode) | |
806 | { | |
807 | if (mode == SNDRV_DMA_SYNC_CPU) { | |
808 | if (dmab->dev.dir != DMA_TO_DEVICE) | |
809 | dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr, | |
810 | dmab->bytes, dmab->dev.dir); | |
811 | } else { | |
812 | if (dmab->dev.dir != DMA_FROM_DEVICE) | |
813 | dma_sync_single_for_device(dmab->dev.dev, dmab->addr, | |
814 | dmab->bytes, dmab->dev.dir); | |
815 | } | |
816 | } | |
817 | ||
818 | static const struct snd_malloc_ops snd_dma_noncoherent_ops = { | |
819 | .alloc = snd_dma_noncoherent_alloc, | |
820 | .free = snd_dma_noncoherent_free, | |
821 | .mmap = snd_dma_noncoherent_mmap, | |
822 | .sync = snd_dma_noncoherent_sync, | |
823 | }; | |
824 | ||
37af81c5 TI | 825 | #endif /* CONFIG_HAS_DMA */ |
826 | ||
827 | /* | |
828 | * Entry points | |
829 | */ | |
830 | static const struct snd_malloc_ops *dma_ops[] = { | |
831 | [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops, | |
832 | [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops, | |
833 | #ifdef CONFIG_HAS_DMA | |
834 | [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops, | |
d5c50558 | 835 | [SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops, |
a25684a9 | 836 | [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops, |
73325f60 | 837 | [SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops, |
2c95b92e TI | 838 | #ifdef CONFIG_SND_DMA_SGBUF |
839 | [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops, | |
840 | #endif | |
37af81c5 TI | 841 | #ifdef CONFIG_GENERIC_ALLOCATOR |
842 | [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops, | |
843 | #endif /* CONFIG_GENERIC_ALLOCATOR */ | |
925ca893 TI | 844 | #ifdef CONFIG_SND_DMA_SGBUF |
845 | [SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops, | |
846 | [SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops, | |
847 | #endif | |
37af81c5 | 848 | #endif /* CONFIG_HAS_DMA */ |
37af81c5 TI | 849 | }; |
850 | ||
851 | static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab) | |
852 | { | |
dce94461 TI | 853 | if (WARN_ON_ONCE(!dmab)) |
854 | return NULL; | |
37af81c5 TI | 855 | if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN || |
856 | dmab->dev.type >= ARRAY_SIZE(dma_ops))) | |
857 | return NULL; | |
858 | return dma_ops[dmab->dev.type]; | |
1da177e4 | 859 | } |
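
Usage note: the allocation entry points listed above are normally driven from a sound driver. The following sketch (the driver function name and the 64 KiB size are hypothetical, not code from this file) shows the typical sequence of allocating a buffer for one DMA direction, touching it through the CPU mapping in `dmab.area`, syncing it toward the device, and freeing it again:

```c
/*
 * Illustrative sketch, not part of memalloc.c: allocate, fill, sync and
 * free a device DMA buffer via the entry points documented above.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <sound/memalloc.h>

static int example_prepare_buffer(struct device *dev)
{
	struct snd_dma_buffer dmab;
	int err;

	/* the type selects the allocator; SNDRV_DMA_TYPE_DEV backs onto dma_alloc_coherent() */
	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV, dev,
				      DMA_TO_DEVICE, 64 * 1024, &dmab);
	if (err < 0)
		return err;

	memset(dmab.area, 0, dmab.bytes);	/* CPU-side access via the mapped area */

	/* a no-op for coherent types; needed for NONCONTIG/NONCOHERENT buffers */
	snd_dma_buffer_sync(&dmab, SNDRV_DMA_SYNC_DEVICE);

	/* ... program dmab.addr (the DMA address) into the hardware here ... */

	snd_dma_free_pages(&dmab);
	return 0;
}
```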
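
The devres-managed wrapper removes the explicit free. A sketch under the same assumptions (the probe function is hypothetical):

```c
/*
 * Illustrative sketch, not part of memalloc.c: the devres-managed variant.
 * The buffer is released automatically when "dev" is unbound, so no
 * snd_dma_free_pages() call appears in the exit paths.
 */
static int example_probe(struct device *dev)
{
	struct snd_dma_buffer *dmab;

	dmab = snd_devm_alloc_dir_pages(dev, SNDRV_DMA_TYPE_DEV_WC,
					DMA_BIDIRECTIONAL, 128 * 1024);
	if (!dmab)
		return -ENOMEM;

	/* dmab->area and dmab->addr stay valid for the device's lifetime */
	return 0;
}
```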
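
The `snd_sgbuf_get_addr()` and `snd_sgbuf_get_chunk_size()` helpers let a caller program scatter-gather style hardware without knowing which allocator backed the buffer: each step yields the DMA address at an offset and the largest physically continuous run starting there. A sketch of that loop (the descriptor write is hardware specific and only hinted at in a comment):

```c
/*
 * Illustrative sketch, not part of memalloc.c: walk a possibly
 * non-contiguous buffer in physically continuous chunks, as a
 * scatter-gather capable DMA engine would be programmed.
 */
static void example_program_sg(struct snd_dma_buffer *dmab)
{
	unsigned int ofs = 0;

	while (ofs < dmab->bytes) {
		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs,
							      dmab->bytes - ofs);
		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);

		if (!chunk)
			break;		/* defensive: stop on an empty chunk */

		/* ... emit one descriptor covering [addr, addr + chunk) ... */

		ofs += chunk;
	}
}
```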