// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
#include "memalloc_local.h"

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);

/* extract the gfp flags encoded in the dev pointer; used for the CONTINUOUS
 * and VMALLOC buffer types, where no real device is attached
 */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
					  gfp_t default_gfp)
{
	if (!dmab->dev.dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dmab->dev.dev;
}
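
/*
 * Illustrative sketch, not part of the original file: for the CONTINUOUS and
 * VMALLOC types the caller encodes the gfp flags in place of the device
 * pointer; <sound/memalloc.h> has historically provided the
 * snd_dma_continuous_data() cast helper for this purpose:
 *
 *	snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
 *			    snd_dma_continuous_data(GFP_KERNEL),
 *			    size, dmab);
 */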

static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (WARN_ON_ONCE(!ops || !ops->alloc))
		return -EINVAL;
	return ops->alloc(dmab, size);
}

/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	int err;

	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = NULL;
	err = __snd_dma_alloc_pages(dmab, size);
	if (err < 0)
		return err;
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);
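
/*
 * Illustrative sketch, not part of the original file: a typical driver
 * allocates a device-coherent buffer and releases it again like this
 * (the 64 KiB size is arbitrary):
 *
 *	struct snd_dma_buffer buf;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
 *				  64 * 1024, &buf);
 *	if (err < 0)
 *		return err;
 *	...
 *	snd_dma_free_pages(&buf);
 */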

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type. When no space is left, this function reduces the size and
 * tries to allocate again. The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		/* halve the size, then round up to the next page order */
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
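
/*
 * Illustrative sketch, not part of the original file: after a successful
 * fallback allocation, check dmab->bytes for the size actually obtained,
 * which may be smaller than requested:
 *
 *	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
 *					   512 * 1024, &buf);
 *	if (!err && buf.bytes < 512 * 1024)
 *		dev_dbg(dev, "got only %zu bytes\n", buf.bytes);
 */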

/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->free)
		ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);

/**
 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
 * @dmab: buffer allocation information
 * @area: VM area information
 *
 * Return: zero if successful, or a negative error code
 */
int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
			struct vm_area_struct *area)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->mmap)
		return ops->mmap(dmab, area);
	else
		return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);

/**
 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the physical address
 */
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_addr)
		return ops->get_addr(dmab, offset);
	else
		return dmab->addr + offset;
}
EXPORT_SYMBOL(snd_sgbuf_get_addr);

/**
 * snd_sgbuf_get_page - return the physical page at the corresponding offset
 * @dmab: buffer allocation information
 * @offset: offset in the ring buffer
 *
 * Return: the page pointer
 */
struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_page)
		return ops->get_page(dmab, offset);
	else
		return virt_to_page(dmab->area + offset);
}
EXPORT_SYMBOL(snd_sgbuf_get_page);

/**
 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
 *	on sg-buffer
 * @dmab: buffer allocation information
 * @ofs: offset in the ring buffer
 * @size: the requested size
 *
 * Return: the chunk size
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

	if (ops && ops->get_chunk_size)
		return ops->get_chunk_size(dmab, ofs, size);
	else
		return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);

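/*
 * Illustrative sketch, not part of the original file: walking an SG buffer
 * in physically continuous chunks, e.g. to fill a hardware descriptor list;
 * program_descriptor() is a hypothetical driver helper:
 *
 *	unsigned int ofs = 0, chunk;
 *
 *	while (ofs < dmab->bytes) {
 *		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, dmab->bytes - ofs);
 *		program_descriptor(snd_sgbuf_get_addr(dmab, ofs), chunk);
 *		ofs += chunk;
 *	}
 */
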
/*
 * Continuous pages allocator
 */
static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);

	dmab->area = alloc_pages_exact(size, gfp);
	return 0;
}

static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
	free_pages_exact(dmab->area, dmab->bytes);
}

static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
				   struct vm_area_struct *area)
{
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_continuous_ops = {
	.alloc = snd_dma_continuous_alloc,
	.free = snd_dma_continuous_free,
	.mmap = snd_dma_continuous_mmap,
};

/*
 * VMALLOC allocator
 */
static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);

	dmab->area = __vmalloc(size, gfp);
	return 0;
}

static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
{
	vfree(dmab->area);
}

static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
				struct vm_area_struct *area)
{
	return remap_vmalloc_range(area, dmab->area, 0);
}

static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
					   size_t offset)
{
	return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
		offset % PAGE_SIZE;
}

static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
					     size_t offset)
{
	return vmalloc_to_page(dmab->area + offset);
}

static unsigned int
snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
			       unsigned int ofs, unsigned int size)
{
	ofs %= PAGE_SIZE;
	size += ofs;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;
	return size - ofs;
}

static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
	.alloc = snd_dma_vmalloc_alloc,
	.free = snd_dma_vmalloc_free,
	.mmap = snd_dma_vmalloc_mmap,
	.get_addr = snd_dma_vmalloc_get_addr,
	.get_page = snd_dma_vmalloc_get_page,
	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
};
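
/*
 * Illustrative note, not part of the original file: with a 4096-byte
 * PAGE_SIZE, snd_dma_vmalloc_get_chunk_size(dmab, 3000, 8192) caps the
 * chunk at the remainder of the current page and returns 4096 - 3000 = 1096,
 * since a vmalloc buffer is only guaranteed physically continuous within
 * a single page.
 */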

#ifdef CONFIG_HAS_DMA
/*
 * IRAM allocator
 */
#ifdef CONFIG_GENERIC_ALLOCATOR
static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool;

	if (dev->of_node) {
		pool = of_gen_pool_get(dev->of_node, "iram", 0);
		/* Assign the pool into private_data field */
		dmab->private_data = pool;

		dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
						      PAGE_SIZE);
		if (dmab->area)
			return 0;
	}

	/* Internal memory might have limited size and not enough space,
	 * so if we fail to allocate from it, fall back to the normal pages.
	 */
	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	return __snd_dma_alloc_pages(dmab, size);
}

static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}

static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
			     struct vm_area_struct *area)
{
	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
	return remap_pfn_range(area, area->vm_start,
			       dmab->addr >> PAGE_SHIFT,
			       area->vm_end - area->vm_start,
			       area->vm_page_prot);
}

static const struct snd_malloc_ops snd_dma_iram_ops = {
	.alloc = snd_dma_iram_alloc,
	.free = snd_dma_iram_free,
	.mmap = snd_dma_iram_mmap,
};
#endif /* CONFIG_GENERIC_ALLOCATOR */

/*
 * Coherent device pages allocator
 */
static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp_flags;

	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY	/* don't trigger OOM-killer */
		| __GFP_NOWARN;	/* no stack trace print - this call is non-critical */
	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
					gfp_flags);
#ifdef CONFIG_X86
	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wc((unsigned long)dmab->area,
			      PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
	return 0;
}

static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
			    struct vm_area_struct *area)
{
	return dma_mmap_coherent(dmab->dev.dev, area,
				 dmab->area, dmab->addr, dmab->bytes);
}

static const struct snd_malloc_ops snd_dma_dev_ops = {
	.alloc = snd_dma_dev_alloc,
	.free = snd_dma_dev_free,
	.mmap = snd_dma_dev_mmap,
};
#endif /* CONFIG_HAS_DMA */
381 | ||
382 | /* | |
383 | * Entry points | |
384 | */ | |
385 | static const struct snd_malloc_ops *dma_ops[] = { | |
386 | [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops, | |
387 | [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops, | |
388 | #ifdef CONFIG_HAS_DMA | |
389 | [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops, | |
390 | [SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops, | |
391 | #ifdef CONFIG_GENERIC_ALLOCATOR | |
392 | [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops, | |
393 | #endif /* CONFIG_GENERIC_ALLOCATOR */ | |
394 | #endif /* CONFIG_HAS_DMA */ | |
cc6a8acd | 395 | #ifdef CONFIG_SND_DMA_SGBUF |
37af81c5 TI |
396 | [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops, |
397 | [SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops, | |
8f11551b | 398 | #endif |
37af81c5 TI |
399 | }; |
400 | ||
401 | static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab) | |
402 | { | |
403 | if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN || | |
404 | dmab->dev.type >= ARRAY_SIZE(dma_ops))) | |
405 | return NULL; | |
406 | return dma_ops[dmab->dev.type]; | |
1da177e4 | 407 | } |