// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes __initconst =
			(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;

static int __init early_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
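
/*
 * Example (illustrative, not part of the original file): the parser above
 * accepts "size[@base[-limit]]", each value in memparse() notation, so a
 * command line such as
 *
 *	cma=128M@0x80000000-0x88000000
 *
 * asks for a 128 MiB global area in the 0x80000000..0x88000000 range
 * (here base + size equals the limit, so dma_contiguous_reserve() below
 * treats it as a fixed placement), while "cma=64M" alone lets the
 * allocator choose where to place the area.
 */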

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
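
/*
 * Usage sketch (not part of the original file): architecture setup code
 * calls this once memblock is ready and other early reservations are done.
 * For example, 32-bit ARM does roughly the following from its early memory
 * initialisation (the exact call site and DMA limit variable vary between
 * kernel versions):
 *
 *	dma_contiguous_reserve(arm_dma_limit);
 *
 * Passing 0 as the limit lets the global area be placed anywhere.
 */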

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows creation of custom reserved areas for specific devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
				     "reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}
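
/*
 * Usage sketch (illustrative, hypothetical names): board or SoC setup code
 * could carve out a dedicated area for a single device, e.g. a 16 MiB
 * region fixed at 0x30000000, and attach it to that device:
 *
 *	struct cma *codec_cma;
 *
 *	if (dma_contiguous_reserve_area(SZ_16M, 0x30000000,
 *					0x30000000 + SZ_16M,
 *					&codec_cma, true))
 *		pr_warn("codec CMA reservation failed\n");
 *	else
 *		dev_set_cma_area(&codec_pdev.dev, codec_cma);
 *
 * "codec_cma" and "codec_pdev" are made-up names for this example.
 */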

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available, or the default
 * global one. Requires the architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, bool no_warn)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}
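
/*
 * Usage sketch (illustrative, not part of the original file): code needing
 * a large physically contiguous buffer would typically pair the two calls
 * above, e.g. for a device "dev" and a buffer of "size" bytes:
 *
 *	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *	struct page *pages;
 *
 *	pages = dma_alloc_from_contiguous(dev, count, get_order(size), false);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	dma_release_from_contiguous(dev, pages, count);
 *
 * In practice most drivers go through the DMA mapping API
 * (dma_alloc_coherent() and friends), which ends up here on CMA-enabled
 * configurations.
 */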

/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev: Pointer to device for which the allocation is performed.
 * @size: Requested allocation size.
 * @gfp: Allocation flags.
 *
 * This function allocates a contiguous memory buffer for the specified
 * device. It uses the device specific contiguous memory area if available,
 * or the default global one; if neither can satisfy the request it returns
 * NULL and the caller is expected to fall back to normal page allocation.
 *
 * Note that it bypasses one-page sized allocations from the global area, as
 * the addresses within one page are always contiguous, so there is no need
 * to waste CMA pages for that kind of request; it also helps reduce
 * fragmentation.
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
	size_t count = size >> PAGE_SHIFT;
	struct page *page = NULL;
	struct cma *cma = NULL;

	if (dev && dev->cma_area)
		cma = dev->cma_area;
	else if (count > 1)
		cma = dma_contiguous_default_area;

	/* CMA can be used only in the context which permits sleeping */
	if (cma && gfpflags_allow_blocking(gfp)) {
		size_t align = get_order(size);
		size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);

		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
	}

	return page;
}

/**
 * dma_free_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @page: Pointer to the allocated pages.
 * @size: Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous(). As
 * cma_release() returns false when the provided pages do not belong to a
 * contiguous area (and true otherwise), this function falls back to
 * __free_pages() upon a false return.
 */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	if (!cma_release(dev_get_cma_area(dev), page,
			 PAGE_ALIGN(size) >> PAGE_SHIFT))
		__free_pages(page, get_order(size));
}
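
/*
 * Usage sketch (illustrative, simplified): the caller handles the normal
 * page allocator fallback itself, roughly as the direct-mapping code does:
 *
 *	struct page *page = dma_alloc_contiguous(dev, size, gfp);
 *
 *	if (!page)
 *		page = alloc_pages_node(dev_to_node(dev), gfp,
 *					get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 *
 * dma_free_contiguous() works for both paths because it falls back to
 * __free_pages() when the pages did not come from a CMA area.
 */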

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev_set_cma_area(dev, rmem->priv);
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev_set_cma_area(dev, NULL);
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	phys_addr_t mask = align - 1;
	unsigned long node = rmem->fdt_node;
	struct cma *cma;
	int err;

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if ((rmem->base & mask) || (rmem->size & mask)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
		dma_contiguous_set_default(cma);

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
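
/*
 * Device tree sketch (illustrative, not part of the original file): a
 * default CMA pool matching the setup handler above could be described
 * like this (size and cell widths are example values):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 *
 * The "reusable" property is mandatory here (and "no-map" must be absent),
 * otherwise rmem_cma_setup() rejects the node with -EINVAL.
 */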
#endif