drivers/base/dma-contiguous.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
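
/*
 * Example (illustrative, addresses are placeholders): the parser above
 * accepts "cma=64M" (size only), "cma=64M@0x80000000" (size at a fixed
 * base; the limit then becomes base + size) and
 * "cma=64M@0x80000000-0xa0000000" (size, base and upper limit).
 */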

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
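
/*
 * Example (illustrative sketch, not built): how architecture setup code
 * typically calls dma_contiguous_reserve() once memblock is up and all
 * other early reservations are done. The function name and the limit value
 * below are hypothetical; real call sites and DMA limits are arch specific.
 */
#if 0
static void __init example_arch_reserve_cma(void)
{
	/* 0 means "no upper address limit" for the global CMA area. */
	dma_contiguous_reserve(0);
}
#endif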

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas for
 * specific devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
				     "reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}
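
/*
 * Example (illustrative sketch, not built): reserving a custom CMA area for
 * one device from early board code, as described above. The names
 * example_cam_cma and example_reserve_camera_cma are hypothetical.
 */
#if 0
static struct cma *example_cam_cma;

static void __init example_reserve_camera_cma(void)
{
	/* 16 MiB anywhere (base 0, limit 0); fixed == false lets the
	 * allocator choose the base address. */
	if (dma_contiguous_reserve_area(SZ_16M, 0, 0,
					&example_cam_cma, false))
		pr_warn("camera CMA reservation failed\n");
}
#endif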

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP flags to use for this allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, gfp_t gfp_mask)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}
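
/*
 * Example (illustrative sketch, not built): a driver-side allocate/release
 * pair using the two helpers above. Most drivers reach CMA indirectly via
 * dma_alloc_coherent(); this direct use only illustrates the calling
 * convention. All names below are hypothetical.
 */
#if 0
static struct page *example_alloc_frame(struct device *dev, size_t size)
{
	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* Request order-4 alignment; it is clamped to CONFIG_CMA_ALIGNMENT. */
	return dma_alloc_from_contiguous(dev, count, 4, GFP_KERNEL);
}

static void example_free_frame(struct device *dev, struct page *pages,
			       size_t size)
{
	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, pages, count))
		dev_warn(dev, "pages did not come from the CMA area\n");
}
#endif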

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev_set_cma_area(dev, rmem->priv);
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev_set_cma_area(dev, NULL);
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init = rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	phys_addr_t mask = align - 1;
	unsigned long node = rmem->fdt_node;
	struct cma *cma;
	int err;

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if ((rmem->base & mask) || (rmem->size & mask)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
		dma_contiguous_set_default(cma);

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
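
/*
 * Example (illustrative, values are placeholders): a device-tree node that
 * the "shared-dma-pool" handler above would turn into a CMA region. See the
 * reserved-memory device-tree binding for the complete syntax.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 */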
#endif