kernel/dma/pool.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/cma.h>
#include <linux/debugfs.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

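/*
 * One backing pool per GFP zone (DMA, DMA32 and normal kernel memory),
 * each with a debugfs-visible running total of its size.
 */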
static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

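/* Expose the per-zone pool sizes under debugfs ("dma_pools" directory) */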
static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);
	if (IS_ERR_OR_NULL(root))
		return;

	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

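/* Account an expansion of @size bytes to the pool matching the gfp zone */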
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}

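/*
 * Check whether the default CMA area lies entirely within the zone implied
 * by @gfp, so pool memory taken from CMA stays addressable by devices
 * limited to that zone.
 */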
static bool cma_in_zone(gfp_t gfp)
{
	unsigned long size;
	phys_addr_t end;
	struct cma *cma;

	cma = dev_get_cma_area(NULL);
	if (!cma)
		return false;

	size = cma_get_size(cma);
	if (!size)
		return false;

	/* CMA can't cross zone boundaries, see cma_activate_area() */
	end = cma_get_base(cma) + size - 1;
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
		return end <= DMA_BIT_MASK(zone_dma_bits);
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
		return end <= DMA_BIT_MASK(32);
	return true;
}

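/*
 * Grow @pool by roughly @pool_size bytes: allocate a high-order page block
 * (preferring CMA when it sits in a suitable zone), make it coherent and
 * unencrypted, and hand it to the genpool allocator.
 */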
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page = NULL;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_ORDER-1 */
	order = min(get_order(pool_size), MAX_ORDER-1);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);
		if (cma_in_zone(gfp))
			page = dma_alloc_from_contiguous(NULL, 1 << order,
							 order, false);
		if (!page)
			page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted, the pools do not
	 * shrink so no re-encryption occurs in dma_direct_free_pages().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (WARN_ON_ONCE(ret)) {
		/* Decrypt succeeded but encrypt failed, purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
	__free_pages(page, order);
out:
	return ret;
}

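/* Refill @pool in the background once it drops below one atomic_pool_size */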
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

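/*
 * Create a page-granular genpool and seed it with an initial expansion;
 * returns NULL (and logs an error) if the pool cannot be populated.
 */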
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						       gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}

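/* Set up the kernel, DMA and DMA32 atomic pools early during boot */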
static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not used on the command line, default the pool
	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
	 */
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
	if (!atomic_pool_kernel)
		ret = -ENOMEM;
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);

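/*
 * Pick the next pool to try: start with the pool matching the zone implied
 * by @gfp, then fall back to more restrictive pools (whose memory is still
 * addressable); returns NULL once all candidates have been tried.
 */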
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
	if (prev == NULL) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
			return atomic_pool_dma32;
		if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
			return atomic_pool_dma;
		return atomic_pool_kernel;
	}
	if (prev == atomic_pool_kernel)
		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
	if (prev == atomic_pool_dma32)
		return atomic_pool_dma;
	return NULL;
}

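/*
 * Carve @size bytes out of @pool, let the caller veto the block via
 * @phys_addr_ok(), zero it, and kick the background worker if the pool
 * is running low.
 */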
static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
		struct gen_pool *pool, void **cpu_addr,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	unsigned long addr;
	phys_addr_t phys;

	addr = gen_pool_alloc(pool, size);
	if (!addr)
		return NULL;

	phys = gen_pool_virt_to_phys(pool, addr);
	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
		gen_pool_free(pool, addr, size);
		return NULL;
	}

	if (gen_pool_avail(pool) < atomic_pool_size)
		schedule_work(&atomic_pool_work);

	*cpu_addr = (void *)addr;
	memset(*cpu_addr, 0, size);
	return pfn_to_page(__phys_to_pfn(phys));
}

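/*
 * Allocate @size bytes of atomic pool memory for @dev, trying each
 * candidate pool from dma_guess_pool() until one passes @phys_addr_ok().
 */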
struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t gfp,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	struct gen_pool *pool = NULL;
	struct page *page;

	while ((pool = dma_guess_pool(pool, gfp))) {
		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
					     phys_addr_ok);
		if (page)
			return page;
	}

	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
	return NULL;
}

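/*
 * Return a block previously obtained from dma_alloc_from_pool() to the
 * pool that owns it; returns false if @start is not pool memory.
 */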
c84dc6e6 287bool dma_free_from_pool(struct device *dev, void *start, size_t size)
e860c299 288{
81e9d894 289 struct gen_pool *pool = NULL;
c84dc6e6 290
9420139f
CH
291 while ((pool = dma_guess_pool(pool, 0))) {
292 if (!gen_pool_has_addr(pool, (unsigned long)start, size))
293 continue;
294 gen_pool_free(pool, (unsigned long)start, size);
295 return true;
81e9d894 296 }
9420139f
CH
297
298 return false;
e860c299 299}