dma-pool: fix coherent pool allocations for IOMMU mappings
kernel/dma/pool.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be defined by the coherent_pool command line */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

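/*
 * Expose the accumulated size of each pool, in bytes, under
 * /sys/kernel/debug/dma_pools/.
 */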
static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);
	if (IS_ERR_OR_NULL(root))
		return;

	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}

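/* Credit newly added memory to the counter matching the gfp zone flags. */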
static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}

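/*
 * Grow a pool by one physically contiguous chunk of up to pool_size
 * bytes: allocate pages (falling back to smaller orders under memory
 * pressure), make them coherent and unencrypted, and hand the region
 * to the genpool allocator.
 */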
static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_ORDER-1 */
	order = min(get_order(pool_size), MAX_ORDER - 1);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);
		page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted, the pools do not
	 * shrink so no re-encryption occurs in dma_direct_free_pages().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (WARN_ON_ONCE(ret)) {
		/* Decrypt succeeded but encrypt failed, purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
	__free_pages(page, order);
out:
	return ret;
}

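/*
 * Called from the worker: if less than atomic_pool_size bytes remain
 * available, grow the pool by its current size, i.e. roughly double it.
 */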
static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

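/*
 * Create a genpool with PAGE_SHIFT granularity and seed it with an
 * initial atomic_pool_expand() allocation; returns NULL on failure.
 */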
static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						      gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}

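/*
 * Boot-time setup of one pool per configured zone. A failed pool makes
 * the initcall return -ENOMEM, but any pools that were created remain
 * usable.
 */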
static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not used on the command line, default the pool
	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
	 */
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
	if (!atomic_pool_kernel)
		ret = -ENOMEM;
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);

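/*
 * Pick the next pool to try: the gfp flags choose the starting pool,
 * and each retry falls through to the next lower zone
 * (kernel -> dma32 -> dma). All pools are worth trying because a
 * device's real addressing limit, as checked by the caller's
 * phys_addr_ok callback, need not match the zone implied by the gfp
 * flags, e.g. for allocations backing an IOMMU mapping.
 */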
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
	if (prev == NULL) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
			return atomic_pool_dma32;
		if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
			return atomic_pool_dma;
		return atomic_pool_kernel;
	}
	if (prev == atomic_pool_kernel)
		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
	if (prev == atomic_pool_dma32)
		return atomic_pool_dma;
	return NULL;
}

static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
		struct gen_pool *pool, void **cpu_addr,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	unsigned long addr;
	phys_addr_t phys;

	addr = gen_pool_alloc(pool, size);
	if (!addr)
		return NULL;

	phys = gen_pool_virt_to_phys(pool, addr);
	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
		gen_pool_free(pool, addr, size);
		return NULL;
	}

	if (gen_pool_avail(pool) < atomic_pool_size)
		schedule_work(&atomic_pool_work);

	*cpu_addr = (void *)addr;
	memset(*cpu_addr, 0, size);
	return pfn_to_page(__phys_to_pfn(phys));
}

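/*
 * Try every candidate pool in dma_guess_pool() order until one yields
 * memory that phys_addr_ok accepts for this device.
 */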
struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t gfp,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	struct gen_pool *pool = NULL;
	struct page *page;

	while ((pool = dma_guess_pool(pool, gfp))) {
		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
					     phys_addr_ok);
		if (page)
			return page;
	}

	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
	return NULL;
}

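/*
 * Walk all pools to find the one owning start; returns false if the
 * address was not allocated from any atomic pool.
 */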
bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = NULL;

	while ((pool = dma_guess_pool(pool, 0))) {
		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
			continue;
		gen_pool_free(pool, (unsigned long)start, size);
		return true;
	}

	return false;
}