memremap: add a migrate_to_ram method to struct dev_pagemap_ops
kernel/memremap.c
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;

static void devmap_managed_enable_put(void *data)
{
	if (atomic_dec_and_test(&devmap_managed_enable))
		static_branch_disable(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
{
	if (!pgmap->ops->page_free) {
		WARN(1, "Missing page_free method\n");
		return -EINVAL;
	}

	if (atomic_inc_return(&devmap_managed_enable) == 1)
		static_branch_enable(&devmap_managed_key);
	return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
}
#else
static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
{
	return -EINVAL;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
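
/*
 * Illustrative sketch (not part of this file): devmap_managed_key is
 * consumed on the page put path; the real consumer is
 * put_devmap_managed_page() in <linux/mm.h>.  A simplified, hypothetical
 * version of that gate looks like:
 *
 *	static inline bool example_put_devmap_managed_page(struct page *page)
 *	{
 *		if (!static_branch_unlikely(&devmap_managed_key))
 *			return false;
 *		if (!is_zone_device_page(page))
 *			return false;
 *		__put_devmap_managed_page(page);
 *		return true;
 *	}
 *
 * The atomic_t counter above refcounts users of the key, so the static
 * branch is enabled by the first managed pgmap and disabled with the last.
 */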

static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

/* Yield the CPU periodically so long pfn walks do not trigger soft lockups. */
static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;
	int nid;

	pgmap->ops->kill(pgmap);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));
	pgmap->ops->cleanup(pgmap);

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	nid = page_to_nid(pfn_to_page(align_start >> PAGE_SHIFT));

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		pfn = align_start >> PAGE_SHIFT;
		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
				align_size >> PAGE_SHIFT, NULL);
	} else {
		arch_remove_memory(nid, align_start, align_size,
				pgmap->altmap_valid ? &pgmap->altmap : NULL);
		kasan_remove_zero_shadow(__va(align_start), align_size);
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_array_delete(res);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref, type and ops members of @pgmap must be
 *    initialized by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    altmap_valid must be set to true
 *
 * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped
 *    at devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	struct mhp_restrictions restrictions = {
		/*
		 * We do not want any optional features, only our own memmap.
		 */
		.altmap = altmap,
	};
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;
	bool need_devmap_managed = true;

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_DEVDAX:
	case MEMORY_DEVICE_PCI_P2PDMA:
		need_devmap_managed = false;
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref || !pgmap->ops || !pgmap->ops->kill ||
	    !pgmap->ops->cleanup) {
		WARN(1, "Missing reference count teardown definition\n");
		return ERR_PTR(-EINVAL);
	}

	if (need_devmap_managed) {
		error = devmap_managed_enable_get(dev, pgmap);
		if (error)
			return ERR_PTR(error);
	}

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	align_end = align_start + align_size - 1;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
	if (conflict_pgmap) {
		dev_WARN(dev, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}

	pgmap->dev = dev;

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is inaccessible, so we do not want to
	 * create a linear mapping for the memory like arch_add_memory()
	 * would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, &restrictions);
	} else {
		error = kasan_add_zero_shadow(__va(align_start), align_size);
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, align_start, align_size,
					&restrictions);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				align_start >> PAGE_SHIFT,
				align_size >> PAGE_SHIFT, pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);

	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(align_start), align_size);
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	pgmap->ops->kill(pgmap);
	pgmap->ops->cleanup(pgmap);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
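
/*
 * Illustrative usage sketch (hypothetical driver, not part of this file):
 * a MEMORY_DEVICE_PRIVATE user initializes res, ref, type and ops before
 * calling devm_memremap_pages(), and must now supply the migrate_to_ram
 * method checked above:
 *
 *	static const struct dev_pagemap_ops example_pagemap_ops = {
 *		.page_free	= example_page_free,
 *		.kill		= example_kill,
 *		.cleanup	= example_cleanup,
 *		.migrate_to_ram	= example_migrate_to_ram,
 *	};
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->ref = &example_ref;		// a live percpu_ref
 *	pgmap->ops = &example_pagemap_ops;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * All example_* names are stand-ins.  migrate_to_ram is invoked from the
 * CPU page fault path to migrate device-private pages back to system RAM.
 */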

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

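/*
 * Worked example with made-up numbers: an altmap with reserve = 2 and
 * free = 2048 reports vmem_altmap_offset() == 2050, i.e. pfn_to_page()
 * only becomes valid 2050 pfns past the base pfn, because the leading
 * pfns hold the reservation and the space the memmap itself is
 * allocated from.
 */
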
/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a page_map for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
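
/*
 * Illustrative caller pattern (hypothetical, not from this file): feeding
 * the previous result back in hits the cached fast path above, so a scan
 * over many pfns only pays for the xarray lookup when it crosses into a
 * different pgmap:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *	unsigned long pfn;
 *
 *	for (pfn = start; pfn < end; pfn++)
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 *
 * Each non-NULL return carries a live reference that is either reused on
 * the next call or dropped by the final put_dev_pagemap().
 */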

#ifdef CONFIG_DEV_PAGEMAP_OPS
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If refcount is 1 then page is freed and refcount is stable as nobody
	 * holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		mem_cgroup_uncharge(page);

		page->pgmap->ops->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */
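
/*
 * Sketch of a matching driver callback (hypothetical, for illustration):
 * at this point in the tree ->page_free still takes the pgmap's data
 * cookie, and a typical implementation hands the page back to a
 * device-side allocator once the refcount above has dropped to 1:
 *
 *	static void example_page_free(struct page *page, void *data)
 *	{
 *		struct example_device *edev = data;
 *
 *		example_free_device_block(edev, page_to_pfn(page));
 *	}
 *
 * example_* names are stand-ins for whatever the driver provides.
 */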