mm: Cleanup __put_devmap_managed_page() vs ->page_free()
mm/memremap.c (linux-block.git)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;

static void devmap_managed_enable_put(void)
{
	if (atomic_dec_and_test(&devmap_managed_enable))
		static_branch_disable(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_PRIVATE &&
	    (!pgmap->ops || !pgmap->ops->page_free)) {
		WARN(1, "Missing page_free method\n");
		return -EINVAL;
	}

	if (atomic_inc_return(&devmap_managed_enable) == 1)
		static_branch_enable(&devmap_managed_key);
	return 0;
}
#else
static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	return -EINVAL;
}
static void devmap_managed_enable_put(void)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static void pgmap_array_delete(struct resource *res)
{
	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
			NULL, GFP_KERNEL);
	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	return PHYS_PFN(pgmap->res.start) +
		vmem_altmap_offset(pgmap_altmap(pgmap));
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->kill)
		pgmap->ops->kill(pgmap);
	else
		percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	if (pgmap->ops && pgmap->ops->cleanup) {
		pgmap->ops->cleanup(pgmap);
	} else {
		wait_for_completion(&pgmap->done);
		percpu_ref_exit(pgmap->ref);
	}
	/*
	 * Undo the pgmap ref assignment for the internal case as the
	 * caller may re-enable the same pgmap.
	 */
	if (pgmap->ref == &pgmap->internal_ref)
		pgmap->ref = NULL;
}

void memunmap_pages(struct dev_pagemap *pgmap)
{
	struct resource *res = &pgmap->res;
	struct page *first_page;
	unsigned long pfn;
	int nid;

	dev_pagemap_kill(pgmap);
	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));
	dev_pagemap_cleanup(pgmap);

	/* make sure to access a memmap that was actually initialized */
	first_page = pfn_to_page(pfn_first(pgmap));

	/* pages are dead and unused, undo the arch mapping */
	nid = page_to_nid(first_page);

	mem_hotplug_begin();
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		__remove_pages(PHYS_PFN(res->start),
			       PHYS_PFN(resource_size(res)), NULL);
	} else {
		arch_remove_memory(nid, res->start, resource_size(res),
				pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(res->start), resource_size(res));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
	pgmap_array_delete(res);
	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put();
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}

/*
 * Not device-managed version of devm_memremap_pages(), undone by
 * memunmap_pages().  Please use devm_memremap_pages() if you have a
 * struct device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
	struct resource *res = &pgmap->res;
	struct dev_pagemap *conflict_pgmap;
	struct mhp_restrictions restrictions = {
		/*
		 * We do not want any optional features, only our own memmap.
		 */
		.altmap = pgmap_altmap(pgmap),
	};
	pgprot_t pgprot = PAGE_KERNEL;
	int error, is_ram;
	bool need_devmap_managed = true;

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_DEVDAX:
	case MEMORY_DEVICE_PCI_P2PDMA:
		need_devmap_managed = false;
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	if (!pgmap->ref) {
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	if (need_devmap_managed) {
		error = devmap_managed_enable_get(pgmap);
		if (error)
			return ERR_PTR(error);
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		error = -ENOMEM;
		goto err_array;
	}

	is_ram = region_intersects(res->start, resource_size(res),
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		error = -ENXIO;
		goto err_array;
	}

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
	if (error)
		goto err_array;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
			resource_size(res));
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory.
	 * Moreover, the device memory is not accessible by the CPU, so we
	 * do not want to create a linear mapping for it the way
	 * arch_add_memory() would.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		error = add_pages(nid, PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), &restrictions);
	} else {
		error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, res->start, resource_size(res),
					&restrictions);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), restrictions.altmap);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(res->start),
				PHYS_PFN(resource_size(res)), pgmap);
	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
	return __va(res->start);

 err_add_memory:
	kasan_remove_zero_shadow(__va(res->start), resource_size(res));
 err_kasan:
	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 err_pfn_remap:
	pgmap_array_delete(res);
 err_array:
	dev_pagemap_kill(pgmap);
	dev_pagemap_cleanup(pgmap);
	devmap_managed_enable_put();
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(memremap_pages);
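
/*
 * Hypothetical usage sketch (not part of this file): a caller without a
 * struct device can use memremap_pages() directly and is then responsible
 * for calling memunmap_pages() itself.  The identifiers my_pgmap, my_res
 * and my_nid are illustrative assumptions only.
 *
 *	static struct dev_pagemap my_pgmap;
 *
 *	static void *my_map_region(struct resource *my_res, int my_nid)
 *	{
 *		my_pgmap.res = *my_res;			// host memory range
 *		my_pgmap.type = MEMORY_DEVICE_DEVDAX;	// CPU-accessible type
 *		// pgmap->ref left NULL: memremap_pages() then uses its
 *		// internal_ref for reference counting and teardown.
 *		return memremap_pages(&my_pgmap, my_nid);
 *	}
 */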

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 *
 * A hypothetical usage sketch follows the function body below.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
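
/*
 * Hypothetical driver-side sketch (not part of this file) following the
 * notes above: only res and type are mandatory, and teardown is handled
 * automatically through the devres action registered above.  The names
 * my_devm_map, my_dev, my_start and my_size are assumptions for
 * illustration.
 *
 *	static void *my_devm_map(struct device *my_dev,
 *				 resource_size_t my_start,
 *				 resource_size_t my_size)
 *	{
 *		struct dev_pagemap *pgmap;
 *
 *		pgmap = devm_kzalloc(my_dev, sizeof(*pgmap), GFP_KERNEL);
 *		if (!pgmap)
 *			return ERR_PTR(-ENOMEM);
 *
 *		pgmap->res.start = my_start;
 *		pgmap->res.end = my_start + my_size - 1;	// inclusive end
 *		pgmap->type = MEMORY_DEVICE_DEVDAX;		// see note 1/
 *		return devm_memremap_pages(my_dev, pgmap);
 *	}
 */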

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
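
/*
 * Hypothetical lookup sketch (not part of this file): the pgmap returned
 * by one call can be passed back in so that a still-covering mapping
 * skips the xarray walk; only the final reference needs to be dropped.
 * The function name my_scan_pfns and the pfns[] array are illustrative
 * assumptions.
 *
 *	static void my_scan_pfns(const unsigned long *pfns, unsigned int nr)
 *	{
 *		struct dev_pagemap *pgmap = NULL;
 *		unsigned int i;
 *
 *		for (i = 0; i < nr; i++)
 *			pgmap = get_dev_pagemap(pfns[i], pgmap);
 *		if (pgmap)
 *			put_dev_pagemap(pgmap);	// drop the last live reference
 *	}
 */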

#ifdef CONFIG_DEV_PAGEMAP_OPS
void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/* still busy */
	if (count > 1)
		return;

	/* only triggered by the dev_pagemap shutdown path */
	if (count == 0) {
		__put_page(page);
		return;
	}

	/* notify page idle for dax */
	if (!is_device_private_page(page)) {
		wake_up_var(&page->_refcount);
		return;
	}

	/* Clear Active bit in case of parallel mark_page_accessed */
	__ClearPageActive(page);
	__ClearPageWaiters(page);

	mem_cgroup_uncharge(page);

	/*
	 * When a device_private page is freed, the page->mapping field
	 * may still contain a (stale) mapping value. For example, the
	 * lower bits of page->mapping may still identify the page as an
	 * anonymous page. Ultimately, this entire field is just stale
	 * and wrong, and it will cause errors if not cleared. One
	 * example is:
	 *
	 *   migrate_vma_pages()
	 *     migrate_vma_insert_page()
	 *       page_add_new_anon_rmap()
	 *         __page_set_anon_rmap()
	 *           ...checks page->mapping, via PageAnon(page) call,
	 *           and incorrectly concludes that the page is an
	 *           anonymous page. Therefore, it incorrectly,
	 *           silently fails to set up the new anon rmap.
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
	 * to clear page->mapping.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);
}
EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */