/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
        return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

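/*
 * Illustrative sketch, not part of this file: a hypothetical namespace
 * provider rejecting a layout that the architecture could not remap in
 * every mode, per the comment above.  The function and parameter names
 * are made up for the example.
 */
#if 0
static int example_check_namespace(resource_size_t start, resource_size_t size)
{
        unsigned long align = memremap_compat_align();

        /* refuse namespaces that only satisfy PAGE_SIZE alignment */
        if (!IS_ALIGNED(start, align) || !IS_ALIGNED(size, align))
                return -EINVAL;
        return 0;
}
#endif
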
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

static void devmap_managed_enable_put(void)
{
        static_branch_dec(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        if (pgmap->type == MEMORY_DEVICE_PRIVATE &&
            (!pgmap->ops || !pgmap->ops->page_free)) {
                WARN(1, "Missing page_free method\n");
                return -EINVAL;
        }

        static_branch_inc(&devmap_managed_key);
        return 0;
}
#else
static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        return -EINVAL;
}
static void devmap_managed_enable_put(void)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static void pgmap_array_delete(struct range *range)
{
        xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
                        NULL, GFP_KERNEL);
        synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
        struct range *range = &pgmap->ranges[range_id];
        unsigned long pfn = PHYS_PFN(range->start);

        if (range_id)
                return pfn;
        return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
        const struct range *range = &pgmap->ranges[range_id];

        return (range->start + range_len(range)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
        if (pfn % 1024 == 0)
                cond_resched();
        return pfn + 1;
}

#define for_each_device_pfn(pfn, map, i) \
        for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))

static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
        if (pgmap->ops && pgmap->ops->kill)
                pgmap->ops->kill(pgmap);
        else
                percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
        if (pgmap->ops && pgmap->ops->cleanup) {
                pgmap->ops->cleanup(pgmap);
        } else {
                wait_for_completion(&pgmap->done);
                percpu_ref_exit(pgmap->ref);
        }
        /*
         * Undo the pgmap ref assignment for the internal case as the
         * caller may re-enable the same pgmap.
         */
        if (pgmap->ref == &pgmap->internal_ref)
                pgmap->ref = NULL;
}

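/*
 * Illustrative sketch, not part of this file: a hypothetical driver that
 * supplies its own reference counter must also supply matching ->kill()
 * and ->cleanup() callbacks, which is the pairing dev_pagemap_kill() and
 * dev_pagemap_cleanup() above dispatch to (and which memremap_pages()
 * enforces).  All names below are made up for the example.
 */
#if 0
struct example_dev {
        struct percpu_ref ref;          /* assigned to pgmap.ref before mapping */
        struct completion ref_done;
        struct dev_pagemap pgmap;
};

static void example_ref_release(struct percpu_ref *ref)
{
        struct example_dev *edev = container_of(ref, struct example_dev, ref);

        complete(&edev->ref_done);
}

static void example_kill(struct dev_pagemap *pgmap)
{
        struct example_dev *edev = container_of(pgmap, struct example_dev, pgmap);

        percpu_ref_kill(&edev->ref);
}

static void example_cleanup(struct dev_pagemap *pgmap)
{
        struct example_dev *edev = container_of(pgmap, struct example_dev, pgmap);

        wait_for_completion(&edev->ref_done);
        percpu_ref_exit(&edev->ref);
}

static const struct dev_pagemap_ops example_pgmap_ops = {
        .kill           = example_kill,
        .cleanup        = example_cleanup,
};
#endif
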
static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
        struct range *range = &pgmap->ranges[range_id];
        struct page *first_page;
        int nid;

        /* make sure to access a memmap that was actually initialized */
        first_page = pfn_to_page(pfn_first(pgmap, range_id));

        /* pages are dead and unused, undo the arch mapping */
        nid = page_to_nid(first_page);

        mem_hotplug_begin();
        remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
                                   PHYS_PFN(range_len(range)));
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                __remove_pages(PHYS_PFN(range->start),
                               PHYS_PFN(range_len(range)), NULL);
        } else {
                arch_remove_memory(nid, range->start, range_len(range),
                                pgmap_altmap(pgmap));
                kasan_remove_zero_shadow(__va(range->start), range_len(range));
        }
        mem_hotplug_done();

        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
        pgmap_array_delete(range);
}

void memunmap_pages(struct dev_pagemap *pgmap)
{
        unsigned long pfn;
        int i;

        dev_pagemap_kill(pgmap);
        for (i = 0; i < pgmap->nr_range; i++)
                for_each_device_pfn(pfn, pgmap, i)
                        put_page(pfn_to_page(pfn));
        dev_pagemap_cleanup(pgmap);

        for (i = 0; i < pgmap->nr_range; i++)
                pageunmap_range(pgmap, i);

        WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
        devmap_managed_enable_put();
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
        memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
        struct dev_pagemap *pgmap =
                container_of(ref, struct dev_pagemap, internal_ref);

        complete(&pgmap->done);
}

static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
                int range_id, int nid)
{
        struct range *range = &pgmap->ranges[range_id];
        struct dev_pagemap *conflict_pgmap;
        int error, is_ram;

        if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
                                "altmap not supported for multiple ranges\n"))
                return -EINVAL;

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                return -ENOMEM;
        }

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                return -ENOMEM;
        }

        is_ram = region_intersects(range->start, range_len(range),
                                   IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

        if (is_ram != REGION_DISJOINT) {
                WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
                                is_ram == REGION_MIXED ? "mixed" : "ram",
                                range->start, range->end);
                return -ENXIO;
        }

        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
                                PHYS_PFN(range->end), pgmap, GFP_KERNEL));
        if (error)
                return error;

        if (nid < 0)
                nid = numa_mem_id();

        error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
                        range_len(range));
        if (error)
                goto err_pfn_remap;

        mem_hotplug_begin();

        /*
         * For device private memory we call add_pages() as we only need to
         * allocate and initialize struct page for the device memory. More-
         * over the device memory is un-accessible thus we do not want to
         * create a linear mapping for the memory like arch_add_memory()
         * would do.
         *
         * For all other device memory types, which are accessible by
         * the CPU, we do want the linear mapping and thus use
         * arch_add_memory().
         */
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                error = add_pages(nid, PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), params);
        } else {
                error = kasan_add_zero_shadow(__va(range->start), range_len(range));
                if (error) {
                        mem_hotplug_done();
                        goto err_kasan;
                }

                error = arch_add_memory(nid, range->start, range_len(range),
                                        params);
        }

        if (!error) {
                struct zone *zone;

                zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
                move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), params->altmap);
        }

        mem_hotplug_done();
        if (error)
                goto err_add_memory;

        /*
         * Initialization of the pages has been deferred until now in order
         * to allow us to do the work while not holding the hotplug lock.
         */
        memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), pgmap);
        percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
                        - pfn_first(pgmap, range_id));
        return 0;

err_add_memory:
        kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
err_pfn_remap:
        pgmap_array_delete(range);
        return error;
}

/*
 * Not device managed version of devm_memremap_pages(), undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
        struct mhp_params params = {
                .altmap = pgmap_altmap(pgmap),
                .pgprot = PAGE_KERNEL,
        };
        const int nr_range = pgmap->nr_range;
        bool need_devmap_managed = true;
        int error, i;

        if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
                return ERR_PTR(-EINVAL);

        switch (pgmap->type) {
        case MEMORY_DEVICE_PRIVATE:
                if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
                        WARN(1, "Device private memory not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
                        WARN(1, "Missing migrate_to_ram method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->owner) {
                        WARN(1, "Missing owner\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_FS_DAX:
                if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
                    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
                        WARN(1, "File system DAX not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_GENERIC:
                need_devmap_managed = false;
                break;
        case MEMORY_DEVICE_PCI_P2PDMA:
                params.pgprot = pgprot_noncached(params.pgprot);
                need_devmap_managed = false;
                break;
        default:
                WARN(1, "Invalid pgmap type %d\n", pgmap->type);
                break;
        }

        if (!pgmap->ref) {
                if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
                        return ERR_PTR(-EINVAL);

                init_completion(&pgmap->done);
                error = percpu_ref_init(&pgmap->internal_ref,
                                dev_pagemap_percpu_release, 0, GFP_KERNEL);
                if (error)
                        return ERR_PTR(error);
                pgmap->ref = &pgmap->internal_ref;
        } else {
                if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
                        WARN(1, "Missing reference count teardown definition\n");
                        return ERR_PTR(-EINVAL);
                }
        }

        if (need_devmap_managed) {
                error = devmap_managed_enable_get(pgmap);
                if (error)
                        return ERR_PTR(error);
        }

        /*
         * Clear the pgmap nr_range as it will be incremented for each
         * successfully processed range. This communicates how many
         * regions to unwind in the abort case.
         */
        pgmap->nr_range = 0;
        error = 0;
        for (i = 0; i < nr_range; i++) {
                error = pagemap_range(pgmap, &params, i, nid);
                if (error)
                        break;
                pgmap->nr_range++;
        }

        if (i < nr_range) {
                memunmap_pages(pgmap);
                pgmap->nr_range = nr_range;
                return ERR_PTR(error);
        }

        return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);

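/*
 * Illustrative sketch, not part of this file: pairing memremap_pages()
 * with memunmap_pages() when no struct device is available to hand the
 * teardown to devm.  The function names are made up for the example.
 */
#if 0
static void *example_map(struct dev_pagemap *pgmap)
{
        pgmap->type = MEMORY_DEVICE_GENERIC;
        pgmap->nr_range = 1;
        /* pgmap->range must already describe the physical range to map */
        return memremap_pages(pgmap, NUMA_NO_NODE);    /* ERR_PTR() on failure */
}

static void example_unmap(struct dev_pagemap *pgmap)
{
        memunmap_pages(pgmap);
}
#endif
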
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @pgmap
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref must
 *    be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        int error;
        void *ret;

        ret = memremap_pages(pgmap, dev_to_node(dev));
        if (IS_ERR(ret))
                return ret;

        error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
                        pgmap);
        if (error)
                return ERR_PTR(error);
        return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);

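/*
 * Illustrative sketch, not part of this file: the minimum setup the notes
 * above describe - range, nr_range and type filled in, with teardown left
 * to devm.  The probe helper and its parameters are made up for the example.
 */
#if 0
static int example_probe(struct device *dev, resource_size_t start,
                         resource_size_t size, struct dev_pagemap *pgmap)
{
        void *addr;

        pgmap->range = (struct range) {
                .start = start,
                .end = start + size - 1,
        };
        pgmap->nr_range = 1;
        pgmap->type = MEMORY_DEVICE_GENERIC;

        /* teardown is queued via devm, no explicit memunmap_pages() needed */
        addr = devm_memremap_pages(dev, pgmap);
        return PTR_ERR_OR_ZERO(addr);
}
#endif
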
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
        /* number of pfns from base where pfn_to_page() is valid */
        if (altmap)
                return altmap->reserve + altmap->free;
        return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
        altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up page_map
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
{
        resource_size_t phys = PFN_PHYS(pfn);

        /*
         * In the cached case we're already holding a live reference.
         */
        if (pgmap) {
                if (phys >= pgmap->range.start && phys <= pgmap->range.end)
                        return pgmap;
                put_dev_pagemap(pgmap);
        }

        /* fall back to slow path lookup */
        rcu_read_lock();
        pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
        if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
                pgmap = NULL;
        rcu_read_unlock();

        return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

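/*
 * Illustrative sketch, not part of this file: walking a pfn range while
 * reusing the previously returned pgmap as the cache hint, as described
 * in the kernel-doc above.  The walker function is made up for the example.
 */
#if 0
static void example_walk(unsigned long start_pfn, unsigned long nr_pages)
{
        struct dev_pagemap *pgmap = NULL;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                pgmap = get_dev_pagemap(pfn, pgmap);
                if (!pgmap)
                        break;          /* pfn is not device memory */
                /* ... operate on pfn_to_page(pfn) ... */
        }
        if (pgmap)
                put_dev_pagemap(pgmap);
}
#endif
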
#ifdef CONFIG_DEV_PAGEMAP_OPS
void free_devmap_managed_page(struct page *page)
{
        /* notify page idle for dax */
        if (!is_device_private_page(page)) {
                wake_up_var(&page->_refcount);
                return;
        }

        __ClearPageWaiters(page);

        mem_cgroup_uncharge(page);

        /*
         * When a device_private page is freed, the page->mapping field
         * may still contain a (stale) mapping value. For example, the
         * lower bits of page->mapping may still identify the page as an
         * anonymous page. Ultimately, this entire field is just stale
         * and wrong, and it will cause errors if not cleared. One
         * example is:
         *
         *  migrate_vma_pages()
         *    migrate_vma_insert_page()
         *      page_add_new_anon_rmap()
         *        __page_set_anon_rmap()
         *          ...checks page->mapping, via PageAnon(page) call,
         *            and incorrectly concludes that the page is an
         *            anonymous page. Therefore, it incorrectly,
         *            silently fails to set up the new anon rmap.
         *
         * For other types of ZONE_DEVICE pages, migration is either
         * handled differently or not done at all, so there is no need
         * to clear page->mapping.
         */
        page->mapping = NULL;
        page->pgmap->ops->page_free(page);
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */