// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate.h>
#include <linux/pgalloc_tag.h>

/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Until now, we had to modify struct page itself to store extra data per
 * page. This requires rebuilding the kernel, which is a really time
 * consuming process, and sometimes a rebuild is impossible due to third
 * party module dependencies. On top of that, enlarging struct page could
 * cause unwanted changes in system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page in a separate place rather
 * than in struct page itself, and this memory can be accessed through the
 * accessor functions provided by this code. During the boot process, the
 * core checks whether allocating this huge chunk of memory is needed or
 * not; if not, it avoids allocating memory at all. With this advantage, the
 * feature can be included in the kernel by default while avoiding rebuilds
 * and the problems described above.
 *
 * To make this work, there are two callbacks for clients. One is the need
 * callback, which is mandatory if the user wants to avoid useless memory
 * allocation at boot time. The other is the optional init callback, which
 * is used to do proper initialization after memory is allocated.
 *
 * The need callback is used to decide whether the extended memory
 * allocation is needed or not. Sometimes users want to deactivate certain
 * features for a given boot, in which case the extra memory would be
 * unnecessary. To avoid allocating a huge chunk of memory in that case,
 * each client reports its need for extra memory through the need callback.
 * If one of the need callbacks returns true, someone needs extra memory and
 * the page extension core allocates memory for page extension. If none of
 * the need callbacks returns true, memory isn't needed at all for this boot
 * and the page extension core can skip the allocation. As a result, no
 * memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether there is a
 * request for extra space through the size field in struct
 * page_ext_operations. If it is non-zero, extra space is allocated for each
 * page_ext entry and the location of that space is reported back to the
 * user through the offset field in struct page_ext_operations.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely initialized. On sparse memory systems, the extra
 * memory is allocated some time later than the memmap. In other words, the
 * lifetime of the memory for page extension isn't the same as that of the
 * memmap for struct page. Therefore, clients can't store extra data until
 * page extension is initialized, even though pages may already be allocated
 * and freely used. This could leave the extra data per page in an
 * inadequate state, so, to prevent that, clients can use this callback to
 * initialize that state correctly.
 */
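
/*
 * Illustrative sketch (not part of this file): a hypothetical client "foo"
 * that wants a few bytes of per-page data would provide a need callback, an
 * optional init callback and the size of its data. All foo_* names below
 * are made up for the example; only struct page_ext_operations and its
 * fields (size, need, init, offset, need_shared_flags) come from the real
 * interface.
 *
 *	struct foo_data {
 *		unsigned long stamp;
 *	};
 *
 *	static bool need_foo(void)
 *	{
 *		return foo_enabled;	// e.g. decided by a boot parameter
 *	}
 *
 *	static void init_foo(void)
 *	{
 *		// page_ext is fully set up; initialize per-page state here
 *	}
 *
 *	struct page_ext_operations foo_ops = {
 *		.size = sizeof(struct foo_data),
 *		.need = need_foo,
 *		.init = init_foo,
 *	};
 *
 * A real client would also add &foo_ops to the page_ext_ops[] array below.
 * If need_foo() returns true at boot, page_ext_size grows by
 * sizeof(struct foo_data) and foo_ops.offset records where that data lives
 * inside each page_ext entry, i.e. at (void *)page_ext + foo_ops.offset.
 */
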
#ifdef CONFIG_SPARSEMEM
#define PAGE_EXT_INVALID	(0x1)
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
	return true;
}
static struct page_ext_operations page_idle_ops __initdata = {
	.need = need_page_idle,
	.need_shared_flags = true,
};
#endif

static struct page_ext_operations *page_ext_ops[] __initdata = {
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	&page_alloc_tagging_ops,
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
	&page_table_check_ops,
#endif
};

unsigned long page_ext_size;

static unsigned long total_usage;

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
/*
 * To ensure correct allocation tagging for pages, page_ext should be
 * available before the first page allocation. Otherwise early task stacks
 * will be allocated before page_ext initialization and missing tags will be
 * flagged.
 */
bool early_page_ext __meminitdata = true;
#else
bool early_page_ext __meminitdata;
#endif

static int __init setup_early_page_ext(char *str)
{
	early_page_ext = true;
	return 0;
}
early_param("early_page_ext", setup_early_page_ext);
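/*
 * Note: independent of CONFIG_MEM_ALLOC_PROFILING_DEBUG, passing
 * "early_page_ext" on the kernel command line sets early_page_ext and
 * requests the earlier page_ext initialization described above.
 */
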
static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need()) {
			if (page_ext_ops[i]->need_shared_flags) {
				page_ext_size = sizeof(struct page_ext);
				break;
			}
		}
	}

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = page_ext_size;
			page_ext_size += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + page_ext_size * index;
}

#ifndef CONFIG_SPARSEMEM
void __init page_ext_init_flatmem_late(void)
{
	invoke_init_callbacks();
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	WARN_ON_ONCE(!rcu_read_lock_held());
	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if the node range is not aligned to
	 * MAX_ORDER_NR_PAGES. When the page allocator's buddy algorithm
	 * checks a buddy's status, the range could be outside the exact
	 * node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = page_ext_size * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */
static bool page_ext_invalid(struct page_ext *page_ext)
{
	return !page_ext ||
		(((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_ext *page_ext = READ_ONCE(section->page_ext);

	WARN_ON_ONCE(!rcu_read_lock_held());
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (page_ext_invalid(page_ext))
		return NULL;
	return get_entry(page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	addr = vzalloc_node(size, nid);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = page_ext_size * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - page_ext_size * pfn;
	total_usage += table_size;
	return 0;
}

static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = page_ext_size * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;

	base = READ_ONCE(ms->page_ext);
	/*
	 * page_ext here can still be valid while doing the rollback
	 * operation in online_page_ext().
	 */
	if (page_ext_invalid(base))
		base = (void *)base - PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, NULL);

	base = get_entry(base, pfn);
	free_page_ext(base);
}

static void __invalidate_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	void *val;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	val = (void *)ms->page_ext + PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, val);
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == NUMA_NO_NODE) {
		/*
		 * In this case, "nid" already exists and contains valid
		 * memory. "start_pfn" passed to us is a pfn which is an arg
		 * for online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_online(nid));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
		fail = init_section_page_ext(pfn, nid);
	if (!fail)
		return 0;

	/* rollback */
	end = pfn - PAGES_PER_SECTION;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static void __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	/*
	 * Freeing of page_ext is done in 3 steps to avoid
	 * use-after-free of it:
	 * 1) Traverse all the sections and mark their page_ext
	 *    as invalid.
	 * 2) Wait for all the existing users of page_ext who
	 *    started before invalidation to finish.
	 * 3) Free the page_ext.
	 */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__invalidate_page_ext(pfn);

	synchronize_rcu();

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
}

static int __meminit page_ext_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out-of-node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap.
			 * We know some arches can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2|....
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, DEFAULT_CALLBACK_PRI);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif

/**
 * page_ext_get() - Get the extended information for a page.
 * @page: The page we're interested in.
 *
 * Ensures that the page_ext will remain valid until page_ext_put()
 * is called.
 *
 * Return: NULL if no page_ext exists for this page.
 * Context: Any context.  Caller may not sleep until they have called
 *	    page_ext_put().
 */
struct page_ext *page_ext_get(const struct page *page)
{
	struct page_ext *page_ext;

	rcu_read_lock();
	page_ext = lookup_page_ext(page);
	if (!page_ext) {
		rcu_read_unlock();
		return NULL;
	}

	return page_ext;
}

/**
 * page_ext_put() - Working with page extended information is done.
 * @page_ext: Page extended information received from page_ext_get().
 *
 * The page extended information of the page may not be valid after this
 * function is called.
 *
 * Return: None.
 * Context: Any context, as long as a corresponding page_ext_get() was
 *	    called.
 */
void page_ext_put(struct page_ext *page_ext)
{
	if (unlikely(!page_ext))
		return;

	rcu_read_unlock();
}
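
/*
 * Typical caller pattern (illustrative sketch; "my_ops" stands for the
 * caller's own struct page_ext_operations and is not defined here):
 *
 *	struct page_ext *page_ext = page_ext_get(page);
 *
 *	if (page_ext) {
 *		void *data = (void *)page_ext + my_ops.offset;
 *
 *		// ... read or update the caller's per-page data ...
 *		page_ext_put(page_ext);
 *	}
 */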