Today we have no observability into per-page metadata and how much of the
machine's capacity it consumes. We therefore want to report the amount of
memory going towards per-page metadata, which can vary depending on build
configuration, machine architecture, and system use.

This patch adds two fields to /proc/vmstat that can be used as shown below:
Accounting for per-page metadata allocated by the boot allocator:
	/proc/vmstat:nr_memmap_boot * PAGE_SIZE
Accounting for per-page metadata allocated by the buddy allocator:
	/proc/vmstat:nr_memmap * PAGE_SIZE
Accounting for the total per-page metadata allocated on the machine:
	(/proc/vmstat:nr_memmap_boot + /proc/vmstat:nr_memmap) * PAGE_SIZE
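
For example, a minimal userspace reader along these lines (an illustrative
sketch, not part of this patch) can turn the two counters into byte totals:

	/*
	 * Illustrative sketch (not part of this patch): read the new
	 * nr_memmap and nr_memmap_boot fields and report byte totals.
	 */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		long long nr_memmap = 0, nr_memmap_boot = 0, val;
		long page_size = sysconf(_SC_PAGESIZE);
		char name[64];
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f)
			return 1;
		while (fscanf(f, "%63s %lld", name, &val) == 2) {
			if (!strcmp(name, "nr_memmap"))
				nr_memmap = val;
			else if (!strcmp(name, "nr_memmap_boot"))
				nr_memmap_boot = val;
		}
		fclose(f);
		printf("boot allocator:  %lld bytes\n",
		       nr_memmap_boot * page_size);
		printf("buddy allocator: %lld bytes\n",
		       nr_memmap * page_size);
		printf("total:           %lld bytes\n",
		       (nr_memmap + nr_memmap_boot) * page_size);
		return 0;
	}

No privileges are needed, since /proc/vmstat is world-readable.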
Utility for userspace:
Observability: report the amount of memory overhead going to per-page
metadata on the system at any given time, since this overhead is not
currently observable.
Debugging: tracking changes in, or the absolute value of, struct page
consumption can help detect anomalies, as these can be correlated with
other metrics on the machine (e.g., MemTotal, number of huge pages, etc.).
page_ext overheads: some kernel features, such as page_owner and
page_table_check, use page_ext and can be optionally enabled via kernel
parameters. Having the total per-page metadata information helps users
measure their impact precisely. Furthermore, the per-page metadata metrics
will reflect the amount of struct page memory relinquished (i.e., overhead
reduced) when hugetlbfs pages are reserved, which varies depending on
whether HugeTLB vmemmap optimization is enabled; see the worked example
below.
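
As a worked example (assuming x86-64 with 4 KiB pages and a 64-byte struct
page; both are configuration-dependent): every 4 KiB of managed memory
carries 64 bytes of memmap, so struct pages consume roughly 64 / 4096 ~=
1.6% of memory, i.e. about 1 GiB on a 64 GiB machine. A 2 MiB HugeTLB page
is described by 512 struct pages occupying 8 vmemmap pages (32 KiB); with
vmemmap optimization, 7 of those 8 pages are freed, so each reserved huge
page should lower the combined counters by 7 pages (28 KiB).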
For background and results see:
https://lore.kernel.org/all/20240220214558.3377482-1-souravpanda@google.com
Link: https://lkml.kernel.org/r/20240605222751.1406125-1-souravpanda@google.com
Signed-off-by: Sourav Panda <souravpanda@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Chen Linxuan <chenlinxuan@uniontech.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Ivan Babrou <ivan@cloudflare.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Tomas Mudrunka <tomas.mudrunka@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Xu <weixugc@google.com>
Cc: Yang Yang <yang.yang29@zte.com.cn>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
PGDEMOTE_KSWAPD,
PGDEMOTE_DIRECT,
PGDEMOTE_KHUGEPAGED,
+ NR_MEMMAP, /* page metadata allocated through buddy allocator */
+ NR_MEMMAP_BOOT, /* page metadata allocated through boot allocator */
NR_VM_NODE_STAT_ITEMS
};
{
lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
+
+void __meminit mod_node_early_perpage_metadata(int nid, long delta);
+void __meminit store_early_perpage_metadata(void);
+
#endif /* _LINUX_VMSTAT_H */
*/
static inline void free_vmemmap_page(struct page *page)
{
- if (PageReserved(page))
+ if (PageReserved(page)) {
free_bootmem_page(page);
- else
+ mod_node_page_state(page_pgdat(page), NR_MEMMAP_BOOT, -1);
+ } else {
__free_page(page);
+ mod_node_page_state(page_pgdat(page), NR_MEMMAP, -1);
+ }
}
/* Free a list of the vmemmap pages */
copy_page(page_to_virt(walk.reuse_page),
(void *)walk.reuse_addr);
list_add(&walk.reuse_page->lru, vmemmap_pages);
+ mod_node_page_state(NODE_DATA(nid), NR_MEMMAP, 1);
}
/*
unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
int nid = page_to_nid((struct page *)start);
struct page *page, *next;
+ int i;
- while (nr_pages--) {
+ for (i = 0; i < nr_pages; i++) {
page = alloc_pages_node(nid, gfp_mask, 0);
- if (!page)
+ if (!page) {
+ mod_node_page_state(NODE_DATA(nid), NR_MEMMAP, i);
goto out;
+ }
list_add(&page->lru, list);
}
+ mod_node_page_state(NODE_DATA(nid), NR_MEMMAP, nr_pages);
+
return 0;
out:
list_for_each_entry_safe(page, next, list, lru)
#include <linux/cma.h>
#include <linux/crash_dump.h>
#include <linux/execmem.h>
+#include <linux/vmstat.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"
panic("Failed to allocate %ld bytes for node %d memory map\n",
size, pgdat->node_id);
pgdat->node_mem_map = map + offset;
+ mod_node_early_perpage_metadata(pgdat->node_id,
+ DIV_ROUND_UP(size, PAGE_SIZE));
pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
__func__, pgdat->node_id, (unsigned long)pgdat,
(unsigned long)pgdat->node_mem_map);
for_each_online_pgdat(pgdat)
pgdat->per_cpu_nodestats =
alloc_percpu(struct per_cpu_nodestat);
+ store_early_perpage_metadata();
}
__meminit void zone_pcp_init(struct zone *zone)
return -ENOMEM;
NODE_DATA(nid)->node_page_ext = base;
total_usage += table_size;
+ mod_node_page_state(NODE_DATA(nid), NR_MEMMAP_BOOT,
+ DIV_ROUND_UP(table_size, PAGE_SIZE));
return 0;
}
void *addr = NULL;
addr = alloc_pages_exact_nid(nid, size, flags);
- if (addr) {
+ if (addr)
kmemleak_alloc(addr, size, 1, flags);
- return addr;
- }
+ else
+ addr = vzalloc_node(size, nid);
- addr = vzalloc_node(size, nid);
+ if (addr) {
+ mod_node_page_state(NODE_DATA(nid), NR_MEMMAP,
+ DIV_ROUND_UP(size, PAGE_SIZE));
+ }
return addr;
}
static void free_page_ext(void *addr)
{
+ size_t table_size;
+ struct page *page;
+ struct pglist_data *pgdat;
+
+ table_size = page_ext_size * PAGES_PER_SECTION;
+
if (is_vmalloc_addr(addr)) {
+ page = vmalloc_to_page(addr);
+ pgdat = page_pgdat(page);
vfree(addr);
} else {
- struct page *page = virt_to_page(addr);
- size_t table_size;
-
- table_size = page_ext_size * PAGES_PER_SECTION;
-
+ page = virt_to_page(addr);
+ pgdat = page_pgdat(page);
BUG_ON(PageReserved(page));
kmemleak_free(addr);
free_pages_exact(addr, table_size);
}
+
+ mod_node_page_state(pgdat, NR_MEMMAP,
+ -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
+
}
static void __free_page_ext(unsigned long pfn)
if (r < 0)
return NULL;
+ if (system_state == SYSTEM_BOOTING) {
+ mod_node_early_perpage_metadata(nid, DIV_ROUND_UP(end - start,
+ PAGE_SIZE));
+ } else {
+ mod_node_page_state(NODE_DATA(nid), NR_MEMMAP,
+ DIV_ROUND_UP(end - start, PAGE_SIZE));
+ }
+
return pfn_to_page(pfn);
}
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/bootmem_info.h>
-
+#include <linux/vmstat.h>
#include "internal.h"
#include <asm/dma.h>
*/
sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
sparsemap_buf_end = sparsemap_buf + size;
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+ mod_node_early_perpage_metadata(nid, DIV_ROUND_UP(size, PAGE_SIZE));
+#endif
}
static void __init sparse_buffer_fini(void)
unsigned long start = (unsigned long) pfn_to_page(pfn);
unsigned long end = start + nr_pages * sizeof(struct page);
+ mod_node_page_state(page_pgdat(pfn_to_page(pfn)), NR_MEMMAP,
+ -1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));
vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
"pgdemote_kswapd",
"pgdemote_direct",
"pgdemote_khugepaged",
-
+ "nr_memmap",
+ "nr_memmap_boot",
/* enum writeback_stat_item counters */
"nr_dirty_threshold",
"nr_dirty_background_threshold",
}
module_init(extfrag_debug_init);
+
#endif
+
+/*
+ * Page metadata size (struct page and page_ext) in pages
+ */
+static unsigned long early_perpage_metadata[MAX_NUMNODES] __meminitdata;
+
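+/* Track boot-allocator page metadata for @nid before node stats exist */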
+void __meminit mod_node_early_perpage_metadata(int nid, long delta)
+{
+ early_perpage_metadata[nid] += delta;
+}
+
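+/* Fold the boot-time totals into each node's NR_MEMMAP_BOOT counter */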
+void __meminit store_early_perpage_metadata(void)
+{
+ int nid;
+ struct pglist_data *pgdat;
+
+ for_each_online_pgdat(pgdat) {
+ nid = pgdat->node_id;
+ mod_node_page_state(NODE_DATA(nid), NR_MEMMAP_BOOT,
+ early_perpage_metadata[nid]);
+ }
+}