// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel pagetables and dumps the
 * information about the used sections of memory to
 * /sys/kernel/debug/kernel_pagetables.
 *
 * Derived from the arm64 implementation:
 * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
 * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ptdump.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
#include <linux/const.h>
#include <linux/kasan.h>
#include <asm/page.h>
#include <asm/hugetlb.h>

#include <mm/mmu_decl.h>

#include "ptdump.h"
/*
 * To visualise what is happening,
 *
 *  - PTRS_PER_P** = how many entries there are in the corresponding P**
 *  - P**_SHIFT = how many bits of the address we use to index into the
 *    corresponding P**
 *  - P**_SIZE is how much memory we can access through the table - not the
 *    size of the table itself.
 * P**={PGD, PUD, PMD, PTE}
 *
 * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
 * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
 * a page.
 *
 * In the case where there are only 3 levels, the PUD is folded into the
 * PGD: every PUD has only one entry which points to the PMD.
 *
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the PTE entries. When the continuity is broken it then
 * dumps out a description of the range - i.e. PTEs that are virtually
 * contiguous with the same PTE flags are chunked together. This is to make
 * it clear how different areas of the kernel virtual memory are used.
 */
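/*
 * For illustration only (addresses, size and flags below are hypothetical),
 * one grouped range in the resulting dump looks like:
 *
 *   ---[ Start of kernel VM ]---
 *   0xc000000000000000-0xc0000000003fffff  0x0000000000000000         4M  r  w
 *
 * i.e. virtual start-end, physical start, a human-readable size, then the
 * decoded flags shared by that contiguous run of identically-mapped entries.
 */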
struct pg_state {
	struct ptdump_state ptdump;
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned long start_pa;
	int level;
	u64 current_flags;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
};
static struct addr_marker address_markers[] = {
	{ 0,	"Start of kernel VM" },
#ifdef MODULES_VADDR
	{ 0,	"modules start" },
	{ 0,	"modules end" },
#endif
	{ 0,	"vmalloc() Area" },
	{ 0,	"vmalloc() End" },
#ifdef CONFIG_PPC64
	{ 0,	"isa I/O start" },
	{ 0,	"isa I/O end" },
	{ 0,	"phb I/O start" },
	{ 0,	"phb I/O end" },
	{ 0,	"I/O remap start" },
	{ 0,	"I/O remap end" },
	{ 0,	"vmemmap start" },
#else
	{ 0,	"Early I/O remap start" },
	{ 0,	"Early I/O remap end" },
#ifdef CONFIG_HIGHMEM
	{ 0,	"Highmem PTEs start" },
	{ 0,	"Highmem PTEs end" },
#endif
	{ 0,	"Fixmap start" },
	{ 0,	"Fixmap end" },
#endif
#ifdef CONFIG_KASAN
	{ 0,	"kasan shadow mem start" },
	{ 0,	"kasan shadow mem end" },
#endif
	{ -1,	NULL },
};
static struct ptdump_range ptdump_range[] __ro_after_init = {
	{TASK_SIZE_MAX, ~0UL},
	{0, 0}
};
/*
 * Print helpers that tolerate a NULL seq_file, so the W+X checker can
 * reuse the walker without producing any output.
 */
#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_putc(m, c)		\
({					\
	if (m)				\
		seq_putc(m, c);		\
})
void pt_dump_size(struct seq_file *m, unsigned long size)
{
	static const char units[] = " KMGTPE";
	const char *unit = units;

	/* Work out the appropriate unit to use */
	while (!(size & 1023) && unit[1]) {
		size >>= 10;
		unit++;
	}
	pt_dump_seq_printf(m, "%9lu%c ", size, *unit);
}
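/*
 * Worked example: the size is scaled down by 1024 for as long as it divides
 * evenly, so
 *   pt_dump_size(m, 4096)      prints "        4K "
 *   pt_dump_size(m, 0x200000)  prints "        2M "
 * while an odd size such as 1025 is printed unscaled, in bytes.
 */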
static void dump_flag_info(struct pg_state *st, const struct flag_info
		*flag, u64 pte, int num)
{
	unsigned int i;

	for (i = 0; i < num; i++, flag++) {
		const char *s = NULL;
		u64 val;

		/* flag not defined so don't check it */
		if (flag->mask == 0)
			continue;
		/* Some 'flags' are actually values */
		if (flag->is_val) {
			val = pte & flag->val;
			if (flag->shift)
				val = val >> flag->shift;
			pt_dump_seq_printf(st->seq, " %s:%llx", flag->set, val);
		} else {
			if ((pte & flag->mask) == flag->val)
				s = flag->set;
			else
				s = flag->clear;
			if (s)
				pt_dump_seq_printf(st->seq, " %s", s);
		}
		st->current_flags &= ~flag->mask;
	}
	if (st->current_flags != 0)
		pt_dump_seq_printf(st->seq, " unknown flags:%llx", st->current_flags);
}
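/*
 * Worked example (hypothetical flag_info entry): with
 *   { .mask = 0x2, .val = 0x2, .set = "w", .clear = " " }
 * a PTE with bit 1 set prints " w" and one without prints "  "; in both
 * cases bit 1 is cleared from st->current_flags, so whatever remains at the
 * end of the loop is reported as "unknown flags".
 */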
static void dump_addr(struct pg_state *st, unsigned long addr)
{
#ifdef CONFIG_PPC64
#define REG	"0x%016lx"
#else
#define REG	"0x%08lx"
#endif

	pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
	pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
	pt_dump_size(st->seq, addr - st->start_address);
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	pte_t pte = __pte(st->current_flags);

	if (!st->check_wx)
		return;

	if (!pte_write(pte) || !pte_exec(pte))
		return;

	WARN_ONCE(IS_ENABLED(CONFIG_DEBUG_WX),
		  "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
static void note_page_update_state(struct pg_state *st, unsigned long addr, int level, u64 val)
{
	u64 flag = level >= 0 ? val & pg_level[level].mask : 0;
	u64 pa = val & PTE_RPN_MASK;

	st->level = level;
	st->current_flags = flag;
	st->start_address = addr;
	st->start_pa = pa;

	while (addr >= st->marker[1].start_address) {
		st->marker++;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	}
}
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
{
	u64 flag = level >= 0 ? val & pg_level[level].mask : 0;
	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);

	/* At first no level is set */
	if (st->level == -1) {
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
		note_page_update_state(st, addr, level, val);
	/*
	 * Dump the section of virtual memory when:
	 *   - the PTE flags from one entry to the next differ.
	 *   - we change levels in the tree.
	 *   - the address is in a different section of memory and is thus
	 *     used for a different purpose, regardless of the flags.
	 */
	} else if (flag != st->current_flags || level != st->level ||
		   addr >= st->marker[1].start_address) {

		/* Check the PTE flags */
		if (st->current_flags) {
			note_prot_wx(st, addr);
			dump_addr(st, addr);

			/* Dump all the flags */
			if (pg_level[st->level].flag)
				dump_flag_info(st, pg_level[st->level].flag,
					       st->current_flags,
					       pg_level[st->level].num);

			pt_dump_seq_putc(st->seq, '\n');
		}

		/*
		 * Address indicates we have passed the end of the
		 * current section of virtual memory
		 */
		note_page_update_state(st, addr, level, val);
	}
}
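/*
 * Example of the flush rule above (hypothetical layout): sixteen contiguous
 * 64K PTEs with identical flags are reported as a single 1M range. If the
 * seventeenth entry drops the write bit, or the walk crosses the next
 * address marker, the accumulated range is printed and a new one starts.
 */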
static void populate_markers(void)
{
	int i = 0;

#ifdef CONFIG_PPC64
	address_markers[i++].start_address = PAGE_OFFSET;
#else
	address_markers[i++].start_address = TASK_SIZE;
#endif
#ifdef MODULES_VADDR
	address_markers[i++].start_address = MODULES_VADDR;
	address_markers[i++].start_address = MODULES_END;
#endif
	address_markers[i++].start_address = VMALLOC_START;
	address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
	address_markers[i++].start_address = ISA_IO_BASE;
	address_markers[i++].start_address = ISA_IO_END;
	address_markers[i++].start_address = PHB_IO_BASE;
	address_markers[i++].start_address = PHB_IO_END;
	address_markers[i++].start_address = IOREMAP_BASE;
	address_markers[i++].start_address = IOREMAP_END;
	/* What is the ifdef about? */
#ifdef CONFIG_PPC_BOOK3S_64
	address_markers[i++].start_address = H_VMEMMAP_START;
#else
	address_markers[i++].start_address = VMEMMAP_BASE;
#endif
#else /* !CONFIG_PPC64 */
	address_markers[i++].start_address = ioremap_bot;
	address_markers[i++].start_address = IOREMAP_TOP;
#ifdef CONFIG_HIGHMEM
	address_markers[i++].start_address = PKMAP_BASE;
	address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
#endif /* CONFIG_HIGHMEM */
	address_markers[i++].start_address = FIXADDR_START;
	address_markers[i++].start_address = FIXADDR_TOP;
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_KASAN
	address_markers[i++].start_address = KASAN_SHADOW_START;
	address_markers[i++].start_address = KASAN_SHADOW_END;
#endif
}
static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.marker = address_markers,
		.level = -1,
		.ptdump = {
			.note_page = note_page,
			.range = ptdump_range,
		}
	};

	/* Traverse kernel page tables */
	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ptdump);
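/*
 * Usage sketch (output values hypothetical): with CONFIG_PTDUMP_DEBUGFS=y
 * the dump is read from debugfs, e.g.
 *
 *   # cat /sys/kernel/debug/kernel_page_tables
 *   ---[ Start of kernel VM ]---
 *   0xc000000000000000-0xc000000003ffffff  0x0000000000000000        64M  r  w
 *
 * Each read walks init_mm's page tables from scratch via ptdump_walk_pgd().
 */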
static void __init build_pgtable_complete_mask(void)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].flag)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].flag[j].mask;
}
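/*
 * Worked example (hypothetical masks): if a level describes two flags with
 * .mask = 0x1 and .mask = 0x2, its complete mask becomes 0x3. note_page()
 * then compares entries on those bits only, and dump_flag_info() reports
 * any set bits outside the complete mask as "unknown flags".
 */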
bool ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{ 0, NULL},
			{ -1, NULL},
		},
		.level = -1,
		.check_wx = true,
		.ptdump = {
			.note_page = note_page,
			.range = ptdump_range,
		}
	};

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !mmu_has_feature(MMU_FTR_KERNEL_RO))
		return true;

	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);

	if (st.wx_pages) {
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
			st.wx_pages);

		return false;
	} else {
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");

		return true;
	}
}
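/*
 * ptdump_check_wx() reuses the same walker with seq == NULL, so nothing is
 * printed; note_prot_wx() only counts W+X pages. It is assumed here to be
 * invoked through the generic CONFIG_DEBUG_WX machinery once the kernel has
 * marked rodata read-only.
 */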
static int __init ptdump_init(void)
{
#ifdef CONFIG_PPC64
	if (!radix_enabled())
		ptdump_range[0].start = KERN_VIRT_START;
	else
		ptdump_range[0].start = PAGE_OFFSET;

	ptdump_range[0].end = PAGE_OFFSET + (PGDIR_SIZE * PTRS_PER_PGD);
#endif

	populate_markers();
	build_pgtable_complete_mask();

	if (IS_ENABLED(CONFIG_PTDUMP_DEBUGFS))
		debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
	return 0;
}
device_initcall(ptdump_init);