// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel pagetables and dumps the
 * information about the used sections of memory to
 * /sys/kernel/debug/kernel_pagetables.
 *
 * Derived from the arm64 implementation:
 * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
 * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <linux/const.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

#include "ptdump.h"
/*
 * To visualise what is happening,
 *
 *  - PTRS_PER_P** = how many entries there are in the corresponding P**
 *  - P**_SHIFT = how many bits of the address we use to index into the
 *    corresponding P**
 *  - P**_SIZE is how much memory we can access through the table - not the
 *    size of the table itself.
 *  P** = {PGD, PUD, PMD, PTE}
 *
 * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
 * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
 * a page.
 *
 * In the case where there are only 3 levels, the PUD is folded into the
 * PGD: every PUD has only one entry which points to the PMD.
 *
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the PTE entries. When the continuity is broken it then
 * dumps out a description of the range - ie PTEs that are virtually contiguous
 * with the same PTE flags are chunked together. This is to make it clear how
 * different areas of the kernel virtual memory are used.
 */
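/*
 * For illustration only (the exact columns and flag names depend on the
 * platform's pg_level table): each chunk is printed as a virtual range,
 * the backing physical address, the mapped size and the decoded flags,
 * roughly like:
 *
 *   ---[ Start of kernel VM ]---
 *   0xc000000000000000-0xc000000003ffffff  0x0000000000000000        64M  ...
 */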
struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned long start_pa;
	unsigned long last_pa;
	unsigned int level;
	u64 current_flags;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
};
static struct addr_marker address_markers[] = {
	{ 0,	"Start of kernel VM" },
	{ 0,	"vmalloc() Area" },
	{ 0,	"vmalloc() End" },
#ifdef CONFIG_PPC64
	{ 0,	"isa I/O start" },
	{ 0,	"isa I/O end" },
	{ 0,	"phb I/O start" },
	{ 0,	"phb I/O end" },
	{ 0,	"I/O remap start" },
	{ 0,	"I/O remap end" },
	{ 0,	"vmemmap start" },
#else
	{ 0,	"Early I/O remap start" },
	{ 0,	"Early I/O remap end" },
#ifdef CONFIG_NOT_COHERENT_CACHE
	{ 0,	"Consistent mem start" },
	{ 0,	"Consistent mem end" },
#endif
#ifdef CONFIG_HIGHMEM
	{ 0,	"Highmem PTEs start" },
	{ 0,	"Highmem PTEs end" },
#endif
	{ 0,	"Fixmap start" },
	{ 0,	"Fixmap end" },
#endif
#ifdef CONFIG_KASAN
	{ 0,	"kasan shadow mem start" },
	{ 0,	"kasan shadow mem end" },
#endif
	{ -1,	NULL },
};
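/*
 * The start addresses are all zero here; populate_markers() fills them in
 * at init time.  The trailing { -1, NULL } entry acts as a sentinel: since
 * start_address is unsigned, -1 is the highest possible address, so the
 * marker pointer advanced in note_page() never runs past the table.
 */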
#define pt_dump_seq_printf(m, fmt, args...)	\
	do { if (m) seq_printf(m, fmt, ##args); } while (0)

#define pt_dump_seq_putc(m, c)	\
	do { if (m) seq_putc(m, c); } while (0)
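/*
 * The NULL check in the helpers above is what lets ptdump_check_wx() reuse
 * walk_pagetables() with st.seq == NULL: the walk then produces no output
 * and only the W+X accounting in note_prot_wx() has any effect.
 */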
static void dump_flag_info(struct pg_state *st, const struct flag_info
		*flag, u64 pte, int num)
{
	unsigned int i;

	for (i = 0; i < num; i++, flag++) {
		const char *s = NULL;
		u64 val;

		/* flag not defined so don't check it */
		if (flag->mask == 0)
			continue;
		/* Some 'flags' are actually values */
		if (flag->is_val) {
			val = pte & flag->val;
			if (flag->shift)
				val = val >> flag->shift;
			pt_dump_seq_printf(st->seq, " %s:%llx", flag->set, val);
		} else {
			if ((pte & flag->mask) == flag->val)
				s = flag->set;
			else
				s = flag->clear;
			if (s)
				pt_dump_seq_printf(st->seq, " %s", s);
		}
		st->current_flags &= ~flag->mask;
	}
	if (st->current_flags != 0)
		pt_dump_seq_printf(st->seq, " unknown flags:%llx", st->current_flags);
}
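/*
 * The flag descriptions (struct flag_info) and the per-level pg_level[]
 * table used here and in note_page() below are provided by the
 * platform-specific ptdump code via "ptdump.h"; this file only consumes them.
 */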
static void dump_addr(struct pg_state *st, unsigned long addr)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;
	unsigned long delta;

#ifdef CONFIG_PPC64
#define REG	"0x%016lx"
#else
#define REG	"0x%08lx"
#endif

	pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
	if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) {
		pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
		delta = PAGE_SIZE >> 10;
	} else {
		pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
		delta = (addr - st->start_address) >> 10;
	}
	/* Work out what appropriate unit to use */
	while (!(delta & 1023) && unit[1]) {
		delta >>= 10;
		unit++;
	}
	pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
		return;

	/* Only mappings carrying all the PAGE_KERNEL_X bits are of interest */
	if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X)))
		return;

	WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);
	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
static void note_page(struct pg_state *st, unsigned long addr,
		      unsigned int level, u64 val)
{
	u64 flag = val & pg_level[level].mask;
	u64 pa = val & PTE_RPN_MASK;

	/* At first no level is set */
	if (!st->level) {
		st->level = level;
		st->current_flags = flag;
		st->start_address = addr;
		st->start_pa = pa;
		st->last_pa = pa;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	/*
	 * Dump the section of virtual memory when:
	 *   - the PTE flags from one entry to the next differ.
	 *   - we change levels in the tree.
	 *   - the address is in a different section of memory and is thus
	 *     used for a different purpose, regardless of the flags.
	 *   - the pa of this page is not adjacent to the last inspected page.
	 */
	} else if (flag != st->current_flags || level != st->level ||
		   addr >= st->marker[1].start_address ||
		   (pa != st->last_pa + PAGE_SIZE &&
		    (pa != st->start_pa || st->start_pa != st->last_pa))) {

		/* Check the PTE flags */
		if (st->current_flags) {
			note_prot_wx(st, addr);
			dump_addr(st, addr);

			/* Dump all the flags */
			if (pg_level[st->level].flag)
				dump_flag_info(st, pg_level[st->level].flag,
					       st->current_flags,
					       pg_level[st->level].num);
			pt_dump_seq_putc(st->seq, '\n');
		}

		/*
		 * Address indicates we have passed the end of the
		 * current section of virtual memory
		 */
		while (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
		}
		st->start_address = addr;
		st->start_pa = pa;
		st->last_pa = pa;
		st->current_flags = flag;
		st->level = level;
	} else {
		st->last_pa = pa;
	}
}
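/*
 * The walkers below hand note_page() a level of 1..4 for PGD, PUD, PMD and
 * PTE entries respectively; ptdump_show() finishes with a level-0 call so
 * the last pending range gets flushed.
 */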
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		note_page(st, addr, 4, pte_val(*pte));
	}
}
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		if (!pmd_none(*pmd) && !pmd_is_leaf(*pmd))
			walk_pte(st, pmd, addr);
		else
			note_page(st, addr, 3, pmd_val(*pmd));
	}
}
static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	pud_t *pud = pud_offset(pgd, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (!pud_none(*pud) && !pud_is_leaf(*pud))
			walk_pmd(st, pud, addr);
		else
			note_page(st, addr, 2, pud_val(*pud));
	}
}
static void walk_pagetables(struct pg_state *st)
{
	unsigned int i;
	unsigned long addr = st->start_address & PGDIR_MASK;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * Traverse the linux pagetable structure and dump pages that are in
	 * the hash pagetable.
	 */
	for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
		if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
			walk_pud(st, pgd, addr);
		else
			note_page(st, addr, 1, pgd_val(*pgd));
	}
}
static void populate_markers(void)
{
	int i = 0;

	address_markers[i++].start_address = PAGE_OFFSET;
	address_markers[i++].start_address = VMALLOC_START;
	address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
	address_markers[i++].start_address = ISA_IO_BASE;
	address_markers[i++].start_address = ISA_IO_END;
	address_markers[i++].start_address = PHB_IO_BASE;
	address_markers[i++].start_address = PHB_IO_END;
	address_markers[i++].start_address = IOREMAP_BASE;
	address_markers[i++].start_address = IOREMAP_END;
	/* What is the ifdef about? */
#ifdef CONFIG_PPC_BOOK3S_64
	address_markers[i++].start_address = H_VMEMMAP_START;
#else
	address_markers[i++].start_address = VMEMMAP_BASE;
#endif
#else /* !CONFIG_PPC64 */
	address_markers[i++].start_address = ioremap_bot;
	address_markers[i++].start_address = IOREMAP_TOP;
#ifdef CONFIG_NOT_COHERENT_CACHE
	address_markers[i++].start_address = IOREMAP_TOP;
	address_markers[i++].start_address = IOREMAP_TOP +
					     CONFIG_CONSISTENT_SIZE;
#endif
#ifdef CONFIG_HIGHMEM
	address_markers[i++].start_address = PKMAP_BASE;
	address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
#endif /* CONFIG_HIGHMEM */
	address_markers[i++].start_address = FIXADDR_START;
	address_markers[i++].start_address = FIXADDR_TOP;
#ifdef CONFIG_KASAN
	address_markers[i++].start_address = KASAN_SHADOW_START;
	address_markers[i++].start_address = KASAN_SHADOW_END;
#endif
#endif /* CONFIG_PPC64 */
}
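/*
 * Note: the assignments in populate_markers() must stay in the same order,
 * and under the same #ifdefs, as the entries of address_markers[] above.
 */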
static int ptdump_show(struct seq_file *m, void *v)
{
	struct pg_state st = {
		.seq = m,
		.marker = address_markers,
		.start_address = PAGE_OFFSET,
	};

#ifdef CONFIG_PPC64
	if (!radix_enabled())
		st.start_address = KERN_VIRT_START;
#endif

	/* Traverse kernel page tables */
	walk_pagetables(&st);
	note_page(&st, 0, 0, 0);
	return 0;
}
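/*
 * Illustrative usage, assuming debugfs is mounted in the usual place and
 * this dumper is built in:
 *
 *   # cat /sys/kernel/debug/kernel_page_tables
 *
 * Each read runs ptdump_show() via the seq_file single_open() machinery
 * set up below.
 */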
static int ptdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void build_pgtable_complete_mask(void)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].flag)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].flag[j].mask;
}
#ifdef CONFIG_PPC_DEBUG_WX
void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = address_markers,
		.check_wx = true,
		.start_address = PAGE_OFFSET,
	};

#ifdef CONFIG_PPC64
	if (!radix_enabled())
		st.start_address = KERN_VIRT_START;
#endif

	walk_pagetables(&st);

	if (st.wx_pages)
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
			st.wx_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
#endif
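/*
 * Boot-time setup: fill in the markers, build the per-level flag masks and
 * register the debugfs file that exposes the dump.
 */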
static int ptdump_init(void)
{
	struct dentry *debugfs_file;

	populate_markers();
	build_pgtable_complete_mask();
	debugfs_file = debugfs_create_file("kernel_page_tables", 0400, NULL,
					   NULL, &ptdump_fops);
	return debugfs_file ? 0 : -ENOMEM;
}
device_initcall(ptdump_init);