/*
 * arch/s390/mm/dump_pagetables.c — dump the kernel page tables via debugfs.
 */
b2441318 1// SPDX-License-Identifier: GPL-2.0
e76e82d7
HC
2#include <linux/seq_file.h>
3#include <linux/debugfs.h>
549f2bf5 4#include <linux/sched.h>
e76e82d7
HC
5#include <linux/mm.h>
6#include <asm/sections.h>
7#include <asm/pgtable.h>
8
/* Highest virtual address reachable through the kernel ASCE; computed once in pt_dump_init(). */
static unsigned long max_addr;
10
/*
 * A labelled boundary in the dump output: when the walk crosses
 * start_address, "---[ name ]---" is printed.
 */
struct addr_marker {
	unsigned long start_address;
	const char *name;
};
15
/* Indices into address_markers[]; must match the initializer order below. */
enum address_markers_idx {
	IDENTITY_NR = 0,	/* identity-mapped physical memory */
	KERNEL_START_NR,	/* _stext */
	KERNEL_END_NR,		/* _end */
	VMEMMAP_NR,
	VMALLOC_NR,
	MODULES_NR,
};
24
25static struct addr_marker address_markers[] = {
26 [IDENTITY_NR] = {0, "Identity Mapping"},
27 [KERNEL_START_NR] = {(unsigned long)&_stext, "Kernel Image Start"},
28 [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
29 [VMEMMAP_NR] = {0, "vmemmap Area"},
30 [VMALLOC_NR] = {0, "vmalloc Area"},
c972cc60 31 [MODULES_NR] = {0, "Modules Area"},
e76e82d7
HC
32 { -1, NULL }
33};
34
/*
 * Running state of the walk: the current run ("series") of identically
 * protected entries that note_page() accumulates and eventually prints.
 */
struct pg_state {
	int level;			/* paging level of the series (0 = unset) */
	unsigned int current_prot;	/* protection bits of the series */
	unsigned long start_address;	/* first address of the series */
	unsigned long current_address;	/* address currently being walked */
	const struct addr_marker *marker;	/* next region marker to cross */
};
42
43static void print_prot(struct seq_file *m, unsigned int pr, int level)
44{
45 static const char * const level_name[] =
46 { "ASCE", "PGD", "PUD", "PMD", "PTE" };
47
48 seq_printf(m, "%s ", level_name[level]);
1819ed1f 49 if (pr & _PAGE_INVALID) {
e76e82d7 50 seq_printf(m, "I\n");
1819ed1f
HC
51 return;
52 }
57d7f939
MS
53 seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
54 seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
e76e82d7
HC
55}
56
57static void note_page(struct seq_file *m, struct pg_state *st,
58 unsigned int new_prot, int level)
59{
60 static const char units[] = "KMGTPE";
61 int width = sizeof(unsigned long) * 2;
62 const char *unit = units;
63 unsigned int prot, cur;
64 unsigned long delta;
65
66 /*
67 * If we have a "break" in the series, we need to flush the state
68 * that we have now. "break" is either changing perms, levels or
69 * address space marker.
70 */
71 prot = new_prot;
72 cur = st->current_prot;
73
74 if (!st->level) {
75 /* First entry */
76 st->current_prot = new_prot;
77 st->level = level;
78 st->marker = address_markers;
79 seq_printf(m, "---[ %s ]---\n", st->marker->name);
80 } else if (prot != cur || level != st->level ||
81 st->current_address >= st->marker[1].start_address) {
82 /* Print the actual finished series */
83 seq_printf(m, "0x%0*lx-0x%0*lx",
84 width, st->start_address,
85 width, st->current_address);
86 delta = (st->current_address - st->start_address) >> 10;
87 while (!(delta & 0x3ff) && unit[1]) {
88 delta >>= 10;
89 unit++;
90 }
91 seq_printf(m, "%9lu%c ", delta, *unit);
92 print_prot(m, st->current_prot, st->level);
93 if (st->current_address >= st->marker[1].start_address) {
94 st->marker++;
95 seq_printf(m, "---[ %s ]---\n", st->marker->name);
96 }
97 st->start_address = st->current_address;
98 st->current_prot = new_prot;
99 st->level = level;
100 }
101}
102
/*
 * The actual page table walker functions. In order to keep the
 * implementation of print_prot() short, we only check and pass
 * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
 * segment or page table entry is invalid or read-only.
 * After all it's just a hint that the current level being walked
 * contains an invalid or read-only entry.
 */
111static void walk_pte_level(struct seq_file *m, struct pg_state *st,
112 pmd_t *pmd, unsigned long addr)
113{
114 unsigned int prot;
115 pte_t *pte;
116 int i;
117
118 for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
119 st->current_address = addr;
120 pte = pte_offset_kernel(pmd, addr);
57d7f939
MS
121 prot = pte_val(*pte) &
122 (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
e76e82d7
HC
123 note_page(m, st, prot, 4);
124 addr += PAGE_SIZE;
125 }
126}
127
128static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
129 pud_t *pud, unsigned long addr)
130{
131 unsigned int prot;
132 pmd_t *pmd;
133 int i;
134
135 for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
136 st->current_address = addr;
137 pmd = pmd_offset(pud, addr);
138 if (!pmd_none(*pmd)) {
139 if (pmd_large(*pmd)) {
57d7f939
MS
140 prot = pmd_val(*pmd) &
141 (_SEGMENT_ENTRY_PROTECT |
142 _SEGMENT_ENTRY_NOEXEC);
e76e82d7
HC
143 note_page(m, st, prot, 3);
144 } else
145 walk_pte_level(m, st, pmd, addr);
146 } else
147 note_page(m, st, _PAGE_INVALID, 3);
148 addr += PMD_SIZE;
149 }
150}
151
152static void walk_pud_level(struct seq_file *m, struct pg_state *st,
1aea9b3f 153 p4d_t *p4d, unsigned long addr)
e76e82d7 154{
18da2369 155 unsigned int prot;
e76e82d7
HC
156 pud_t *pud;
157 int i;
158
159 for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
160 st->current_address = addr;
1aea9b3f 161 pud = pud_offset(p4d, addr);
e76e82d7 162 if (!pud_none(*pud))
18da2369 163 if (pud_large(*pud)) {
57d7f939
MS
164 prot = pud_val(*pud) &
165 (_REGION_ENTRY_PROTECT |
166 _REGION_ENTRY_NOEXEC);
18da2369
HC
167 note_page(m, st, prot, 2);
168 } else
169 walk_pmd_level(m, st, pud, addr);
e76e82d7
HC
170 else
171 note_page(m, st, _PAGE_INVALID, 2);
172 addr += PUD_SIZE;
173 }
174}
175
1aea9b3f
MS
176static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
177 pgd_t *pgd, unsigned long addr)
178{
179 p4d_t *p4d;
180 int i;
181
182 for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
183 st->current_address = addr;
184 p4d = p4d_offset(pgd, addr);
185 if (!p4d_none(*p4d))
186 walk_pud_level(m, st, p4d, addr);
187 else
188 note_page(m, st, _PAGE_INVALID, 2);
189 addr += P4D_SIZE;
190 }
191}
192
e76e82d7
HC
193static void walk_pgd_level(struct seq_file *m)
194{
195 unsigned long addr = 0;
196 struct pg_state st;
197 pgd_t *pgd;
198 int i;
199
200 memset(&st, 0, sizeof(st));
201 for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) {
202 st.current_address = addr;
203 pgd = pgd_offset_k(addr);
204 if (!pgd_none(*pgd))
1aea9b3f 205 walk_p4d_level(m, &st, pgd, addr);
e76e82d7
HC
206 else
207 note_page(m, &st, _PAGE_INVALID, 1);
208 addr += PGDIR_SIZE;
549f2bf5 209 cond_resched();
e76e82d7
HC
210 }
211 /* Flush out the last page */
212 st.current_address = max_addr;
213 note_page(m, &st, 0, 0);
214}
215
/* seq_file show callback: dump the entire kernel page table in one go. */
static int ptdump_show(struct seq_file *m, void *v)
{
	walk_pgd_level(m);
	return 0;
}
221
222static int ptdump_open(struct inode *inode, struct file *filp)
223{
224 return single_open(filp, ptdump_show, NULL);
225}
226
227static const struct file_operations ptdump_fops = {
228 .open = ptdump_open,
229 .read = seq_read,
230 .llseek = seq_lseek,
231 .release = single_release,
232};
233
234static int pt_dump_init(void)
235{
236 /*
237 * Figure out the maximum virtual address being accessible with the
238 * kernel ASCE. We need this to keep the page table walker functions
239 * from accessing non-existent entries.
240 */
e76e82d7
HC
241 max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
242 max_addr = 1UL << (max_addr * 11 + 31);
c972cc60 243 address_markers[MODULES_NR].start_address = MODULES_VADDR;
e76e82d7
HC
244 address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
245 address_markers[VMALLOC_NR].start_address = VMALLOC_START;
246 debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
247 return 0;
248}
249device_initcall(pt_dump_init);