// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)        b,
#define EMe(a, b)       b

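/*
 * With EM()/EMe() defined this way, each entry in the MIGRATE_REASON list
 * contributes only its string argument, so the list expands directly into
 * the initializer below. For example, an entry of the form
 * EM(MR_COMPACTION, "compaction") would become the string "compaction" at
 * index MR_COMPACTION.
 */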
const char *migrate_reason_names[MR_TYPES] = {
        MIGRATE_REASON
};

const struct trace_print_flags pageflag_names[] = {
        __def_pageflag_names,
        {0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
        __def_pagetype_names,
        {0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
        __def_gfpflag_names,
        {0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
        __def_vmaflag_names,
        {0, NULL}
};

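/*
 * Print the state of one page and of the folio that contains it: reference
 * and map counts, mapping and index, flags, page type, and a raw hex dump of
 * the underlying struct page(s). @pfn is the page frame number of the page
 * being dumped and @idx its offset within the folio.
 */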
static void __dump_folio(struct folio *folio, struct page *page,
                unsigned long pfn, unsigned long idx)
{
        struct address_space *mapping = folio_mapping(folio);
        int mapcount = atomic_read(&page->_mapcount);
        char *type = "";

        mapcount = page_type_has_type(mapcount) ? 0 : mapcount + 1;
        pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
                        folio_ref_count(folio), mapcount, mapping,
                        folio->index + idx, pfn);
        if (folio_test_large(folio)) {
                pr_warn("head: order:%u mapcount:%d entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
                                folio_order(folio),
                                folio_mapcount(folio),
                                folio_entire_mapcount(folio),
                                folio_nr_pages_mapped(folio),
                                atomic_read(&folio->_pincount));
        }

#ifdef CONFIG_MEMCG
        if (folio->memcg_data)
                pr_warn("memcg:%lx\n", folio->memcg_data);
#endif
        if (folio_test_ksm(folio))
                type = "ksm ";
        else if (folio_test_anon(folio))
                type = "anon ";
        else if (mapping)
                dump_mapping(mapping);
        BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

        /*
         * Accessing the pageblock without the zone lock. It could change to
         * "isolate" again in the meantime, but since we are just dumping the
         * state for debugging, it should be fine to accept a bit of
         * inaccuracy here due to racing.
         */
        pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
                is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
        if (page_has_type(&folio->page))
                pr_warn("page_type: %pGt\n", &folio->page.page_type);

        print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);
        if (folio_test_large(folio))
                print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), folio,
                        2 * sizeof(struct page), false);
}

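/*
 * The page may be freed, split or otherwise rewritten concurrently, so dump
 * from a local snapshot: copy the struct page and, where possible, the first
 * two struct pages of its folio, and retry a few times if the copies do not
 * describe a consistent folio before giving up and dumping the page on its
 * own.
 */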
static void __dump_page(const struct page *page)
{
        struct folio *foliop, folio;
        struct page precise;
        unsigned long pfn = page_to_pfn(page);
        unsigned long idx, nr_pages = 1;
        int loops = 5;

again:
        memcpy(&precise, page, sizeof(*page));
        foliop = page_folio(&precise);
        if (foliop == (struct folio *)&precise) {
                idx = 0;
                if (!folio_test_large(foliop))
                        goto dump;
                foliop = (struct folio *)page;
        } else {
                idx = folio_page_idx(foliop, page);
        }

        if (idx < MAX_FOLIO_NR_PAGES) {
                memcpy(&folio, foliop, 2 * sizeof(struct page));
                nr_pages = folio_nr_pages(&folio);
                foliop = &folio;
        }

        if (idx > nr_pages) {
                if (loops-- > 0)
                        goto again;
                pr_warn("page does not match folio\n");
                precise.compound_head &= ~1UL;
                foliop = (struct folio *)&precise;
                idx = 0;
        }

dump:
        __dump_folio(foliop, &precise, pfn, idx);
}

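/*
 * Dump the state of @page to the kernel log, followed by the optional
 * @reason string and any page_owner information recorded for the page.
 * Poisoned (uninitialized) pages are reported as such instead of being
 * decoded.
 */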
void dump_page(const struct page *page, const char *reason)
{
        if (PagePoisoned(page))
                pr_warn("page:%p is uninitialized and poisoned", page);
        else
                __dump_page(page);
        if (reason)
                pr_warn("page dumped because: %s\n", reason);
        dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

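/* Dump the vm_area_struct fields that matter for debugging, including its flags. */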
void dump_vma(const struct vm_area_struct *vma)
{
        pr_emerg("vma %px start %px end %px mm %px\n"
                "prot %lx anon_vma %px vm_ops %px\n"
                "pgoff %lx file %px private_data %px\n"
                "flags: %#lx(%pGv)\n",
                vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
                (unsigned long)pgprot_val(vma->vm_page_prot),
                vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
                vma->vm_file, vma->vm_private_data,
                vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

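/*
 * Dump the mm_struct fields that are most useful when debugging memory
 * management problems; the exact set printed depends on the kernel
 * configuration.
 */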
void dump_mm(const struct mm_struct *mm)
{
        pr_emerg("mm %px task_size %lu\n"
                "mmap_base %lu mmap_legacy_base %lu\n"
                "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
                "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
                "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
                "start_code %lx end_code %lx start_data %lx end_data %lx\n"
                "start_brk %lx brk %lx start_stack %lx\n"
                "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
                "binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
                "ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
                "owner %px "
#endif
                "exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
                "notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
                "tlb_flush_pending %d\n"
                "def_flags: %#lx(%pGv)\n",

                mm, mm->task_size,
                mm->mmap_base, mm->mmap_legacy_base,
                mm->pgd, atomic_read(&mm->mm_users),
                atomic_read(&mm->mm_count),
                mm_pgtables_bytes(mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
                (u64)atomic64_read(&mm->pinned_vm),
                mm->data_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
                mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
                mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
                mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
                mm->owner,
#endif
                mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
                mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
                atomic_read(&mm->tlb_flush_pending),
                mm->def_flags, &mm->def_flags
        );
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

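/*
 * Parse the "vm_debug" kernel command line option: "vm_debug" with no
 * arguments enables every option handled here, "vm_debug=-" disables them
 * all, and "vm_debug=P" (case-insensitive) enables struct page init
 * poisoning. Unknown option characters are reported and skipped.
 */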
static int __init setup_vm_debug(char *str)
{
        bool __page_init_poisoning = true;

        /*
         * Calling vm_debug with no arguments is equivalent to requesting
         * to enable all debugging options we can control.
         */
        if (*str++ != '=' || !*str)
                goto out;

        __page_init_poisoning = false;
        if (*str == '-')
                goto out;

        while (*str) {
                switch (tolower(*str)) {
                case 'p':
                        __page_init_poisoning = true;
                        break;
                default:
                        pr_err("vm_debug option '%c' unknown. skipped\n",
                               *str);
                }

                str++;
        }
out:
        if (page_init_poisoning && !__page_init_poisoning)
                pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

        page_init_poisoning = __page_init_poisoning;

        return 1;
}
__setup("vm_debug", setup_vm_debug);

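/*
 * Poison @size bytes of struct page memory with PAGE_POISON_PATTERN (when
 * page_init_poisoning is enabled) so that accidental use of not-yet-
 * initialized struct pages is easier to spot.
 */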
void page_init_poison(struct page *page, size_t size)
{
        if (page_init_poisoning)
                memset(page, PAGE_POISON_PATTERN, size);
}

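/*
 * Dump the state of the VMA iterator and of the maple tree backing it; this
 * only produces output when CONFIG_DEBUG_VM_MAPLE_TREE is enabled.
 */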
void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
        mas_dump(&vmi->mas);
        mt_dump(vmi->mas.tree, mt_dump_hex);
#endif  /* CONFIG_DEBUG_VM_MAPLE_TREE */
}

#endif          /* CONFIG_DEBUG_VM */