// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
        unsigned short order;
        short last_migrate_reason;
        gfp_t gfp_mask;
        depot_stack_handle_t handle;
        depot_stack_handle_t free_handle;
        u64 ts_nsec;
        u64 free_ts_nsec;
        char comm[TASK_COMM_LEN];
        pid_t pid;
        pid_t tgid;
};

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

static int __init early_page_owner_param(char *buf)
{
        int ret = kstrtobool(buf, &page_owner_enabled);

        if (page_owner_enabled)
                stack_depot_want_early_init();

        return ret;
}
early_param("page_owner", early_page_owner_param);

static __init bool need_page_owner(void)
{
        return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
        unsigned long entries[4];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
        dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
        failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
        early_handle = create_dummy_stack();
}

static __init void init_page_owner(void)
{
        if (!page_owner_enabled)
                return;

        register_dummy_stack();
        register_failure_stack();
        register_early_stack();
        static_branch_enable(&page_owner_inited);
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .size = sizeof(struct page_owner),
        .need = need_page_owner,
        .init = init_page_owner,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
        return (void *)page_ext + page_owner_ops.offset;
}

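/*
 * Save the current stack trace in the stack depot and return its handle.
 * Returns dummy_handle when called recursively (the depot itself may need
 * to allocate memory) and failure_handle when the depot save fails.
 */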
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        depot_stack_handle_t handle;
        unsigned int nr_entries;

        /*
         * Avoid recursion.
         *
         * Sometimes page metadata allocation tracking requires more
         * memory to be allocated:
         * - when new stack trace is saved to stack depot
         * - when backtrace itself is calculated (ia64)
         */
        if (current->in_page_owner)
                return dummy_handle;
        current->in_page_owner = 1;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
        handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;

        current->in_page_owner = 0;
        return handle;
}

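/*
 * Record the freeing stack trace and timestamp and clear the "allocated"
 * bit for every base page of the 2^order block being freed.
 */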
void __reset_page_owner(struct page *page, unsigned short order)
{
        int i;
        struct page_ext *page_ext;
        depot_stack_handle_t handle;
        struct page_owner *page_owner;
        u64 free_ts_nsec = local_clock();

        page_ext = page_ext_get(page);
        if (unlikely(!page_ext))
                return;

        handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
        for (i = 0; i < (1 << order); i++) {
                __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
                page_owner = get_page_owner(page_ext);
                page_owner->free_handle = handle;
                page_owner->free_ts_nsec = free_ts_nsec;
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}

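/*
 * Fill in the owner info (stack handle, order, gfp mask, pid/tgid, comm
 * and timestamp) for every base page of a 2^order block.
 */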
static inline void __set_page_owner_handle(struct page_ext *page_ext,
                                        depot_stack_handle_t handle,
                                        unsigned short order, gfp_t gfp_mask)
{
        struct page_owner *page_owner;
        int i;

        for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->handle = handle;
                page_owner->order = order;
                page_owner->gfp_mask = gfp_mask;
                page_owner->last_migrate_reason = -1;
                page_owner->pid = current->pid;
                page_owner->tgid = current->tgid;
                page_owner->ts_nsec = local_clock();
                strscpy(page_owner->comm, current->comm,
                        sizeof(page_owner->comm));
                __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
                __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

                page_ext = page_ext_next(page_ext);
        }
}

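/*
 * Record the allocation stack trace, order and gfp mask for a freshly
 * allocated 2^order block. The stack is captured before the page_ext
 * reference is taken.
 */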
noinline void __set_page_owner(struct page *page, unsigned short order,
                                        gfp_t gfp_mask)
{
        struct page_ext *page_ext;
        depot_stack_handle_t handle;

        handle = save_stack(gfp_mask);

        page_ext = page_ext_get(page);
        if (unlikely(!page_ext))
                return;
        __set_page_owner_handle(page_ext, handle, order, gfp_mask);
        page_ext_put(page_ext);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
        struct page_ext *page_ext = page_ext_get(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->last_migrate_reason = reason;
        page_ext_put(page_ext);
}

void __split_page_owner(struct page *page, unsigned int nr)
{
        int i;
        struct page_ext *page_ext = page_ext_get(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        for (i = 0; i < nr; i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->order = 0;
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}

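/*
 * Copy the owner info from a folio that is being migrated to its
 * replacement, so the new folio reports the original allocation.
 */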
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
        struct page_ext *old_ext;
        struct page_ext *new_ext;
        struct page_owner *old_page_owner, *new_page_owner;

        old_ext = page_ext_get(&old->page);
        if (unlikely(!old_ext))
                return;

        new_ext = page_ext_get(&newfolio->page);
        if (unlikely(!new_ext)) {
                page_ext_put(old_ext);
                return;
        }

        old_page_owner = get_page_owner(old_ext);
        new_page_owner = get_page_owner(new_ext);
        new_page_owner->order = old_page_owner->order;
        new_page_owner->gfp_mask = old_page_owner->gfp_mask;
        new_page_owner->last_migrate_reason =
                old_page_owner->last_migrate_reason;
        new_page_owner->handle = old_page_owner->handle;
        new_page_owner->pid = old_page_owner->pid;
        new_page_owner->tgid = old_page_owner->tgid;
        new_page_owner->ts_nsec = old_page_owner->ts_nsec;
        new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
        strcpy(new_page_owner->comm, old_page_owner->comm);

        /*
         * We don't clear the bit on the old folio as it's going to be freed
         * after migration. Until then, the info can be useful in case of
         * a bug, and the overall stats will be off a bit only temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the old folio to retain the info. But
         * in that case we also don't need to explicitly clear the info from
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
        __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
        page_ext_put(new_ext);
        page_ext_put(old_ext);
}

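/*
 * Count pageblocks that contain at least one page allocated with a
 * migratetype other than the pageblock's own, and print one count per
 * migratetype for this zone.
 */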
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                       pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        unsigned long pfn, block_end_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count[MIGRATE_TYPES] = { 0, };
        int pageblock_mt, page_mt;
        int i;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                page = pfn_to_online_page(pfn);
                if (!page) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = pageblock_end_pfn(pfn);
                block_end_pfn = min(block_end_pfn, end_pfn);

                pageblock_mt = get_pageblock_migratetype(page);

                for (; pfn < block_end_pfn; pfn++) {
                        /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        if (PageBuddy(page)) {
                                unsigned long freepage_order;

                                freepage_order = buddy_order_unsafe(page);
                                if (freepage_order < MAX_ORDER)
                                        pfn += (1UL << freepage_order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = page_ext_get(page);
                        if (unlikely(!page_ext))
                                continue;

                        if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                                goto ext_put_continue;

                        page_owner = get_page_owner(page_ext);
                        page_mt = gfp_migratetype(page_owner->gfp_mask);
                        if (pageblock_mt != page_mt) {
                                if (is_migrate_cma(pageblock_mt))
                                        count[MIGRATE_MOVABLE]++;
                                else
                                        count[pageblock_mt]++;

                                pfn = block_end_pfn;
                                page_ext_put(page_ext);
                                break;
                        }
                        pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
                        page_ext_put(page_ext);
                }
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (i = 0; i < MIGRATE_TYPES; i++)
                seq_printf(m, "%12lu ", count[i]);
        seq_putc(m, '\n');
}

/*
 * Look up memcg information and print it out
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
                                         struct page *page)
{
#ifdef CONFIG_MEMCG
        unsigned long memcg_data;
        struct mem_cgroup *memcg;
        bool online;
        char name[80];

        rcu_read_lock();
        memcg_data = READ_ONCE(page->memcg_data);
        if (!memcg_data)
                goto out_unlock;

        if (memcg_data & MEMCG_DATA_OBJCGS)
                ret += scnprintf(kbuf + ret, count - ret,
                                "Slab cache page\n");

        memcg = page_memcg_check(page);
        if (!memcg)
                goto out_unlock;

        online = (memcg->css.flags & CSS_ONLINE);
        cgroup_name(memcg->css.cgroup, name, sizeof(name));
        ret += scnprintf(kbuf + ret, count - ret,
                        "Charged %sto %smemcg %s\n",
                        PageMemcgKmem(page) ? "(via objcg) " : "",
                        online ? "" : "offline ",
                        name);
out_unlock:
        rcu_read_unlock();
#endif /* CONFIG_MEMCG */

        return ret;
}

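/*
 * Format a single page's owner record (allocation stack, migratetype,
 * flags and memcg info) into a kernel buffer and copy it to userspace.
 */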
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_owner *page_owner,
                depot_stack_handle_t handle)
{
        int ret, pageblock_mt, page_mt;
        char *kbuf;

        count = min_t(size_t, count, PAGE_SIZE);
        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = scnprintf(kbuf, count,
                        "Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns\n",
                        page_owner->order, page_owner->gfp_mask,
                        &page_owner->gfp_mask, page_owner->pid,
                        page_owner->tgid, page_owner->comm,
                        page_owner->ts_nsec, page_owner->free_ts_nsec);

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pageblock_migratetype(page);
        page_mt = gfp_migratetype(page_owner->gfp_mask);
        ret += scnprintf(kbuf + ret, count - ret,
                        "PFN %lu type %s Block %lu type %s Flags %pGp\n",
                        pfn,
                        migratetype_names[page_mt],
                        pfn >> pageblock_order,
                        migratetype_names[pageblock_mt],
                        &page->flags);

        ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
        if (ret >= count)
                goto err;

        if (page_owner->last_migrate_reason != -1) {
                ret += scnprintf(kbuf + ret, count - ret,
                        "Page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
        }

        ret = print_page_owner_memcg(kbuf, count, ret, page);

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}

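/*
 * Dump the owner info of one page to the kernel log, including the
 * allocation and (if recorded) free stack traces.
 */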
void __dump_page_owner(const struct page *page)
{
        struct page_ext *page_ext = page_ext_get((void *)page);
        struct page_owner *page_owner;
        depot_stack_handle_t handle;
        gfp_t gfp_mask;
        int mt;

        if (unlikely(!page_ext)) {
                pr_alert("There is no page extension available.\n");
                return;
        }

        page_owner = get_page_owner(page_ext);
        gfp_mask = page_owner->gfp_mask;
        mt = gfp_migratetype(gfp_mask);

        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not present (never set?)\n");
                page_ext_put(page_ext);
                return;
        }

        if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                pr_alert("page_owner tracks the page as allocated\n");
        else
                pr_alert("page_owner tracks the page as freed\n");

        pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
                 page_owner->pid, page_owner->tgid, page_owner->comm,
                 page_owner->ts_nsec, page_owner->free_ts_nsec);

        handle = READ_ONCE(page_owner->handle);
        if (!handle)
                pr_alert("page_owner allocation stack trace missing\n");
        else
                stack_depot_print(handle);

        handle = READ_ONCE(page_owner->free_handle);
        if (!handle) {
                pr_alert("page_owner free stack trace missing\n");
        } else {
                pr_alert("page last free stack trace:\n");
                stack_depot_print(handle);
        }

        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
        page_ext_put(page_ext);
}

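/*
 * Read handler for the debugfs "page_owner" file: starting at the PFN
 * encoded in the file offset, find the next allocated page with owner
 * info, print it and advance the offset past it.
 */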
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        depot_stack_handle_t handle;

        if (!static_branch_unlikely(&page_owner_inited))
                return -EINVAL;

        page = NULL;
        if (*ppos == 0)
                pfn = min_low_pfn;
        else
                pfn = *ppos;
        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * This temporary page_owner is required so
                 * that we can avoid the context switches while holding
                 * the rcu lock and copying the page owner information to
                 * user through copy_to_user() or GFP_KERNEL allocations.
                 */
                struct page_owner page_owner_tmp;

                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = buddy_order_unsafe(page);

                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = page_ext_get(page);
                if (unlikely(!page_ext))
                        continue;

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        goto ext_put_continue;

                /*
                 * Although we do have the info about past allocation of free
                 * pages, it's not relevant for current memory usage.
                 */
                if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                        goto ext_put_continue;

                page_owner = get_page_owner(page_ext);

                /*
                 * Don't print "tail" pages of high-order allocations as that
                 * would inflate the stats.
                 */
                if (!IS_ALIGNED(pfn, 1 << page_owner->order))
                        goto ext_put_continue;

                /*
                 * Access to page_ext->handle isn't synchronous so we should
                 * be careful to access it.
                 */
                handle = READ_ONCE(page_owner->handle);
                if (!handle)
                        goto ext_put_continue;

                /* Record the next PFN to read in the file offset */
                *ppos = pfn + 1;

                page_owner_tmp = *page_owner;
                page_ext_put(page_ext);
                return print_page_owner(buf, count, pfn, page,
                                &page_owner_tmp, handle);
ext_put_continue:
                page_ext_put(page_ext);
        }

        return 0;
}

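/*
 * The file offset is interpreted as a PFN, so userspace can seek directly
 * to a specific page before reading.
 */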
static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
        switch (orig) {
        case SEEK_SET:
                file->f_pos = offset;
                break;
        case SEEK_CUR:
                file->f_pos += offset;
                break;
        default:
                return -EINVAL;
        }
        return file->f_pos;
}

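/*
 * Pages allocated before page_owner was initialized have no owner info.
 * Walk the zone and tag any such page with early_handle so it shows up
 * as an early allocation instead of being skipped.
 */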
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        unsigned long pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count = 0;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                unsigned long block_end_pfn;

                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = pageblock_end_pfn(pfn);
                block_end_pfn = min(block_end_pfn, end_pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        struct page *page = pfn_to_page(pfn);
                        struct page_ext *page_ext;

                        if (page_zone(page) != zone)
                                continue;

                        /*
                         * To avoid having to grab zone->lock, be a little
                         * careful when reading buddy page order. The only
                         * danger is that we skip too much and potentially miss
                         * some early allocated pages, which is better than
                         * heavy lock contention.
                         */
                        if (PageBuddy(page)) {
                                unsigned long order = buddy_order_unsafe(page);

                                if (order > 0 && order < MAX_ORDER)
                                        pfn += (1UL << order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = page_ext_get(page);
                        if (unlikely(!page_ext))
                                continue;

                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                goto ext_put_continue;

                        /* Found early allocated page */
                        __set_page_owner_handle(page_ext, early_handle,
                                                0, 0);
                        count++;
ext_put_continue:
                        page_ext_put(page_ext);
                }
                cond_resched();
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                init_pages_in_zone(pgdat, zone);
        }
}

static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
        .read = read_page_owner,
        .llseek = lseek_page_owner,
};

static int __init pageowner_init(void)
{
        if (!static_branch_unlikely(&page_owner_inited)) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        debugfs_create_file("page_owner", 0400, NULL, NULL,
                            &proc_page_owner_operations);

        return 0;
}
late_initcall(pageowner_init)