mm,page_owner: implement the tracking of the stacks count
[linux-2.6-block.git] / mm / page_owner.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
        unsigned short order;
        short last_migrate_reason;
        gfp_t gfp_mask;
        depot_stack_handle_t handle;
        depot_stack_handle_t free_handle;
        u64 ts_nsec;
        u64 free_ts_nsec;
        char comm[TASK_COMM_LEN];
        pid_t pid;
        pid_t tgid;
        pid_t free_pid;
        pid_t free_tgid;
};

struct stack {
        struct stack_record *stack_record;
        struct stack *next;
};
static struct stack dummy_stack;
static struct stack failure_stack;
static struct stack *stack_list;
static DEFINE_SPINLOCK(stack_list_lock);

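/*
 * Illustrative sketch, not part of the original file: because new nodes
 * are only ever prepended to stack_list under stack_list_lock and nodes
 * are never removed, a reader that snapshots the head once can walk the
 * remainder of the list without taking the lock.
 */
#if 0	/* example only */
static void walk_stack_list_example(void)
{
        struct stack *stack;

        for (stack = READ_ONCE(stack_list); stack; stack = stack->next)
                pr_info("stack_record with %u references\n",
                        refcount_read(&stack->stack_record->count));
}
#endif
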
static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

static int __init early_page_owner_param(char *buf)
{
        int ret = kstrtobool(buf, &page_owner_enabled);

        if (page_owner_enabled)
                stack_depot_request_early_init();

        return ret;
}
early_param("page_owner", early_page_owner_param);

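/*
 * Usage note (not part of the original file): booting with
 * "page_owner=on" enables the feature; kstrtobool() above also accepts
 * the usual boolean spellings such as "1" and "y". Requesting stack
 * depot early init here ensures the depot is ready before the first
 * tracked allocations happen.
 */
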
static __init bool need_page_owner(void)
{
        return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
        unsigned long entries[4];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
        dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
        failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
        early_handle = create_dummy_stack();
}

static __init void init_page_owner(void)
{
        if (!page_owner_enabled)
                return;

        register_dummy_stack();
        register_failure_stack();
        register_early_stack();
        static_branch_enable(&page_owner_inited);
        init_early_allocated_pages();
        /* Initialize dummy and failure stacks and link them to stack_list */
        dummy_stack.stack_record = __stack_depot_get_stack_record(dummy_handle);
        failure_stack.stack_record = __stack_depot_get_stack_record(failure_handle);
        refcount_set(&dummy_stack.stack_record->count, 1);
        refcount_set(&failure_stack.stack_record->count, 1);
        dummy_stack.next = &failure_stack;
        stack_list = &dummy_stack;
}

struct page_ext_operations page_owner_ops = {
        .size = sizeof(struct page_owner),
        .need = need_page_owner,
        .init = init_page_owner,
        .need_shared_flags = true,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
        return page_ext_data(page_ext, &page_owner_ops);
}

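/*
 * Note (not part of the original file): page_ext_data() returns the
 * per-client slice of this page's struct page_ext block, at the offset
 * the page_ext core reserved for page_owner_ops based on the .size
 * declared above.
 */
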
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        depot_stack_handle_t handle;
        unsigned int nr_entries;

        /*
         * Avoid recursion.
         *
         * Sometimes page metadata allocation tracking requires more
         * memory to be allocated:
         * - when a new stack trace is saved to the stack depot
         */
        if (current->in_page_owner)
                return dummy_handle;
        current->in_page_owner = 1;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
        handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;

        current->in_page_owner = 0;
        return handle;
}

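/*
 * Illustrative sketch, not part of the original file: a depot handle is
 * a small opaque token standing in for a full stack trace; callers store
 * it cheaply per page and expand it only when reporting.
 */
#if 0	/* example only */
static void report_stack_example(gfp_t gfp_mask)
{
        depot_stack_handle_t handle = save_stack(gfp_mask);

        if (handle)
                stack_depot_print(handle);	/* re-expands and printks the trace */
}
#endif
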
static void add_stack_record_to_list(struct stack_record *stack_record,
                                     gfp_t gfp_mask)
{
        unsigned long flags;
        struct stack *stack;

        /* Filter gfp_mask the same way stackdepot does, for consistency */
        gfp_mask &= ~GFP_ZONEMASK;
        gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
        gfp_mask |= __GFP_NOWARN;

        stack = kmalloc(sizeof(*stack), gfp_mask);
        if (!stack)
                return;

        stack->stack_record = stack_record;
        stack->next = NULL;

        spin_lock_irqsave(&stack_list_lock, flags);
        stack->next = stack_list;
        stack_list = stack;
        spin_unlock_irqrestore(&stack_list_lock, flags);
}

static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask)
{
        struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

        if (!stack_record)
                return;

        /*
         * New stack_records that do not use STACK_DEPOT_FLAG_GET start
         * with a refcount of REFCOUNT_SATURATED to catch spurious
         * increments of their refcount.
         * Since we do not use the STACK_DEPOT_FLAG_GET API, let us
         * set a refcount of 1 ourselves.
         */
        if (refcount_read(&stack_record->count) == REFCOUNT_SATURATED) {
                int old = REFCOUNT_SATURATED;

                if (atomic_try_cmpxchg_relaxed(&stack_record->count.refs, &old, 1))
                        /* Add the new stack_record to our list */
                        add_stack_record_to_list(stack_record, gfp_mask);
        }
        refcount_inc(&stack_record->count);
}

static void dec_stack_record_count(depot_stack_handle_t handle)
{
        struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

        if (stack_record)
                refcount_dec(&stack_record->count);
}

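/*
 * Illustrative sketch, not part of the original file: with the counting
 * scheme above, the list insertion holds one implicit reference, so a
 * stack whose refcount reads 1 currently has no outstanding allocations.
 */
#if 0	/* example only */
static bool stack_has_live_pages_example(const struct stack *stack)
{
        return refcount_read(&stack->stack_record->count) > 1;
}
#endif
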
void __reset_page_owner(struct page *page, unsigned short order)
{
        int i;
        struct page_ext *page_ext;
        depot_stack_handle_t handle;
        depot_stack_handle_t alloc_handle;
        struct page_owner *page_owner;
        u64 free_ts_nsec = local_clock();

        page_ext = page_ext_get(page);
        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        alloc_handle = page_owner->handle;

        handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
        for (i = 0; i < (1 << order); i++) {
                __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
                page_owner->free_handle = handle;
                page_owner->free_ts_nsec = free_ts_nsec;
                page_owner->free_pid = current->pid;
                page_owner->free_tgid = current->tgid;
                page_ext = page_ext_next(page_ext);
                page_owner = get_page_owner(page_ext);
        }
        page_ext_put(page_ext);
237 if (alloc_handle != early_handle)
238 /*
239 * early_handle is being set as a handle for all those
240 * early allocated pages. See init_pages_in_zone().
241 * Since their refcount is not being incremented because
242 * the machinery is not ready yet, we cannot decrement
243 * their refcount either.
244 */
245 dec_stack_record_count(alloc_handle);
8974558f
VB
246}
247
static inline void __set_page_owner_handle(struct page_ext *page_ext,
                                           depot_stack_handle_t handle,
                                           unsigned short order, gfp_t gfp_mask)
{
        struct page_owner *page_owner;
        int i;
        u64 ts_nsec = local_clock();

        for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->handle = handle;
                page_owner->order = order;
                page_owner->gfp_mask = gfp_mask;
                page_owner->last_migrate_reason = -1;
                page_owner->pid = current->pid;
                page_owner->tgid = current->tgid;
                page_owner->ts_nsec = ts_nsec;
                strscpy(page_owner->comm, current->comm,
                        sizeof(page_owner->comm));
                __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
                __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

                page_ext = page_ext_next(page_ext);
        }
}

noinline void __set_page_owner(struct page *page, unsigned short order,
                               gfp_t gfp_mask)
{
        struct page_ext *page_ext;
        depot_stack_handle_t handle;

        handle = save_stack(gfp_mask);

        page_ext = page_ext_get(page);
        if (unlikely(!page_ext))
                return;
        __set_page_owner_handle(page_ext, handle, order, gfp_mask);
        page_ext_put(page_ext);
        inc_stack_record_count(handle, gfp_mask);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
        struct page_ext *page_ext = page_ext_get(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->last_migrate_reason = reason;
        page_ext_put(page_ext);
}

void __split_page_owner(struct page *page, unsigned int nr)
{
        int i;
        struct page_ext *page_ext = page_ext_get(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        for (i = 0; i < nr; i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->order = 0;
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}

void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
        struct page_ext *old_ext;
        struct page_ext *new_ext;
        struct page_owner *old_page_owner, *new_page_owner;

        old_ext = page_ext_get(&old->page);
        if (unlikely(!old_ext))
                return;

        new_ext = page_ext_get(&newfolio->page);
        if (unlikely(!new_ext)) {
                page_ext_put(old_ext);
                return;
        }

        old_page_owner = get_page_owner(old_ext);
        new_page_owner = get_page_owner(new_ext);
        new_page_owner->order = old_page_owner->order;
        new_page_owner->gfp_mask = old_page_owner->gfp_mask;
        new_page_owner->last_migrate_reason =
                old_page_owner->last_migrate_reason;
        new_page_owner->handle = old_page_owner->handle;
        new_page_owner->pid = old_page_owner->pid;
        new_page_owner->tgid = old_page_owner->tgid;
        new_page_owner->free_pid = old_page_owner->free_pid;
        new_page_owner->free_tgid = old_page_owner->free_tgid;
        new_page_owner->ts_nsec = old_page_owner->ts_nsec;
        new_page_owner->free_ts_nsec = old_page_owner->free_ts_nsec;
        strcpy(new_page_owner->comm, old_page_owner->comm);

        /*
         * We don't clear the bit on the old folio as it's going to be freed
         * after migration. Until then, the info can be useful in case of
         * a bug, and the overall stats will be off a bit only temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the old folio to retain the info. But
         * in that case we also don't need to explicitly clear the info from
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
        __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
        page_ext_put(new_ext);
        page_ext_put(old_ext);
}

void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                       pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        unsigned long pfn, block_end_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count[MIGRATE_TYPES] = { 0, };
        int pageblock_mt, page_mt;
        int i;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                page = pfn_to_online_page(pfn);
                if (!page) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = pageblock_end_pfn(pfn);
                block_end_pfn = min(block_end_pfn, end_pfn);

                pageblock_mt = get_pageblock_migratetype(page);

                for (; pfn < block_end_pfn; pfn++) {
                        /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        if (PageBuddy(page)) {
                                unsigned long freepage_order;

                                freepage_order = buddy_order_unsafe(page);
                                if (freepage_order <= MAX_PAGE_ORDER)
                                        pfn += (1UL << freepage_order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = page_ext_get(page);
                        if (unlikely(!page_ext))
                                continue;

                        if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                                goto ext_put_continue;

                        page_owner = get_page_owner(page_ext);
                        page_mt = gfp_migratetype(page_owner->gfp_mask);
                        if (pageblock_mt != page_mt) {
                                if (is_migrate_cma(pageblock_mt))
                                        count[MIGRATE_MOVABLE]++;
                                else
                                        count[pageblock_mt]++;

                                pfn = block_end_pfn;
                                page_ext_put(page_ext);
                                break;
                        }
                        pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
                        page_ext_put(page_ext);
                }
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (i = 0; i < MIGRATE_TYPES; i++)
                seq_printf(m, "%12lu ", count[i]);
        seq_putc(m, '\n');
}

/*
 * Look up the memcg information for the page and print it out
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
                                         struct page *page)
{
#ifdef CONFIG_MEMCG
        unsigned long memcg_data;
        struct mem_cgroup *memcg;
        bool online;
        char name[80];

        rcu_read_lock();
        memcg_data = READ_ONCE(page->memcg_data);
        if (!memcg_data)
                goto out_unlock;

        if (memcg_data & MEMCG_DATA_OBJCGS)
                ret += scnprintf(kbuf + ret, count - ret,
                                "Slab cache page\n");

        memcg = page_memcg_check(page);
        if (!memcg)
                goto out_unlock;

        online = (memcg->css.flags & CSS_ONLINE);
        cgroup_name(memcg->css.cgroup, name, sizeof(name));
        ret += scnprintf(kbuf + ret, count - ret,
                        "Charged %sto %smemcg %s\n",
                        PageMemcgKmem(page) ? "(via objcg) " : "",
                        online ? "" : "offline ",
                        name);
out_unlock:
        rcu_read_unlock();
#endif /* CONFIG_MEMCG */

        return ret;
}

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_owner *page_owner,
                depot_stack_handle_t handle)
{
        int ret, pageblock_mt, page_mt;
        char *kbuf;

        count = min_t(size_t, count, PAGE_SIZE);
        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = scnprintf(kbuf, count,
                        "Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns\n",
                        page_owner->order, page_owner->gfp_mask,
                        &page_owner->gfp_mask, page_owner->pid,
                        page_owner->tgid, page_owner->comm,
                        page_owner->ts_nsec);

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pageblock_migratetype(page);
        page_mt = gfp_migratetype(page_owner->gfp_mask);
        ret += scnprintf(kbuf + ret, count - ret,
                        "PFN 0x%lx type %s Block %lu type %s Flags %pGp\n",
                        pfn,
                        migratetype_names[page_mt],
                        pfn >> pageblock_order,
                        migratetype_names[pageblock_mt],
                        &page->flags);

        ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
        if (ret >= count)
                goto err;

        if (page_owner->last_migrate_reason != -1) {
                ret += scnprintf(kbuf + ret, count - ret,
                        "Page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
        }

        ret = print_page_owner_memcg(kbuf, count, ret, page);

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}

void __dump_page_owner(const struct page *page)
{
        struct page_ext *page_ext = page_ext_get((void *)page);
        struct page_owner *page_owner;
        depot_stack_handle_t handle;
        gfp_t gfp_mask;
        int mt;

        if (unlikely(!page_ext)) {
                pr_alert("There is no page extension available.\n");
                return;
        }

        page_owner = get_page_owner(page_ext);
        gfp_mask = page_owner->gfp_mask;
        mt = gfp_migratetype(gfp_mask);

        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not present (never set?)\n");
                page_ext_put(page_ext);
                return;
        }

        if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                pr_alert("page_owner tracks the page as allocated\n");
        else
                pr_alert("page_owner tracks the page as freed\n");

        pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
                 page_owner->pid, page_owner->tgid, page_owner->comm,
                 page_owner->ts_nsec, page_owner->free_ts_nsec);

        handle = READ_ONCE(page_owner->handle);
        if (!handle)
                pr_alert("page_owner allocation stack trace missing\n");
        else
                stack_depot_print(handle);

        handle = READ_ONCE(page_owner->free_handle);
        if (!handle) {
                pr_alert("page_owner free stack trace missing\n");
        } else {
                pr_alert("page last free pid %d tgid %d stack trace:\n",
                         page_owner->free_pid, page_owner->free_tgid);
                stack_depot_print(handle);
        }

        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
        page_ext_put(page_ext);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        depot_stack_handle_t handle;

        if (!static_branch_unlikely(&page_owner_inited))
                return -EINVAL;

        page = NULL;
        if (*ppos == 0)
                pfn = min_low_pfn;
        else
                pfn = *ppos;
        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * A temporary copy of page_owner is required so that we
                 * don't hold the RCU lock across operations that may
                 * context switch, such as copy_to_user() or GFP_KERNEL
                 * allocations.
                 */
                struct page_owner page_owner_tmp;

                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = buddy_order_unsafe(page);

                        if (freepage_order <= MAX_PAGE_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = page_ext_get(page);
                if (unlikely(!page_ext))
                        continue;

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        goto ext_put_continue;

                /*
                 * Although we do have the info about past allocation of free
                 * pages, it's not relevant for current memory usage.
                 */
                if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                        goto ext_put_continue;

                page_owner = get_page_owner(page_ext);

                /*
                 * Don't print "tail" pages of high-order allocations as that
                 * would inflate the stats.
                 */
                if (!IS_ALIGNED(pfn, 1 << page_owner->order))
                        goto ext_put_continue;

                /*
                 * Access to page_ext->handle isn't synchronized, so we
                 * must be careful when reading it.
                 */
                handle = READ_ONCE(page_owner->handle);
                if (!handle)
                        goto ext_put_continue;

                /* Record the next PFN to read in the file offset */
                *ppos = pfn + 1;

                page_owner_tmp = *page_owner;
                page_ext_put(page_ext);
                return print_page_owner(buf, count, pfn, page,
                                        &page_owner_tmp, handle);
ext_put_continue:
                page_ext_put(page_ext);
        }

        return 0;
}

static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
        switch (orig) {
        case SEEK_SET:
                file->f_pos = offset;
                break;
        case SEEK_CUR:
                file->f_pos += offset;
                break;
        default:
                return -EINVAL;
        }
        return file->f_pos;
}

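/*
 * Illustrative userspace sketch, not part of the original file: the
 * read/llseek pair above lets a reader seek to a PFN (the file offset is
 * interpreted as a PFN) and stream records from there. Assumes debugfs
 * is mounted at /sys/kernel/debug.
 */
#if 0	/* userspace example, build separately */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/page_owner", O_RDONLY);

        if (fd < 0)
                return 1;
        /* start reading at PFN 0x40000 instead of the first valid PFN */
        lseek(fd, 0x40000, SEEK_SET);
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}
#endif
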
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        unsigned long pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count = 0;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                unsigned long block_end_pfn;

                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = pageblock_end_pfn(pfn);
                block_end_pfn = min(block_end_pfn, end_pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        struct page *page = pfn_to_page(pfn);
                        struct page_ext *page_ext;

                        if (page_zone(page) != zone)
                                continue;

                        /*
                         * To avoid having to grab zone->lock, be a little
                         * careful when reading buddy page order. The only
                         * danger is that we skip too much and potentially miss
                         * some early allocated pages, which is better than
                         * heavy lock contention.
                         */
                        if (PageBuddy(page)) {
                                unsigned long order = buddy_order_unsafe(page);

                                if (order > 0 && order <= MAX_PAGE_ORDER)
                                        pfn += (1UL << order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = page_ext_get(page);
                        if (unlikely(!page_ext))
                                continue;

                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                goto ext_put_continue;

                        /* Found early allocated page */
                        __set_page_owner_handle(page_ext, early_handle,
                                                0, 0);
                        count++;
ext_put_continue:
                        page_ext_put(page_ext);
                }
                cond_resched();
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                init_pages_in_zone(pgdat, zone);
        }
}

static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
        .read = read_page_owner,
        .llseek = lseek_page_owner,
};

static int __init pageowner_init(void)
{
        if (!static_branch_unlikely(&page_owner_inited)) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        debugfs_create_file("page_owner", 0400, NULL, NULL,
                            &proc_page_owner_operations);

        return 0;
}
late_initcall(pageowner_init)