mm/page_owner.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	depot_stack_handle_t free_handle;
	u64 ts_nsec;
	u64 free_ts_nsec;
	char comm[TASK_COMM_LEN];
	pid_t pid;
	pid_t tgid;
	pid_t free_pid;
	pid_t free_tgid;
};

struct stack {
	struct stack_record *stack_record;
	struct stack *next;
};
static struct stack dummy_stack;
static struct stack failure_stack;
static struct stack *stack_list;
static DEFINE_SPINLOCK(stack_list_lock);

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

static inline void set_current_in_page_owner(void)
{
	/*
	 * Avoid recursion.
	 *
	 * We might need to allocate more memory from page_owner code, so make
	 * sure to signal it in order to avoid recursion.
	 */
	current->in_page_owner = 1;
}

static inline void unset_current_in_page_owner(void)
{
	current->in_page_owner = 0;
}

static int __init early_page_owner_param(char *buf)
{
	int ret = kstrtobool(buf, &page_owner_enabled);

	if (page_owner_enabled)
		stack_depot_request_early_init();

	return ret;
}
early_param("page_owner", early_page_owner_param);

static __init bool need_page_owner(void)
{
	return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static __init void init_page_owner(void)
{
	if (!page_owner_enabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	init_early_allocated_pages();
	/* Initialize dummy and failure stacks and link them to stack_list */
	dummy_stack.stack_record = __stack_depot_get_stack_record(dummy_handle);
	failure_stack.stack_record = __stack_depot_get_stack_record(failure_handle);
	if (dummy_stack.stack_record)
		refcount_set(&dummy_stack.stack_record->count, 1);
	if (failure_stack.stack_record)
		refcount_set(&failure_stack.stack_record->count, 1);
	dummy_stack.next = &failure_stack;
	stack_list = &dummy_stack;
	static_branch_enable(&page_owner_inited);
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
	.need_shared_flags = true,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return page_ext_data(page_ext, &page_owner_ops);
}

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	depot_stack_handle_t handle;
	unsigned int nr_entries;

	if (current->in_page_owner)
		return dummy_handle;

	set_current_in_page_owner();
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
	handle = stack_depot_save(entries, nr_entries, flags);
	if (!handle)
		handle = failure_handle;
	unset_current_in_page_owner();

	return handle;
}

static void add_stack_record_to_list(struct stack_record *stack_record,
				     gfp_t gfp_mask)
{
	unsigned long flags;
	struct stack *stack;

	set_current_in_page_owner();
	stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
	if (!stack) {
		unset_current_in_page_owner();
		return;
	}
	unset_current_in_page_owner();

	stack->stack_record = stack_record;
	stack->next = NULL;

	spin_lock_irqsave(&stack_list_lock, flags);
	stack->next = stack_list;
	/*
	 * This pairs with smp_load_acquire() from function
	 * stack_start(). This guarantees that stack_start()
	 * will see an updated stack_list before starting to
	 * traverse the list.
	 */
	smp_store_release(&stack_list, stack);
	spin_unlock_irqrestore(&stack_list_lock, flags);
}

static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,
				   int nr_base_pages)
{
	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

	if (!stack_record)
		return;

	/*
	 * New stack_records that do not use STACK_DEPOT_FLAG_GET start
	 * with REFCOUNT_SATURATED to catch spurious increments of their
	 * refcount.
	 * Since we do not use the STACK_DEPOT_FLAG_GET API, let us
	 * set a refcount of 1 ourselves.
	 */
	if (refcount_read(&stack_record->count) == REFCOUNT_SATURATED) {
		int old = REFCOUNT_SATURATED;

		if (atomic_try_cmpxchg_relaxed(&stack_record->count.refs, &old, 1))
			/* Add the new stack_record to our list */
			add_stack_record_to_list(stack_record, gfp_mask);
	}
	refcount_add(nr_base_pages, &stack_record->count);
}

static void dec_stack_record_count(depot_stack_handle_t handle,
				   int nr_base_pages)
{
	struct stack_record *stack_record = __stack_depot_get_stack_record(handle);

	if (!stack_record)
		return;

	if (refcount_sub_and_test(nr_base_pages, &stack_record->count))
		pr_warn("%s: refcount went to 0 for %u handle\n", __func__,
			handle);
}
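/*
 * Accounting sketch (editor's illustration, not upstream code): an
 * order-3 allocation contributes 1 << 3 = 8 base pages, so
 * __set_page_owner() below calls inc_stack_record_count(handle, gfp, 8)
 * and the matching free in __reset_page_owner() subtracts the same 8.
 * The per-stack refcount therefore tracks outstanding base pages,
 * offset by the initial count of 1.
 */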

static inline void __update_page_owner_handle(struct page_ext *page_ext,
					      depot_stack_handle_t handle,
					      unsigned short order,
					      gfp_t gfp_mask,
					      short last_migrate_reason, u64 ts_nsec,
					      pid_t pid, pid_t tgid, char *comm)
{
	int i;
	struct page_owner *page_owner;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->handle = handle;
		page_owner->order = order;
		page_owner->gfp_mask = gfp_mask;
		page_owner->last_migrate_reason = last_migrate_reason;
		page_owner->pid = pid;
		page_owner->tgid = tgid;
		page_owner->ts_nsec = ts_nsec;
		strscpy(page_owner->comm, comm,
			sizeof(page_owner->comm));
		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_ext = page_ext_next(page_ext);
	}
}

static inline void __update_page_owner_free_handle(struct page_ext *page_ext,
						   depot_stack_handle_t handle,
						   unsigned short order,
						   pid_t pid, pid_t tgid,
						   u64 free_ts_nsec)
{
	int i;
	struct page_owner *page_owner;

	for (i = 0; i < (1 << order); i++) {
		page_owner = get_page_owner(page_ext);
		/* Only __reset_page_owner() wants to clear the bit */
		if (handle) {
			__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
			page_owner->free_handle = handle;
		}
		page_owner->free_ts_nsec = free_ts_nsec;
		page_owner->free_pid = current->pid;
		page_owner->free_tgid = current->tgid;
		page_ext = page_ext_next(page_ext);
	}
}

void __reset_page_owner(struct page *page, unsigned short order)
{
	struct page_ext *page_ext;
	depot_stack_handle_t handle;
	depot_stack_handle_t alloc_handle;
	struct page_owner *page_owner;
	u64 free_ts_nsec = local_clock();

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	alloc_handle = page_owner->handle;

	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
	__update_page_owner_free_handle(page_ext, handle, order, current->pid,
					current->tgid, free_ts_nsec);
	page_ext_put(page_ext);

	if (alloc_handle != early_handle)
		/*
		 * early_handle is being set as a handle for all those
		 * early allocated pages. See init_pages_in_zone().
		 * Since their refcount is not being incremented because
		 * the machinery is not ready yet, we cannot decrement
		 * their refcount either.
		 */
		dec_stack_record_count(alloc_handle, 1 << order);
}

noinline void __set_page_owner(struct page *page, unsigned short order,
			       gfp_t gfp_mask)
{
	struct page_ext *page_ext;
	u64 ts_nsec = local_clock();
	depot_stack_handle_t handle;

	handle = save_stack(gfp_mask);

	page_ext = page_ext_get(page);
	if (unlikely(!page_ext))
		return;
	__update_page_owner_handle(page_ext, handle, order, gfp_mask, -1,
				   ts_nsec, current->pid, current->tgid,
				   current->comm);
	page_ext_put(page_ext);
	inc_stack_record_count(handle, gfp_mask, 1 << order);
}
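/*
 * Context sketch (editor's note, hedged): the page allocator core is
 * expected to reach __set_page_owner() on the allocation path (via the
 * set_page_owner() wrapper in post_alloc_hook()) and __reset_page_owner()
 * on the free path, both gated on the page_owner_inited static key, so
 * the hooks above cost nothing when the feature is off.
 */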

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
	page_ext_put(page_ext);
}

void __split_page_owner(struct page *page, int old_order, int new_order)
{
	int i;
	struct page_ext *page_ext = page_ext_get(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	for (i = 0; i < (1 << old_order); i++) {
		page_owner = get_page_owner(page_ext);
		page_owner->order = new_order;
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
	int i;
	struct page_ext *old_ext;
	struct page_ext *new_ext;
	struct page_owner *old_page_owner;
	struct page_owner *new_page_owner;
	depot_stack_handle_t migrate_handle;

	old_ext = page_ext_get(&old->page);
	if (unlikely(!old_ext))
		return;

	new_ext = page_ext_get(&newfolio->page);
	if (unlikely(!new_ext)) {
		page_ext_put(old_ext);
		return;
	}

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	migrate_handle = new_page_owner->handle;
	__update_page_owner_handle(new_ext, old_page_owner->handle,
				   old_page_owner->order, old_page_owner->gfp_mask,
				   old_page_owner->last_migrate_reason,
				   old_page_owner->ts_nsec, old_page_owner->pid,
				   old_page_owner->tgid, old_page_owner->comm);
	/*
	 * Do not proactively clear PAGE_EXT_OWNER{_ALLOCATED} bits as the folio
	 * will be freed after migration. Keep them until then as they may be
	 * useful.
	 */
	__update_page_owner_free_handle(new_ext, 0, old_page_owner->order,
					old_page_owner->free_pid,
					old_page_owner->free_tgid,
					old_page_owner->free_ts_nsec);
	/*
	 * We linked the original stack to the new folio, we need to do the same
	 * for the new one and the old folio otherwise there will be an imbalance
	 * when subtracting those pages from the stack.
	 */
	for (i = 0; i < (1 << new_page_owner->order); i++) {
		old_page_owner->handle = migrate_handle;
		old_ext = page_ext_next(old_ext);
		old_page_owner = get_page_owner(old_ext);
	}

	page_ext_put(new_ext);
	page_ext_put(old_ext);
}

void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn, block_end_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		page = pfn_to_online_page(pfn);
		if (!page) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			/* The pageblock is online, no need to recheck. */
			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = buddy_order_unsafe(page);
				if (freepage_order <= MAX_PAGE_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
				goto ext_put_continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfp_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				page_ext_put(page_ext);
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
			page_ext_put(page_ext);
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}
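/*
 * Illustrative output line (editor's example; counts invented). This
 * function feeds the mixed-blocks table of /proc/pagetypeinfo, printing
 * one column per migratetype:
 *
 *	Node 0, zone   Normal            1            0            2            0            0            0
 */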

/*
 * Look up memcg information and print it out
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
					 struct page *page)
{
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
	struct mem_cgroup *memcg;
	bool online;
	char name[80];

	rcu_read_lock();
	memcg_data = READ_ONCE(page->memcg_data);
	if (!memcg_data)
		goto out_unlock;

	if (memcg_data & MEMCG_DATA_OBJEXTS)
		ret += scnprintf(kbuf + ret, count - ret,
				"Slab cache page\n");

	memcg = page_memcg_check(page);
	if (!memcg)
		goto out_unlock;

	online = (memcg->css.flags & CSS_ONLINE);
	cgroup_name(memcg->css.cgroup, name, sizeof(name));
	ret += scnprintf(kbuf + ret, count - ret,
			"Charged %sto %smemcg %s\n",
			PageMemcgKmem(page) ? "(via objcg) " : "",
			online ? "" : "offline ",
			name);
out_unlock:
	rcu_read_unlock();
#endif /* CONFIG_MEMCG */

	return ret;
}

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret, pageblock_mt, page_mt;
	char *kbuf;

	count = min_t(size_t, count, PAGE_SIZE);
	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = scnprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask, page_owner->pid,
			page_owner->tgid, page_owner->comm,
			page_owner->ts_nsec);

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfp_migratetype(page_owner->gfp_mask);
	ret += scnprintf(kbuf + ret, count - ret,
			"PFN 0x%lx type %s Block %lu type %s Flags %pGp\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			&page->flags);

	ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += scnprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	}

	ret = print_page_owner_memcg(kbuf, count, ret, page);

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}
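/*
 * Example of one record as formatted above (editor's illustration; all
 * values invented):
 *
 *	Page allocated via order 0, mask 0xcc0(GFP_KERNEL), pid 1, tgid 1 (swapper/0), ts 47983 ns
 *	PFN 0x10837 type Unmovable Block 33 type Unmovable Flags ...
 *	 register_early_stack+0x2c/0x44
 *	 init_page_owner+0x3c/0x1f0
 *	 ...
 *
 * Records are separated by the trailing blank line appended by the
 * final snprintf() above.
 */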

void __dump_page_owner(const struct page *page)
{
	struct page_ext *page_ext = page_ext_get((void *)page);
	struct page_owner *page_owner;
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfp_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not present (never set?)\n");
		page_ext_put(page_ext);
		return;
	}

	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
		pr_alert("page_owner tracks the page as allocated\n");
	else
		pr_alert("page_owner tracks the page as freed\n");

	pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
		 page_owner->pid, page_owner->tgid, page_owner->comm,
		 page_owner->ts_nsec, page_owner->free_ts_nsec);

	handle = READ_ONCE(page_owner->handle);
	if (!handle)
		pr_alert("page_owner allocation stack trace missing\n");
	else
		stack_depot_print(handle);

	handle = READ_ONCE(page_owner->free_handle);
	if (!handle) {
		pr_alert("page_owner free stack trace missing\n");
	} else {
		pr_alert("page last free pid %d tgid %d stack trace:\n",
			 page_owner->free_pid, page_owner->free_tgid);
		stack_depot_print(handle);
	}

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
	page_ext_put(page_ext);
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	if (*ppos == 0)
		pfn = min_low_pfn;
	else
		pfn = *ppos;
	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * This temporary page_owner is required so
		 * that we can avoid the context switches while holding
		 * the rcu lock and copying the page owner information to
		 * user through copy_to_user() or GFP_KERNEL allocations.
		 */
		struct page_owner page_owner_tmp;

		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			if (freepage_order <= MAX_PAGE_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = page_ext_get(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			goto ext_put_continue;

		/*
		 * Although we do have the info about past allocation of free
		 * pages, it's not relevant for current memory usage.
		 */
		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
			goto ext_put_continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Don't print "tail" pages of high-order allocations as that
		 * would inflate the stats.
		 */
		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
			goto ext_put_continue;

		/*
		 * Access to page_ext->handle isn't synchronous so we should
		 * be careful to access it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			goto ext_put_continue;

		/* Record the next PFN to read in the file offset */
		*ppos = pfn + 1;

		page_owner_tmp = *page_owner;
		page_ext_put(page_ext);
		return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle);
ext_put_continue:
		page_ext_put(page_ext);
	}

	return 0;
}

static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case SEEK_SET:
		file->f_pos = offset;
		break;
	case SEEK_CUR:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	return file->f_pos;
}
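/*
 * Seek semantics sketch (editor's note): the file offset doubles as a
 * PFN cursor in read_page_owner(), so userspace can jump straight to a
 * physical region before reading, e.g. (illustrative):
 *
 *	fd = open("/sys/kernel/debug/page_owner", O_RDONLY);
 *	lseek(fd, target_pfn, SEEK_SET);
 *	read(fd, buf, sizeof(buf));   // first record at or after target_pfn
 */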

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = pageblock_end_pfn(pfn);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page = pfn_to_page(pfn);
			struct page_ext *page_ext;

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = buddy_order_unsafe(page);

				if (order > 0 && order <= MAX_PAGE_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = page_ext_get(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				goto ext_put_continue;

			/* Found early allocated page */
			__update_page_owner_handle(page_ext, early_handle, 0, 0,
						   -1, local_clock(), current->pid,
						   current->tgid, current->comm);
			count++;
ext_put_continue:
			page_ext_put(page_ext);
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
	.llseek = lseek_page_owner,
};

static void *stack_start(struct seq_file *m, loff_t *ppos)
{
	struct stack *stack;

	if (*ppos == -1UL)
		return NULL;

	if (!*ppos) {
		/*
		 * This pairs with smp_store_release() from function
		 * add_stack_record_to_list(), so we get a consistent
		 * value of stack_list.
		 */
		stack = smp_load_acquire(&stack_list);
		m->private = stack;
	} else {
		stack = m->private;
	}

	return stack;
}

static void *stack_next(struct seq_file *m, void *v, loff_t *ppos)
{
	struct stack *stack = v;

	stack = stack->next;
	*ppos = stack ? *ppos + 1 : -1UL;
	m->private = stack;

	return stack;
}

static unsigned long page_owner_pages_threshold;

static int stack_print(struct seq_file *m, void *v)
{
	int i, nr_base_pages;
	struct stack *stack = v;
	unsigned long *entries;
	unsigned long nr_entries;
	struct stack_record *stack_record = stack->stack_record;

	if (!stack->stack_record)
		return 0;

	nr_entries = stack_record->size;
	entries = stack_record->entries;
	nr_base_pages = refcount_read(&stack_record->count) - 1;

	if (nr_base_pages < 1 || nr_base_pages < page_owner_pages_threshold)
		return 0;

	for (i = 0; i < nr_entries; i++)
		seq_printf(m, " %pS\n", (void *)entries[i]);
	seq_printf(m, "nr_base_pages: %d\n\n", nr_base_pages);

	return 0;
}
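/*
 * Illustrative show_stacks entry (editor's example; symbols and count
 * invented):
 *
 *	 post_alloc_hook+0x177/0x1a0
 *	 get_page_from_freelist+0xd01/0xd80
 *	 __alloc_pages+0x39e/0x7e0
 *	 ...
 *	nr_base_pages: 963
 *
 * One block is printed per recorded stack whose outstanding base-page
 * count meets the count_threshold.
 */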

static void stack_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations page_owner_stack_op = {
	.start = stack_start,
	.next = stack_next,
	.stop = stack_stop,
	.show = stack_print
};

static int page_owner_stack_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &page_owner_stack_op, 0);
}

static const struct file_operations page_owner_stack_operations = {
	.open = page_owner_stack_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int page_owner_threshold_get(void *data, u64 *val)
{
	*val = READ_ONCE(page_owner_pages_threshold);
	return 0;
}

static int page_owner_threshold_set(void *data, u64 val)
{
	WRITE_ONCE(page_owner_pages_threshold, val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(proc_page_owner_threshold, &page_owner_threshold_get,
			&page_owner_threshold_set, "%llu");

static int __init pageowner_init(void)
{
	struct dentry *dir;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	debugfs_create_file("page_owner", 0400, NULL, NULL,
			    &proc_page_owner_operations);
	dir = debugfs_create_dir("page_owner_stacks", NULL);
	debugfs_create_file("show_stacks", 0400, dir, NULL,
			    &page_owner_stack_operations);
	debugfs_create_file("count_threshold", 0600, dir, NULL,
			    &proc_page_owner_threshold);

	return 0;
}
late_initcall(pageowner_init)
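/*
 * End-to-end usage sketch (editor's illustration, assuming debugfs is
 * mounted at /sys/kernel/debug and the kernel booted with page_owner=on):
 *
 *	# full per-page dump (can be large)
 *	cat /sys/kernel/debug/page_owner > page_owner_full.txt
 *
 *	# aggregated view: stacks with >= 1000 outstanding base pages
 *	echo 1000 > /sys/kernel/debug/page_owner_stacks/count_threshold
 *	cat /sys/kernel/debug/page_owner_stacks/show_stacks
 *
 * The page_owner_sort helper in the kernel tree (tools/mm/) can then
 * sort the full dump, e.g. by allocation count.
 */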