mm/memory-failure.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or
 * cache failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping
 * the use of not-yet-corrupted-but-suspicious pages without killing
 * anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/mm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity with the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids
 * impacting the core VM.
 */

#define pr_fmt(fmt) "Memory failure: " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/dax.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include <linux/sysctl.h>
#include "swap.h"
#include "internal.h"
#include "ras/ras_event.h"

static int sysctl_memory_failure_early_kill __read_mostly;

static int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

static bool hw_memory_failure __read_mostly = false;

static DEFINE_MUTEX(mf_mutex);

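/*
 * Poison accounting: a global counter plus a per-memory-block counter
 * (the latter is consumed by the memory hotplug/sysfs code outside this
 * file). A pfn of -1UL means "unknown pfn" and skips the per-block update.
 */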
void num_poisoned_pages_inc(unsigned long pfn)
{
	atomic_long_inc(&num_poisoned_pages);
	memblk_nr_poison_inc(pfn);
}

void num_poisoned_pages_sub(unsigned long pfn, long i)
{
	atomic_long_sub(i, &num_poisoned_pages);
	if (pfn != -1UL)
		memblk_nr_poison_sub(pfn, i);
}

/**
 * MF_ATTR_RO - Create a sysfs entry for each memory failure statistic.
 * @_name: name of the file in the per-NUMA-node sysfs directory.
 */
#define MF_ATTR_RO(_name)					\
static ssize_t _name##_show(struct device *dev,		\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct memory_failure_stats *mf_stats =		\
		&NODE_DATA(dev->id)->mf_stats;			\
	return sprintf(buf, "%lu\n", mf_stats->_name);		\
}								\
static DEVICE_ATTR_RO(_name)

MF_ATTR_RO(total);
MF_ATTR_RO(ignored);
MF_ATTR_RO(failed);
MF_ATTR_RO(delayed);
MF_ATTR_RO(recovered);

static struct attribute *memory_failure_attr[] = {
	&dev_attr_total.attr,
	&dev_attr_ignored.attr,
	&dev_attr_failed.attr,
	&dev_attr_delayed.attr,
	&dev_attr_recovered.attr,
	NULL,
};

const struct attribute_group memory_failure_attr_group = {
	.name = "memory_failure",
	.attrs = memory_failure_attr,
};

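/*
 * memory_failure_attr_group is registered for each NUMA node by the node
 * sysfs setup code (outside this excerpt), so the counters above typically
 * appear as /sys/devices/system/node/node<N>/memory_failure/<counter>.
 */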
static struct ctl_table memory_failure_table[] = {
	{
		.procname	= "memory_failure_early_kill",
		.data		= &sysctl_memory_failure_early_kill,
		.maxlen		= sizeof(sysctl_memory_failure_early_kill),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "memory_failure_recovery",
		.data		= &sysctl_memory_failure_recovery,
		.maxlen		= sizeof(sysctl_memory_failure_recovery),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

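/*
 * The table above is registered under the "vm" sysctl directory (via
 * register_sysctl_init("vm", ...) later in this file, outside this
 * excerpt), so the knobs are reachable from user space as, for example:
 *
 *	echo 1 > /proc/sys/vm/memory_failure_early_kill
 *	cat /proc/sys/vm/memory_failure_recovery
 */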
/*
 * Return values:
 *   1: the page is dissolved (if needed) and taken off from buddy,
 *   0: the page is dissolved (if needed) and not taken off from buddy,
 * < 0: failed to dissolve.
 */
static int __page_handle_poison(struct page *page)
{
	int ret;

	zone_pcp_disable(page_zone(page));
	ret = dissolve_free_huge_page(page);
	if (!ret)
		ret = take_page_off_buddy(page);
	zone_pcp_enable(page_zone(page));

	return ret;
}

static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * Doing this check for free pages is also fine since
		 * dissolve_free_huge_page() returns 0 for non-hugetlb
		 * pages as well.
		 */
		if (__page_handle_poison(page) <= 0)
			/*
			 * We could fail to take off the target page from
			 * buddy, for example due to racy page allocation,
			 * but that's acceptable because a soft-offlined
			 * page is not broken and if someone really wants
			 * to use it, they should take it.
			 */
			return false;
	}

	SetPageHWPoison(page);
	if (release)
		put_page(page);
	page_ref_inc(page);
	num_poisoned_pages_inc(page_to_pfn(page));

	return true;
}

#if IS_ENABLED(CONFIG_HWPOISON_INJECT)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

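/*
 * Filter by device: skip (return -EINVAL for) pages whose backing inode
 * does not match the configured major/minor numbers. Returns 0 when no
 * device filter is configured or the device matches.
 */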
static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

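/*
 * Filter by page flags: return 0 when no mask is configured or when the
 * page's stable flags masked by hwpoison_filter_flags_mask equal
 * hwpoison_filter_flags_value, and -EINVAL otherwise.
 */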
static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

/*
 * This allows stress tests to limit the test scope to a collection of
 * tasks by putting them under some memcg. This prevents killing
 * unrelated/important processes such as /sbin/init. Note that the target
 * task may share clean pages with init (eg. libc text), which is
 * harmless. If the target task shares _dirty_ pages with another task B,
 * the test scheme must make sure B is also included in the memcg. Lastly,
 * due to race conditions this filter can only guarantee that the page
 * either belongs to the memcg tasks, or is a freed page.
 */
#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	if (!hwpoison_filter_memcg)
		return 0;

	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

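/*
 * hwpoison_filter() is consulted by the error-injection paths so stress
 * tests can constrain which pages get poisoned. With CONFIG_HWPOISON_INJECT
 * the knobs are exposed through debugfs; a hedged sketch (file names as
 * documented in Documentation/mm/hwpoison.rst, not verified here):
 *
 *	echo 1 > /sys/kernel/debug/hwpoison/hwpoison-filter-enable
 *	echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn
 */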
/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	short size_shift;
};

/*
 * Send all the processes who have the page mapped a signal.
 * ``action optional'' if they are not immediately affected by the error
 * ``action required'' if the error happened in the current execution context
 */
static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
{
	struct task_struct *t = tk->tsk;
	short addr_lsb = tk->size_shift;
	int ret = 0;

	pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
	       pfn, t->comm, t->pid);

	if ((flags & MF_ACTION_REQUIRED) && (t == current))
		ret = force_sig_mceerr(BUS_MCEERR_AR,
				       (void __user *)tk->addr, addr_lsb);
	else
		/*
		 * Signal other processes sharing the page if they have
		 * PF_MCE_EARLY set.
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 * This could cause a loop when the user sets SIGBUS
		 * to SIG_IGN, but hopefully no one will do that?
		 */
		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
				      addr_lsb, t);
	if (ret < 0)
		pr_info("Error sending signal to %s:%d: %d\n",
			t->comm, t->pid, ret);
	return ret;
}

/*
 * Unknown page type encountered. Try to check whether it can become
 * PageLRU by draining the per-CPU LRU caches via lru_add_drain_all().
 */
void shake_page(struct page *p)
{
	if (PageHuge(p))
		return;
	/*
	 * TODO: Could shrink slab caches here if a lightweight range-based
	 * shrinker becomes available.
	 */
	if (PageSlab(p))
		return;

	lru_add_drain_all();
}
EXPORT_SYMBOL_GPL(shake_page);

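/*
 * Walk the page tables at @address and return the shift of the mapping
 * level (PUD/PMD/PTE) backing a device page, or 0 when no devmap mapping
 * is found. The result sizes the SIGBUS payload (si_addr_lsb).
 */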
static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
					       unsigned long address)
{
	unsigned long ret = 0;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t ptent;

	VM_BUG_ON_VMA(address == -EFAULT, vma);
	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return 0;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;
	if (pud_devmap(*pud))
		return PUD_SHIFT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;
	if (pmd_devmap(*pmd))
		return PMD_SHIFT;
	pte = pte_offset_map(pmd, address);
	if (!pte)
		return 0;
	ptent = ptep_get(pte);
	if (pte_present(ptent) && pte_devmap(ptent))
		ret = PAGE_SHIFT;
	pte_unmap(pte);
	return ret;
}

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
 */

#define FSDAX_INVALID_PGOFF	ULONG_MAX

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 *
 * Note: @fsdax_pgoff is used only when @p is a fsdax page and a
 * filesystem with a memory failure handler has claimed the
 * memory_failure event. In all other cases, page->index and
 * page->mapping are sufficient for mapping the page back to its
 * corresponding user virtual address.
 */
static void __add_to_kill(struct task_struct *tsk, struct page *p,
			  struct vm_area_struct *vma, struct list_head *to_kill,
			  unsigned long ksm_addr, pgoff_t fsdax_pgoff)
{
	struct to_kill *tk;

	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
	if (!tk) {
		pr_err("Out of memory while machine check handling\n");
		return;
	}

	tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
	if (is_zone_device_page(p)) {
		if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
	} else
		tk->size_shift = page_shift(compound_head(p));

	/*
	 * Send SIGKILL if "tk->addr == -EFAULT". Also, "tk->size_shift"
	 * is always non-zero for !is_zone_device_page(), so
	 * "tk->size_shift == 0" effectively checks for no mapping on
	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
	 * to a process' address space, it's possible not all N VMAs
	 * contain mappings for the page, but at least one VMA does.
	 * Only deliver SIGBUS with payload derived from the VMA that
	 * has a mapping for the page.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
	} else if (tk->size_shift == 0) {
		kfree(tk);
		return;
	}

	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
				  struct vm_area_struct *vma,
				  struct list_head *to_kill)
{
	__add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF);
}

#ifdef CONFIG_KSM
static bool task_in_to_kill_list(struct list_head *to_kill,
				 struct task_struct *tsk)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe(tk, next, to_kill, nd) {
		if (tk->tsk == tsk)
			return true;
	}

	return false;
}

void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long ksm_addr)
{
	if (!task_in_to_kill_list(to_kill, tsk))
		__add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF);
}
#endif

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the
 * list (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
		       unsigned long pfn, int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe(tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr == -EFAULT) {
				pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
						 tk->tsk, PIDTYPE_PID);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc(tk, pfn, flags) < 0)
				pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
		}
		list_del(&tk->nd);
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold rcu lock in the caller, so we don't have to call
 * rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t) {
		if (t->flags & PF_MCE_PROCESS) {
			if (t->flags & PF_MCE_EARLY)
				return t;
		} else {
			if (sysctl_memory_failure_early_kill)
				return t;
		}
	}
	return NULL;
}

/*
 * Determine whether a given process is an "early kill" process which
 * expects to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill", and NULL otherwise.
 *
 * Note that the above is true for the Action Optional case. For the Action
 * Required case, it's only meaningful to the current thread, which needs to
 * be signaled with SIGBUS. The error is Action Optional for other,
 * non-current processes sharing the same error page; if such a process is
 * "early kill", the task_struct of its dedicated thread will also be
 * returned.
 */
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
{
	if (!tsk->mm)
		return NULL;
	/*
	 * Comparing ->mm here because the current task might represent
	 * a subthread, while tsk always points to the main thread.
	 */
	if (force_early && tsk->mm == current->mm)
		return current;

	return find_early_kill_thread(tsk);
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			       int force_early)
{
	struct folio *folio = page_folio(page);
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = folio_lock_anon_vma_read(folio, NULL);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_to_pgoff(page);
	rcu_read_lock();
	for_each_process(tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (vma->vm_mm != t->mm)
				continue;
			if (!page_mapped_in_vma(page, vma))
				continue;
			add_to_kill_anon_file(t, page, vma, to_kill);
		}
	}
	rcu_read_unlock();
	anon_vma_unlock_read(av);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			       int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff;

	i_mmap_lock_read(mapping);
	rcu_read_lock();
	pgoff = page_to_pgoff(page);
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
					  pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == t->mm)
				add_to_kill_anon_file(t, page, vma, to_kill);
		}
	}
	rcu_read_unlock();
	i_mmap_unlock_read(mapping);
}

#ifdef CONFIG_FS_DAX
static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
			      struct vm_area_struct *vma,
			      struct list_head *to_kill, pgoff_t pgoff)
{
	__add_to_kill(tsk, p, vma, to_kill, 0, pgoff);
}

/*
 * Collect processes when the error hit a fsdax page.
 */
static void collect_procs_fsdax(struct page *page,
				struct address_space *mapping, pgoff_t pgoff,
				struct list_head *to_kill)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;

	i_mmap_lock_read(mapping);
	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, true);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
			if (vma->vm_mm == t->mm)
				add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
		}
	}
	rcu_read_unlock();
	i_mmap_unlock_read(mapping);
}
#endif /* CONFIG_FS_DAX */

/*
 * Collect the processes who have the corrupted page mapped to kill.
 */
static void collect_procs(struct page *page, struct list_head *tokill,
			  int force_early)
{
	if (!page->mapping)
		return;
	if (unlikely(PageKsm(page)))
		collect_procs_ksm(page, tokill, force_early);
	else if (PageAnon(page))
		collect_procs_anon(page, tokill, force_early);
	else
		collect_procs_file(page, tokill, force_early);
}

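/*
 * Context for the page-table walk done by kill_accessing_process() below:
 * @pfn is the poisoned pfn being searched for; on a hit, @tk records the
 * faulting virtual address and mapping size for the SIGBUS payload.
 */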
struct hwpoison_walk {
	struct to_kill tk;
	unsigned long pfn;
	int flags;
};

static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
{
	tk->addr = addr;
	tk->size_shift = shift;
}

static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
				  unsigned long poisoned_pfn, struct to_kill *tk)
{
	unsigned long pfn = 0;

	if (pte_present(pte)) {
		pfn = pte_pfn(pte);
	} else {
		swp_entry_t swp = pte_to_swp_entry(pte);

		if (is_hwpoison_entry(swp))
			pfn = swp_offset_pfn(swp);
	}

	if (!pfn || pfn != poisoned_pfn)
		return 0;

	set_to_kill(tk, addr, shift);
	return 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwpoison_walk *hwp)
{
	pmd_t pmd = *pmdp;
	unsigned long pfn;
	unsigned long hwpoison_vaddr;

	if (!pmd_present(pmd))
		return 0;
	pfn = pmd_pfn(pmd);
	if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
		hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
		set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
		return 1;
	}
	return 0;
}
#else
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwpoison_walk *hwp)
{
	return 0;
}
#endif

static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
{
	struct hwpoison_walk *hwp = walk->private;
	int ret = 0;
	pte_t *ptep, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmdp, walk->vma);
	if (ptl) {
		ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
		spin_unlock(ptl);
		goto out;
	}

	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
						addr, &ptl);
	if (!ptep)
		goto out;

	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT,
					     hwp->pfn, &hwp->tk);
		if (ret == 1)
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
out:
	cond_resched();
	return ret;
}

#ifdef CONFIG_HUGETLB_PAGE
static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
				  unsigned long addr, unsigned long end,
				  struct mm_walk *walk)
{
	struct hwpoison_walk *hwp = walk->private;
	pte_t pte = huge_ptep_get(ptep);
	struct hstate *h = hstate_vma(walk->vma);

	return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
				      hwp->pfn, &hwp->tk);
}
#else
#define hwpoison_hugetlb_range	NULL
#endif

static const struct mm_walk_ops hwpoison_walk_ops = {
	.pmd_entry = hwpoison_pte_range,
	.hugetlb_entry = hwpoison_hugetlb_range,
	.walk_lock = PGWALK_RDLOCK,
};

/*
 * Sends SIGBUS to the current process with error info.
 *
 * This function is intended to handle "Action Required" MCEs on already
 * hardware poisoned pages. They could happen, for example, when
 * memory_failure() failed to unmap the error page at the first call, or
 * when multiple local machine checks happened on different CPUs.
 *
 * The MCE handler currently has no easy access to the error virtual
 * address, so this function walks the page tables to find it. The returned
 * virtual address is proper in most cases, but it could be wrong when the
 * application process has multiple entries mapping the error page.
 */
static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
				  int flags)
{
	int ret;
	struct hwpoison_walk priv = {
		.pfn = pfn,
	};
	priv.tk.tsk = p;

	if (!p->mm)
		return -EFAULT;

	mmap_read_lock(p->mm);
	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
			      (void *)&priv);
	if (ret == 1 && priv.tk.addr)
		kill_proc(&priv.tk, pfn, flags);
	else
		ret = 0;
	mmap_read_unlock(p->mm);
	return ret > 0 ? -EHWPOISON : -EFAULT;
}

static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};

static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_SLAB]			= "kernel slab page",
	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_DAX]			= "dax page",
	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
	[MF_MSG_UNKNOWN]		= "unknown page",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(page_folio(p));

		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		put_page(p);
		return 0;
	}
	return -EIO;
}

static int truncate_error_page(struct page *p, unsigned long pfn,
			       struct address_space *mapping)
{
	int ret = MF_FAILED;

	if (mapping->a_ops->error_remove_page) {
		struct folio *folio = page_folio(p);
		int err = mapping->a_ops->error_remove_page(mapping, p);

		if (err != 0)
			pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
		else if (!filemap_release_folio(folio, GFP_NOIO))
			pr_info("%#lx: failed to release buffers\n", pfn);
		else
			ret = MF_RECOVERED;
	} else {
		/*
		 * If the file system doesn't support it, just invalidate.
		 * This fails on dirty pages or anything with private pages.
		 */
		if (invalidate_inode_page(p))
			ret = MF_RECOVERED;
		else
			pr_info("%#lx: Failed to invalidate\n", pfn);
	}

	return ret;
}

struct page_state {
	unsigned long mask;
	unsigned long res;
	enum mf_action_page_type type;

	/* Callback ->action() has to unlock the relevant page inside it. */
	int (*action)(struct page_state *ps, struct page *p);
};

/*
 * Return true if the page is still referenced by others, otherwise return
 * false.
 *
 * The extra_pins is true when one extra refcount is expected.
 */
static bool has_extra_refcount(struct page_state *ps, struct page *p,
			       bool extra_pins)
{
	int count = page_count(p) - 1;

	if (extra_pins)
		count -= 1;

	if (count > 0) {
		pr_err("%#lx: %s still referenced by %d users\n",
		       page_to_pfn(p), action_page_types[ps->type], count);
		return true;
	}

	return false;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page_state *ps, struct page *p)
{
	unlock_page(p);
	return MF_IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page_state *ps, struct page *p)
{
	pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
	unlock_page(p);
	return MF_FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page_state *ps, struct page *p)
{
	int ret;
	struct address_space *mapping;
	bool extra_pins;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p)) {
		ret = MF_RECOVERED;
		goto out;
	}

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meanwhile.
		 */
		ret = MF_FAILED;
		goto out;
	}

	/*
	 * The shmem page is kept in page cache instead of being truncated,
	 * so it is expected to have an extra refcount after error-handling.
	 */
	extra_pins = shmem_mapping(mapping);

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_rwsem or not for this? Right now we don't.
	 */
	ret = truncate_error_page(p, page_to_pfn(p), mapping);
	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

out:
	unlock_page(p);

	return ret;
}

/*
 * Dirty pagecache page
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page_state *ps, struct page *p)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO error
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd before,
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, -EIO);
	}

	return me_pagecache_clean(ps, p);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (ie. page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear dirty bit to prevent IO
 *      - remove from LRU
 *      - but keep in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page_state *ps, struct page *p)
{
	int ret;
	bool extra_pins = false;

	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
	unlock_page(p);

	if (ret == MF_DELAYED)
		extra_pins = true;

	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

	return ret;
}

static int me_swapcache_clean(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int ret;

	delete_from_swap_cache(folio);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
	folio_unlock(folio);

	if (has_extra_refcount(ps, p, false))
		ret = MF_FAILED;

	return ret;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
 *   To narrow down kill region to one page, we need to break up pmd.
 */
static int me_huge_page(struct page_state *ps, struct page *p)
{
	int res;
	struct page *hpage = compound_head(p);
	struct address_space *mapping;
	bool extra_pins = false;

	mapping = page_mapping(hpage);
	if (mapping) {
		res = truncate_error_page(hpage, page_to_pfn(p), mapping);
		/* The page is kept in page cache. */
		extra_pins = true;
		unlock_page(hpage);
	} else {
		unlock_page(hpage);
		/*
		 * migration entry prevents later access on error hugepage,
		 * so we can free and dissolve it into buddy to save healthy
		 * subpages.
		 */
		put_page(hpage);
		if (__page_handle_poison(p) >= 0) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		} else {
			res = MF_FAILED;
		}
	}

	if (has_extra_refcount(ps, p, extra_pins))
		res = MF_FAILED;

	return res;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access the page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define lru		(1UL << PG_lru)
#define head		(1UL << PG_head)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },

	{ head,		head,		MF_MSG_HUGE,	me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef lru
#undef head
#undef slab
#undef reserved

static void update_per_node_mf_stats(unsigned long pfn,
				     enum mf_result result)
{
	int nid = MAX_NUMNODES;
	struct memory_failure_stats *mf_stats = NULL;

	nid = pfn_to_nid(pfn);
	if (unlikely(nid < 0 || nid >= MAX_NUMNODES)) {
		WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid);
		return;
	}

	mf_stats = &NODE_DATA(nid)->mf_stats;
	switch (result) {
	case MF_IGNORED:
		++mf_stats->ignored;
		break;
	case MF_FAILED:
		++mf_stats->failed;
		break;
	case MF_DELAYED:
		++mf_stats->delayed;
		break;
	case MF_RECOVERED:
		++mf_stats->recovered;
		break;
	default:
		WARN_ONCE(1, "Memory failure: mf_result=%d is not properly handled", result);
		break;
	}
	++mf_stats->total;
}

/*
 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
 */
static int action_result(unsigned long pfn, enum mf_action_page_type type,
			 enum mf_result result)
{
	trace_memory_failure_event(pfn, type, result);

	num_poisoned_pages_inc(pfn);

	update_per_node_mf_stats(pfn, result);

	pr_err("%#lx: recovery action for %s: %s\n",
	       pfn, action_page_types[type], action_name[result]);

	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}

static int page_action(struct page_state *ps, struct page *p,
		       unsigned long pfn)
{
	int result;

	/* page p should be unlocked after returning from ps->action(). */
	result = ps->action(ps, p);

	/* Could do more checks here if page looks ok */
	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return action_result(pfn, ps->type, result);
}

static inline bool PageHWPoisonTakenOff(struct page *page)
{
	return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
}

void SetPageHWPoisonTakenOff(struct page *page)
{
	set_page_private(page, MAGIC_HWPOISON);
}

void ClearPageHWPoisonTakenOff(struct page *page)
{
	if (PageHWPoison(page))
		set_page_private(page, 0);
}

/*
 * Return true if the page type of a given page is supported by the hwpoison
 * mechanism (while handling could still fail), otherwise false. This function
 * does not return true for hugetlb or device memory pages, so it's assumed
 * to be called only in a context where we never have such pages.
 */
static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
{
	/* Soft offline could migrate non-LRU movable pages */
	if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
		return true;

	return PageLRU(page) || is_free_buddy_page(page);
}

static int __get_hwpoison_page(struct page *page, unsigned long flags)
{
	struct folio *folio = page_folio(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false);
	if (hugetlb) {
		/* Make sure hugetlb demotion did not happen from under us. */
		if (folio == page_folio(page))
			return ret;
		if (ret > 0) {
			folio_put(folio);
			folio = page_folio(page);
		}
	}

	/*
	 * This check prevents calling folio_try_get() for any
	 * unsupported type of folio in order to reduce the risk of unexpected
	 * races caused by taking a folio refcount.
	 */
	if (!HWPoisonHandlable(&folio->page, flags))
		return -EBUSY;

	if (folio_try_get(folio)) {
		if (folio == page_folio(page))
			return 1;

		pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
		folio_put(folio);
	}

	return 0;
}

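/*
 * Grab a refcount on @p for error handling, retrying a few rounds (with
 * shake_page() in between) when racing with allocation, freeing, or a
 * temporarily unhandlable page state. See get_hwpoison_page() below for
 * the meaning of the return values.
 */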
static int get_any_page(struct page *p, unsigned long flags)
{
	int ret = 0, pass = 0;
	bool count_increased = false;

	if (flags & MF_COUNT_INCREASED)
		count_increased = true;

try_again:
	if (!count_increased) {
		ret = __get_hwpoison_page(p, flags);
		if (!ret) {
			if (page_count(p)) {
				/* We raced with an allocation, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EBUSY;
			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
				/* We raced with put_page, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EIO;
			}
			goto out;
		} else if (ret == -EBUSY) {
			/*
			 * We raced with a (possibly temporarily) unhandlable
			 * page, retry.
			 */
			if (pass++ < 3) {
				shake_page(p);
				goto try_again;
			}
			ret = -EIO;
			goto out;
		}
	}

	if (PageHuge(p) || HWPoisonHandlable(p, flags)) {
		ret = 1;
	} else {
		/*
		 * A page we cannot handle. Check whether we can turn
		 * it into something we can handle.
		 */
		if (pass++ < 3) {
			put_page(p);
			shake_page(p);
			count_increased = false;
			goto try_again;
		}
		put_page(p);
		ret = -EIO;
	}
out:
	if (ret == -EIO)
		pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));

	return ret;
}

static int __get_unpoison_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true);
	if (hugetlb) {
		/* Make sure hugetlb demotion did not happen from under us. */
		if (folio == page_folio(page))
			return ret;
		if (ret > 0)
			folio_put(folio);
	}

	/*
	 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
	 * but also isolated from the buddy freelist, so we need to identify
	 * the state and cancel both operations to unpoison.
	 */
	if (PageHWPoisonTakenOff(page))
		return -EHWPOISON;

	return get_page_unless_zero(page) ? 1 : 0;
}

/**
 * get_hwpoison_page() - Get refcount for memory error handling
 * @p: Raw error page (hit by memory error)
 * @flags: Flags controlling behavior of error handling
 *
 * get_hwpoison_page() takes a page refcount of an error page to handle a
 * memory error on it, after checking that the error page is in a
 * well-defined state (defined as a page type on which we can successfully
 * handle the memory error, such as an LRU page or a hugetlb page).
 *
 * Memory error handling could be triggered at any time on any type of page,
 * so it's prone to race with the typical memory management lifecycle (like
 * allocation and free). So to avoid such races, get_hwpoison_page() takes
 * extra care for the error page's state (as done in __get_hwpoison_page()),
 * and has some retry logic in get_any_page().
 *
 * When called from unpoison_memory(), the caller should already ensure that
 * the given page has PG_hwpoison. So it's never reused for other page
 * allocations, and __get_unpoison_page() never races with them.
 *
 * Return: 0 on failure,
 *         1 on success for in-use pages in a well-defined state,
 *         -EIO for pages on which we can not handle memory errors,
 *         -EBUSY when get_hwpoison_page() has raced with page lifecycle
 *         operations like allocation and free,
 *         -EHWPOISON when the page is hwpoisoned and taken off from buddy.
 */
static int get_hwpoison_page(struct page *p, unsigned long flags)
{
	int ret;

	zone_pcp_disable(page_zone(p));
	if (flags & MF_UNPOISON)
		ret = __get_unpoison_page(p);
	else
		ret = get_any_page(p, flags);
	zone_pcp_enable(page_zone(p));

	return ret;
}

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
				   int flags, struct page *hpage)
{
	struct folio *folio = page_folio(hpage);
	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	bool unmap_success;
	int forcekill;
	bool mlocked = PageMlocked(hpage);

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
		return true;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return true;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return true;

	if (PageSwapCache(p)) {
		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
		ttu &= ~TTU_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
	    mapping_can_writeback(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			ttu &= ~TTU_HWPOISON;
			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 */
	collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	if (PageHuge(hpage) && !PageAnon(hpage)) {
		/*
		 * For hugetlb pages in shared mappings, try_to_unmap
		 * could potentially call huge_pmd_unshare. Because of
		 * this, take semaphore in write mode here and set
		 * TTU_RMAP_LOCKED to indicate we have taken the lock
		 * at this higher level.
		 */
		mapping = hugetlb_page_mapping_lock_write(hpage);
		if (mapping) {
			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
			i_mmap_unlock_write(mapping);
		} else
			pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
	} else {
		try_to_unmap(folio, ttu);
	}

	unmap_success = !page_mapped(hpage);
	if (!unmap_success)
		pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(hpage));

	/*
	 * try_to_unmap() might put mlocked page in lru cache, so call
	 * shake_page() again to ensure that it's flushed.
	 */
	if (mlocked)
		shake_page(hpage);

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps are done we can decide if
	 * killing is needed or not. Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
		    !unmap_success;
	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);

	return unmap_success;
}

static int identify_page_state(unsigned long pfn, struct page *p,
			       unsigned long page_flags)
{
	struct page_state *ps;

	/*
	 * The first check uses the current page flags which may not have any
	 * relevant information. The second check with the saved page flags is
	 * carried out only if the first check can't determine the page status.
	 */
	for (ps = error_states;; ps++)
		if ((p->flags & ps->mask) == ps->res)
			break;

	page_flags |= (p->flags & (1UL << PG_dirty));

	if (!ps->mask)
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	return page_action(ps, p, pfn);
}

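/*
 * Split a THP so the error can be contained in a single base page. On
 * split failure the refcount held by the caller is dropped via put_page().
 */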
static int try_to_split_thp_page(struct page *page)
{
	int ret;

	lock_page(page);
	ret = split_huge_page(page);
	unlock_page(page);

	if (unlikely(ret))
		put_page(page);

	return ret;
}

00cc790e
SR
1691static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
1692 struct address_space *mapping, pgoff_t index, int flags)
1693{
1694 struct to_kill *tk;
1695 unsigned long size = 0;
1696
1697 list_for_each_entry(tk, to_kill, nd)
1698 if (tk->size_shift)
1699 size = max(size, 1UL << tk->size_shift);
1700
1701 if (size) {
1702 /*
1703 * Unmap the largest mapping to avoid breaking up device-dax
1704 * mappings which are constant size. The actual size of the
1705 * mapping being torn down is communicated in siginfo, see
1706 * kill_proc()
1707 */
1708 loff_t start = (index << PAGE_SHIFT) & ~(size - 1);
1709
1710 unmap_mapping_range(mapping, start, size, 0);
1711 }
1712
1713 kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
1714}
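
/*
 * Worked example for the rounding in unmap_and_kill(), with illustrative
 * numbers and assuming PAGE_SHIFT == 12: for a 2MB device-dax mapping,
 * size = 1UL << 21.  An index of 0x201 gives byte offset 0x201000, and
 * 0x201000 & ~(0x200000 - 1) == 0x200000, i.e. start is aligned back down
 * to the 2MB boundary so the whole mapping is torn down in one
 * unmap_mapping_range() call.
 */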

static int mf_generic_kill_procs(unsigned long long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	struct page *page = pfn_to_page(pfn);
	LIST_HEAD(to_kill);
	dax_entry_t cookie;
	int rc = 0;

	/*
	 * Pages instantiated by device-dax (not filesystem-dax)
	 * may be compound pages.
	 */
	page = compound_head(page);

	/*
	 * Prevent the inode from being freed while we are interrogating
	 * the address_space, typically this would be handled by
	 * lock_page(), but dax pages do not use the page lock. This
	 * also prevents changes to the mapping of this pfn until
	 * poison signaling is complete.
	 */
	cookie = dax_lock_page(page);
	if (!cookie)
		return -EBUSY;

	if (hwpoison_filter(page)) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_COHERENT:
		/*
		 * TODO: Handle device pages which may need coordination
		 * with device-side memory.
		 */
		rc = -ENXIO;
		goto unlock;
	default:
		break;
	}

	/*
	 * Use this flag as an indication that the dax page has been
	 * remapped UC to prevent speculative consumption of poison.
	 */
	SetPageHWPoison(page);

	/*
	 * Unlike System-RAM there is no possibility to swap in a
	 * different physical page at a given virtual address, so all
	 * userspace consumption of ZONE_DEVICE memory necessitates
	 * SIGBUS (i.e. MF_MUST_KILL)
	 */
	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
	collect_procs(page, &to_kill, true);

	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
unlock:
	dax_unlock_page(page, cookie);
	return rc;
}

#ifdef CONFIG_FS_DAX
/**
 * mf_dax_kill_procs - Collect and kill processes who are using this file range
 * @mapping:	address_space of the file in use
 * @index:	start pgoff of the range within the file
 * @count:	length of the range, in unit of PAGE_SIZE
 * @mf_flags:	memory failure flags
 */
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
		unsigned long count, int mf_flags)
{
	LIST_HEAD(to_kill);
	dax_entry_t cookie;
	struct page *page;
	size_t end = index + count;

	mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;

	for (; index < end; index++) {
		page = NULL;
		cookie = dax_lock_mapping_entry(mapping, index, &page);
		if (!cookie)
			return -EBUSY;
		if (!page)
			goto unlock;

		SetPageHWPoison(page);

		collect_procs_fsdax(page, mapping, index, &to_kill);
		unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
				index, mf_flags);
unlock:
		dax_unlock_mapping_entry(mapping, index, cookie);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
#endif /* CONFIG_FS_DAX */
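
/*
 * A minimal caller sketch, assuming a hypothetical filesystem "foofs" whose
 * ->notify_failure handler has already resolved the failed device range to
 * a file offset (in-tree, xfs follows this pattern):
 *
 *	static int foofs_notify_failure(struct address_space *mapping,
 *					pgoff_t pgoff, unsigned long pgcnt,
 *					int mf_flags)
 *	{
 *		return mf_dax_kill_procs(mapping, pgoff, pgcnt, mf_flags);
 *	}
 */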

#ifdef CONFIG_HUGETLB_PAGE

/*
 * Struct raw_hwp_page represents information about a "raw error page",
 * kept on a singly linked list built from the ->_hugetlb_hwpoison field
 * of the folio.
 */
struct raw_hwp_page {
	struct llist_node node;
	struct page *page;
};

static inline struct llist_head *raw_hwp_list_head(struct folio *folio)
{
	return (struct llist_head *)&folio->_hugetlb_hwpoison;
}

bool is_raw_hwpoison_page_in_hugepage(struct page *page)
{
	struct llist_head *raw_hwp_head;
	struct raw_hwp_page *p;
	struct folio *folio = page_folio(page);
	bool ret = false;

	if (!folio_test_hwpoison(folio))
		return false;

	if (!folio_test_hugetlb(folio))
		return PageHWPoison(page);

	/*
	 * When RawHwpUnreliable is set, the kernel has lost track of which
	 * subpages are HWPOISON.  So return as if ALL subpages are HWPOISONed.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return true;

	mutex_lock(&mf_mutex);

	raw_hwp_head = raw_hwp_list_head(folio);
	llist_for_each_entry(p, raw_hwp_head->first, node) {
		if (page == p->page) {
			ret = true;
			break;
		}
	}

	mutex_unlock(&mf_mutex);

	return ret;
}

static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag)
{
	struct llist_node *head;
	struct raw_hwp_page *p, *next;
	unsigned long count = 0;

	head = llist_del_all(raw_hwp_list_head(folio));
	llist_for_each_entry_safe(p, next, head, node) {
		if (move_flag)
			SetPageHWPoison(p->page);
		else
			num_poisoned_pages_sub(page_to_pfn(p->page), 1);
		kfree(p);
		count++;
	}
	return count;
}
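
/*
 * move_flag semantics, as used by the callers below: true moves the error
 * information back to per-page PageHWPoison bits before freeing the raw
 * list (hugepage being dissolved into raw pages); false drops the list
 * together with its poisoned-page accounting (unpoison).
 */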

static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page)
{
	struct llist_head *head;
	struct raw_hwp_page *raw_hwp;
	struct raw_hwp_page *p, *next;
	int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0;

	/*
	 * Once the hwpoison hugepage has lost reliable raw error info,
	 * there is little point in keeping the additional error info
	 * precise, so skip adding any further raw error info.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return -EHWPOISON;
	head = raw_hwp_list_head(folio);
	llist_for_each_entry_safe(p, next, head->first, node) {
		if (p->page == page)
			return -EHWPOISON;
	}

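	/*
	 * GFP_ATOMIC: this path is reached from __get_huge_page_for_hwpoison()
	 * with hugetlb_lock held, so sleeping allocations are not allowed.
	 */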
	raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
	if (raw_hwp) {
		raw_hwp->page = page;
		llist_add(&raw_hwp->node, head);
		/* the first error event will be counted in action_result(). */
		if (ret)
			num_poisoned_pages_inc(page_to_pfn(page));
	} else {
		/*
		 * Failed to save raw error info.  We no longer trace all
		 * hwpoisoned subpages, so we must refuse to free/dissolve
		 * this hwpoisoned hugepage.
		 */
		folio_set_hugetlb_raw_hwp_unreliable(folio);
		/*
		 * Once hugetlb_raw_hwp_unreliable is set, raw_hwp_page is not
		 * used any more, so free it.
		 */
		__folio_free_raw_hwp(folio, false);
	}
	return ret;
}

static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag)
{
	/*
	 * hugetlb_vmemmap_optimized hugepages can't be freed because struct
	 * pages for tail pages are required but they don't exist.
	 */
	if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio))
		return 0;

	/*
	 * hugetlb_raw_hwp_unreliable hugepages shouldn't be unpoisoned by
	 * definition.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return 0;

	return __folio_free_raw_hwp(folio, move_flag);
}

void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;
	if (folio_test_hugetlb_vmemmap_optimized(folio))
		return;
	folio_clear_hwpoison(folio);
	folio_free_raw_hwp(folio, true);
}

/*
 * Called from hugetlb code with hugetlb_lock held.
 *
 * Return values:
 *   0		- free hugepage
 *   1		- in-use hugepage
 *   2		- not a hugepage
 *   -EBUSY	- the hugepage is busy (try to retry)
 *   -EHWPOISON	- the hugepage is already hwpoisoned
 */
int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				 bool *migratable_cleared)
{
	struct page *page = pfn_to_page(pfn);
	struct folio *folio = page_folio(page);
	int ret = 2;	/* fallback to normal page handling */
	bool count_increased = false;

	if (!folio_test_hugetlb(folio))
		goto out;

	if (flags & MF_COUNT_INCREASED) {
		ret = 1;
		count_increased = true;
	} else if (folio_test_hugetlb_freed(folio)) {
		ret = 0;
	} else if (folio_test_hugetlb_migratable(folio)) {
		ret = folio_try_get(folio);
		if (ret)
			count_increased = true;
	} else {
		ret = -EBUSY;
		if (!(flags & MF_NO_RETRY))
			goto out;
	}

	if (folio_set_hugetlb_hwpoison(folio, page)) {
		ret = -EHWPOISON;
		goto out;
	}

	/*
	 * Clearing hugetlb_migratable for hwpoisoned hugepages to prevent them
	 * from being migrated by memory hotremove.
	 */
	if (count_increased && folio_test_hugetlb_migratable(folio)) {
		folio_clear_hugetlb_migratable(folio);
		*migratable_cleared = true;
	}

	return ret;
out:
	if (count_increased)
		folio_put(folio);
	return ret;
}

/*
 * Taking a refcount on hugetlb pages needs extra care about races with
 * basic operations like hugepage allocation/free/demotion, so some of the
 * prechecks for hwpoison (pinning, and testing/setting PageHWPoison) have
 * to be done within a single hugetlb_lock section.
 */
static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	int res;
	struct page *p = pfn_to_page(pfn);
	struct folio *folio;
	unsigned long page_flags;
	bool migratable_cleared = false;

	*hugetlb = 1;
retry:
	res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
	if (res == 2) { /* fallback to normal page handling */
		*hugetlb = 0;
		return 0;
	} else if (res == -EHWPOISON) {
		pr_err("%#lx: already hardware poisoned\n", pfn);
		if (flags & MF_ACTION_REQUIRED) {
			folio = page_folio(p);
			res = kill_accessing_process(current, folio_pfn(folio), flags);
		}
		return res;
	} else if (res == -EBUSY) {
		if (!(flags & MF_NO_RETRY)) {
			flags |= MF_NO_RETRY;
			goto retry;
		}
		return action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
	}

	folio = page_folio(p);
	folio_lock(folio);

	if (hwpoison_filter(p)) {
		folio_clear_hugetlb_hwpoison(folio);
		if (migratable_cleared)
			folio_set_hugetlb_migratable(folio);
		folio_unlock(folio);
		if (res == 1)
			folio_put(folio);
		return -EOPNOTSUPP;
	}

	/*
	 * Handling a free hugepage.  The possible race with hugepage
	 * allocation or demotion is prevented by the PageHWPoison flag.
	 */
	if (res == 0) {
		folio_unlock(folio);
		if (__page_handle_poison(p) >= 0) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		} else {
			res = MF_FAILED;
		}
		return action_result(pfn, MF_MSG_FREE_HUGE, res);
	}

	page_flags = folio->flags;

	if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
		folio_unlock(folio);
		return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
	}

	return identify_page_state(pfn, p, page_flags);
}

#else
static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	return 0;
}

static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag)
{
	return 0;
}
#endif	/* CONFIG_HUGETLB_PAGE */

/* Drop the extra refcount in case we come from madvise() */
static void put_ref_page(unsigned long pfn, int flags)
{
	struct page *page;

	if (!(flags & MF_COUNT_INCREASED))
		return;

	page = pfn_to_page(pfn);
	if (page)
		put_page(page);
}

static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	int rc = -ENXIO;

	/* device metadata space is not recoverable */
	if (!pgmap_pfn_valid(pgmap, pfn))
		goto out;

	/*
	 * Call the driver's implementation to handle the memory failure,
	 * otherwise fall back to the generic handler.
	 */
	if (pgmap_has_memory_failure(pgmap)) {
		rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
		/*
		 * Fall back to the generic handler too if the operation is
		 * not supported inside the driver/device/filesystem.
		 */
		if (rc != -EOPNOTSUPP)
			goto out;
	}

	rc = mf_generic_kill_procs(pfn, flags, pgmap);
out:
	/* drop the pgmap ref acquired in the caller */
	put_dev_pagemap(pgmap);
	if (rc != -EOPNOTSUPP)
		action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
	return rc;
}

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 *
 * Return: 0 if the memory error was successfully handled,
 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
 *         < 0 (except -EOPNOTSUPP) on failure.
 */
int memory_failure(unsigned long pfn, int flags)
{
	struct page *p;
	struct page *hpage;
	struct dev_pagemap *pgmap;
	int res = 0;
	unsigned long page_flags;
	bool retry = true;
	int hugetlb = 0;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure on page %lx", pfn);

	mutex_lock(&mf_mutex);

	if (!(flags & MF_SW_SIMULATED))
		hw_memory_failure = true;

	p = pfn_to_online_page(pfn);
	if (!p) {
		res = arch_memory_failure(pfn, flags);
		if (res == 0)
			goto unlock_mutex;

		if (pfn_valid(pfn)) {
			pgmap = get_dev_pagemap(pfn, NULL);
			put_ref_page(pfn, flags);
			if (pgmap) {
				res = memory_failure_dev_pagemap(pfn, flags,
								 pgmap);
				goto unlock_mutex;
			}
		}
		pr_err("%#lx: memory outside kernel control\n", pfn);
		res = -ENXIO;
		goto unlock_mutex;
	}

try_again:
	res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
	if (hugetlb)
		goto unlock_mutex;

	if (TestSetPageHWPoison(p)) {
		pr_err("%#lx: already hardware poisoned\n", pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
		if (flags & MF_COUNT_INCREASED)
			put_page(p);
		goto unlock_mutex;
	}

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    check_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up the page count from 0,
	 * as that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			if (is_free_buddy_page(p)) {
				if (take_page_off_buddy(p)) {
					page_ref_inc(p);
					res = MF_RECOVERED;
				} else {
					/* We lost the race, try again */
					if (retry) {
						ClearPageHWPoison(p);
						retry = false;
						goto try_again;
					}
					res = MF_FAILED;
				}
				res = action_result(pfn, MF_MSG_BUDDY, res);
			} else {
				res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
			}
			goto unlock_mutex;
		} else if (res < 0) {
			res = action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			goto unlock_mutex;
		}
	}

	hpage = compound_head(p);
	if (PageTransHuge(hpage)) {
		/*
		 * The flag must be set after the refcount is bumped
		 * otherwise it may race with THP split.
		 * And the flag can't be set in get_hwpoison_page() since
		 * it is called by soft offline too and it is just called
		 * for !MF_COUNT_INCREASED.  So here seems to be the best
		 * place.
		 *
		 * Don't need to care about the above error handling paths
		 * for get_hwpoison_page() since they handle either free page
		 * or unhandlable page.  The refcount is bumped iff the
		 * page is a valid handlable page.
		 */
		SetPageHasHWPoisoned(hpage);
		if (try_to_split_thp_page(p) < 0) {
			res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	shake_page(p);

	lock_page(p);

	/*
	 * We're only intended to deal with the non-Compound page here.
	 * However, the page could have changed into a compound page due to
	 * a race window.  If this happens, we could try again to hopefully
	 * handle the page in the next round.
	 */
	if (PageCompound(p)) {
		if (retry) {
			ClearPageHWPoison(p);
			unlock_page(p);
			put_page(p);
			flags &= ~MF_COUNT_INCREASED;
			retry = false;
			goto try_again;
		}
		res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		goto unlock_page;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action.  One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * page_remove_rmap() in try_to_unmap_one().  So to determine page
	 * status correctly, we save a copy of the page flags at this time.
	 */
	page_flags = p->flags;

	if (hwpoison_filter(p)) {
		ClearPageHWPoison(p);
		unlock_page(p);
		put_page(p);
		res = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	/*
	 * __munlock_folio() may clear a writeback page's LRU flag without
	 * the page lock.  We need to wait for writeback completion for this
	 * page, or it may trigger a vfs BUG while evicting an inode.
	 */
	if (!PageLRU(p) && !PageWriteback(p))
		goto identify_page_state;

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
	 */
	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
		res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		goto unlock_page;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		goto unlock_page;
	}

identify_page_state:
	res = identify_page_state(pfn, p, page_flags);
	mutex_unlock(&mf_mutex);
	return res;
unlock_page:
	unlock_page(p);
unlock_mutex:
	mutex_unlock(&mf_mutex);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);
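
/*
 * Illustrative caller sketch (not taken verbatim from any one architecture):
 * machine check code typically defers to process context (e.g. via
 * task_work) and then reports the corrupted pfn roughly as
 *
 *	memory_failure(pfn, MF_ACTION_REQUIRED);
 *
 * x86's MCE handler follows this pattern for errors consumed by the current
 * task; the exact flags vary by architecture and error type.
 */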

#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

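/*
 * I.e. up to 16 pending failures can be buffered per CPU; once the fifo
 * fills up, further reports are dropped with an error message (see
 * memory_failure_queue() below).
 */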
struct memory_failure_entry {
	unsigned long pfn;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * the recovery of the error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn = pfn,
		.flags = flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);
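
/*
 * Illustrative caller sketch: a firmware-first error handler that only has
 * a physical address in hand can report it from IRQ context with
 *
 *	memory_failure_queue(PHYS_PFN(paddr), 0);
 *
 * (the ACPI GHES driver reports poisoned ranges this way; the flags value
 * depends on the error severity).
 */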

static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = container_of(work, struct memory_failure_cpu, work);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(entry.pfn, entry.flags);
		else
			memory_failure(entry.pfn, entry.flags);
	}
}

/*
 * Process memory_failure work queued on the specified CPU.
 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
 */
void memory_failure_queue_kick(int cpu)
{
	struct memory_failure_cpu *mf_cpu;

	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
	cancel_work_sync(&mf_cpu->work);
	memory_failure_work_func(&mf_cpu->work);
}

static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	register_sysctl_init("vm", memory_failure_table);

	return 0;
}
core_initcall(memory_failure_init);

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt
#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct folio *folio;
	struct page *p;
	int ret = -EBUSY, ghp;
	unsigned long count = 1;
	bool huge = false;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	folio = page_folio(p);

	mutex_lock(&mf_mutex);

	if (hw_memory_failure) {
		unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
				 pfn, &unpoison_rs);
		ret = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_ref_count(folio) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_test_slab(folio) || PageTable(&folio->page) ||
	    folio_test_reserved(folio) || PageOffline(&folio->page))
		goto unlock_mutex;

	/*
	 * Note that folio->_mapcount is overloaded in SLAB, so the simple test
	 * in folio_mapped() has to be done after folio_test_slab() is checked.
	 */
	if (folio_mapped(folio)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_mapping(folio)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	ghp = get_hwpoison_page(p, MF_UNPOISON);
	if (!ghp) {
		if (PageHuge(p)) {
			huge = true;
			count = folio_free_raw_hwp(folio, false);
			if (count == 0)
				goto unlock_mutex;
		}
		ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY;
	} else if (ghp < 0) {
		if (ghp == -EHWPOISON) {
			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
		} else {
			ret = ghp;
			unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
					 pfn, &unpoison_rs);
		}
	} else {
		if (PageHuge(p)) {
			huge = true;
			count = folio_free_raw_hwp(folio, false);
			if (count == 0) {
				folio_put(folio);
				goto unlock_mutex;
			}
		}

		folio_put(folio);
		if (TestClearPageHWPoison(p)) {
			folio_put(folio);
			ret = 0;
		}
	}

unlock_mutex:
	mutex_unlock(&mf_mutex);
	if (!ret) {
		if (!huge)
			num_poisoned_pages_sub(pfn, 1);
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 page_to_pfn(p), &unpoison_rs);
	}
	return ret;
}
EXPORT_SYMBOL(unpoison_memory);
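
/*
 * Typically exercised through the hwpoison-inject debugfs interface
 * (CONFIG_HWPOISON_INJECT), e.g.:
 *
 *	echo 0x1234 > /sys/kernel/debug/hwpoison/unpoison-pfn
 *
 * where 0x1234 is the pfn to unpoison.
 */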

static bool isolate_page(struct page *page, struct list_head *pagelist)
{
	bool isolated = false;

	if (PageHuge(page)) {
		isolated = isolate_hugetlb(page_folio(page), pagelist);
	} else {
		bool lru = !__PageMovable(page);

		if (lru)
			isolated = isolate_lru_page(page);
		else
			isolated = isolate_movable_page(page,
							ISOLATE_UNEVICTABLE);

		if (isolated) {
			list_add(&page->lru, pagelist);
			if (lru)
				inc_node_page_state(page, NR_ISOLATED_ANON +
						    page_is_file_lru(page));
		}
	}

	/*
	 * If we succeed in isolating the page, we grabbed another refcount on
	 * the page, so we can safely drop the one we got from get_any_page().
	 * If we failed to isolate the page, it means that we cannot go further
	 * and we will return an error, so drop the reference we got from
	 * get_any_page() as well.
	 */
	put_page(page);
	return isolated;
}

/*
 * soft_offline_in_use_page handles hugetlb pages and non-hugetlb pages.
 * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
 * If the page is mapped, it migrates the contents over.
 */
static int soft_offline_in_use_page(struct page *page)
{
	long ret = 0;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	char const *msg_page[] = {"page", "hugepage"};
	bool huge = PageHuge(page);
	LIST_HEAD(pagelist);
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	if (!huge && PageTransHuge(hpage)) {
		if (try_to_split_thp_page(page)) {
			pr_info("soft offline: %#lx: thp split failed\n", pfn);
			return -EBUSY;
		}
		hpage = page;
	}

	lock_page(page);
	if (!huge)
		wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return 0;
	}

	if (!huge && PageLRU(page) && !PageSwapCache(page))
		/*
		 * Try to invalidate first. This should work for
		 * non-dirty unmapped page cache pages.
		 */
		ret = invalidate_inode_page(page);
	unlock_page(page);

	if (ret) {
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		page_handle_poison(page, false, true);
		return 0;
	}

	if (isolate_page(hpage, &pagelist)) {
		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
		if (!ret) {
			bool release = !huge;

			if (!page_handle_poison(page, huge, release))
				ret = -EBUSY;
		} else {
			if (!list_empty(&pagelist))
				putback_movable_pages(&pagelist);

			pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
				pfn, msg_page[huge], ret, &page->flags);
			if (ret > 0)
				ret = -EBUSY;
		}
	} else {
		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
			pfn, msg_page[huge], page_count(page), &page->flags);
		ret = -EBUSY;
	}
	return ret;
}

/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success,
 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
 *         < 0 otherwise (negated errno).
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	bool try_again = true;
	struct page *page;

	if (!pfn_valid(pfn)) {
		WARN_ON_ONCE(flags & MF_COUNT_INCREASED);
		return -ENXIO;
	}

	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page) {
		put_ref_page(pfn, flags);
		return -EIO;
	}

	mutex_lock(&mf_mutex);

	if (PageHWPoison(page)) {
		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
		put_ref_page(pfn, flags);
		mutex_unlock(&mf_mutex);
		return 0;
	}

retry:
	get_online_mems();
	ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
	put_online_mems();

	if (hwpoison_filter(page)) {
		if (ret > 0)
			put_page(page);

		mutex_unlock(&mf_mutex);
		return -EOPNOTSUPP;
	}

	if (ret > 0) {
		ret = soft_offline_in_use_page(page);
	} else if (ret == 0) {
		if (!page_handle_poison(page, true, false)) {
			if (try_again) {
				try_again = false;
				flags &= ~MF_COUNT_INCREASED;
				goto retry;
			}
			ret = -EBUSY;
		}
	}

	mutex_unlock(&mf_mutex);

	return ret;
}
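
/*
 * Illustrative usage: user space can request soft offline either with
 *
 *	madvise(addr, length, MADV_SOFT_OFFLINE)
 *
 * on a mapped range, or by writing a physical address to
 * /sys/devices/system/memory/soft_offline_page.
 */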