// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or
 * cache failure.
 *
 * In addition there is a "soft offline" entry point that allows one to stop
 * using not-yet-corrupted but suspicious pages without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/vm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity in the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */
#define pr_fmt(fmt) "Memory failure: " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/dax.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/memremap.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
#include <linux/pagewalk.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "ras/ras_event.h"
int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;
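/*
 * For illustration (not logic in this file): both knobs are reachable from
 * userspace through the usual vm sysctl paths, e.g.:
 *
 *	# echo 1 > /proc/sys/vm/memory_failure_early_kill
 *	# sysctl vm.memory_failure_recovery=1
 *
 * early_kill selects whether mapping processes are signaled as soon as the
 * corruption is found; recovery=0 makes any memory failure panic instead.
 */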
atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

static bool hw_memory_failure __read_mostly = false;
static bool __page_handle_poison(struct page *page)
{
	int ret;

	zone_pcp_disable(page_zone(page));
	ret = dissolve_free_huge_page(page);
	if (!ret)
		ret = take_page_off_buddy(page);
	zone_pcp_enable(page_zone(page));

	return ret > 0;
}
static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
{
	if (hugepage_or_freepage) {
		/*
		 * Doing this check for free pages is also fine since
		 * dissolve_free_huge_page() returns 0 for non-hugetlb pages.
		 */
		if (!__page_handle_poison(page))
			/*
			 * We could fail to take the target page off buddy,
			 * for example due to racy page allocation, but that's
			 * acceptable because a soft-offlined page is not
			 * broken; if someone really wants to use it, it
			 * should be put back into buddy first.
			 */
			return false;
	}

	SetPageHWPoison(page);
	if (release)
		put_page(page);
	page_ref_inc(page);
	num_poisoned_pages_inc();

	return true;
}
#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
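/*
 * These exports are consumed by the hwpoison-inject test module, which on
 * kernels built with HWPOISON_INJECT typically exposes them (alongside
 * corrupt-pfn / unpoison-pfn) under debugfs. Illustrative use, assuming
 * that layout:
 *
 *	# echo 1 > /sys/kernel/debug/hwpoison/filter-enable
 *	# echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn
 */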
static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}
static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}
/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Finally, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	if (!hwpoison_filter_memcg)
		return 0;

	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif
int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);
/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible between detecting the
 * error and actually handling it.
 */
struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	short size_shift;
};
/*
 * Send all the processes who have the page mapped a signal.
 * ``action optional'' if they are not immediately affected by the error
 * ``action required'' if error happened in current execution context
 */
static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
{
	struct task_struct *t = tk->tsk;
	short addr_lsb = tk->size_shift;
	int ret = 0;

	pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
			pfn, t->comm, t->pid);

	if ((flags & MF_ACTION_REQUIRED) && (t == current))
		ret = force_sig_mceerr(BUS_MCEERR_AR,
				       (void __user *)tk->addr, addr_lsb);
	else
		/*
		 * Signal other processes sharing the page if they have
		 * PF_MCE_EARLY set.
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 * This could cause a loop when the user sets SIGBUS
		 * to SIG_IGN, but hopefully no one will do that?
		 */
		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
				      addr_lsb, t);	/* synchronous? */
	if (ret < 0)
		pr_info("Error sending signal to %s:%d: %d\n",
			t->comm, t->pid, ret);
	return ret;
}
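/*
 * For reference, a minimal sketch of the consumer side (userspace code, not
 * part of this file; names here are illustrative) of the BUS_MCEERR_AO /
 * BUS_MCEERR_AR signals sent above:
 *
 *	static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO || si->si_code == BUS_MCEERR_AR) {
 *			void *bad = si->si_addr;		// poisoned address
 *			size_t len = 1UL << si->si_addr_lsb;	// mapping granularity
 *			// discard or remap [bad, bad + len) before returning
 *		}
 *	}
 *
 * registered via sigaction(SIGBUS, ...) with SA_SIGINFO; si_addr_lsb carries
 * tk->size_shift from kill_proc().
 */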
/*
 * Unknown page type encountered. Try to check whether it can turn PageLRU
 * by lru_add_drain_all().
 */
void shake_page(struct page *p)
{
	if (PageHuge(p))
		return;

	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * TODO: Could shrink slab caches here if a lightweight range-based
	 * shrinker will be available.
	 */
}
EXPORT_SYMBOL_GPL(shake_page);
static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
					       unsigned long address)
{
	unsigned long ret = 0;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	VM_BUG_ON_VMA(address == -EFAULT, vma);
	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return 0;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return 0;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return 0;
	if (pud_devmap(*pud))
		return PUD_SHIFT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;
	if (pmd_devmap(*pmd))
		return PMD_SHIFT;
	pte = pte_offset_map(pmd, address);
	if (pte_present(*pte) && pte_devmap(*pte))
		ret = PAGE_SHIFT;
	pte_unmap(pte);
	return ret;
}
/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 *
 * Note: @fsdax_pgoff is used only when @p is a fsdax page.
 *   In other cases, such as anonymous and file-backed pages, the address to
 *   be killed can be calculated from @p itself.
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
			pgoff_t fsdax_pgoff, struct vm_area_struct *vma,
			struct list_head *to_kill)
{
	struct to_kill *tk;

	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
	if (!tk) {
		pr_err("Out of memory while machine check handling\n");
		return;
	}

	tk->addr = page_address_in_vma(p, vma);
	if (is_zone_device_page(p)) {
		/*
		 * Since page->mapping is not used for fsdax, we need to
		 * calculate the address based on the vma.
		 */
		if (p->pgmap->type == MEMORY_DEVICE_FS_DAX)
			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
	} else
		tk->size_shift = page_shift(compound_head(p));

	/*
	 * Send SIGKILL if "tk->addr == -EFAULT". Also, since
	 * "tk->size_shift" is always non-zero for !is_zone_device_page(),
	 * "tk->size_shift == 0" effectively checks for no mapping on
	 * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
	 * into a process' address space, it's possible that not all N VMAs
	 * contain mappings for the page, but at least one VMA does.
	 * Only deliver SIGBUS with payload derived from the VMA that
	 * has a mapping for the page.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
	} else if (tk->size_shift == 0) {
		kfree(tk);
		return;
	}

	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}
/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the
 * list (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
		unsigned long pfn, int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr == -EFAULT) {
				pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
				do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
						 tk->tsk, PIDTYPE_PID);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc(tk, pfn, flags) < 0)
				pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}
/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
 * have to call rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t) {
		if (t->flags & PF_MCE_PROCESS) {
			if (t->flags & PF_MCE_EARLY)
				return t;
		} else {
			if (sysctl_memory_failure_early_kill)
				return t;
		}
	}
	return NULL;
}
/*
 * Determine whether a given process is an "early kill" process which expects
 * to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill", and NULL otherwise.
 *
 * Note that the above is true for the Action Optional case. For the Action
 * Required case, the error is only meaningful to the current thread, which
 * needs to be signaled with SIGBUS; for other, non-current processes sharing
 * the same error page the error stays Action Optional, and if such a process
 * is "early kill", the task_struct of its dedicated thread is returned too.
 */
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	if (!tsk->mm)
		return NULL;
	/*
	 * Comparing ->mm here because current task might represent
	 * a subthread, while tsk always points to the main thread.
	 */
	if (force_early && tsk->mm == current->mm)
		return current;

	return find_early_kill_thread(tsk);
}
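/*
 * PF_MCE_PROCESS / PF_MCE_EARLY checked above are under the control of the
 * process itself via prctl(). A minimal illustration (userspace, not this
 * file):
 *
 *	#include <sys/prctl.h>
 *	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
 *
 * which requests an early SIGBUS for the calling thread instead of the
 * sysctl-determined default.
 */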
/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
				int force_early)
{
	struct folio *folio = page_folio(page);
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = folio_lock_anon_vma_read(folio, NULL);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_to_pgoff(page);
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, 0, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma_read(av);
}
/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
				int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	pgoff = page_to_pgoff(page);
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, 0, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}
#ifdef CONFIG_FS_DAX
/*
 * Collect processes when the error hit a fsdax page.
 */
static void collect_procs_fsdax(struct page *page,
		struct address_space *mapping, pgoff_t pgoff,
		struct list_head *to_kill)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, true);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, pgoff, vma, to_kill);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}
#endif /* CONFIG_FS_DAX */
/*
 * Collect the processes who have the corrupted page mapped to kill.
 */
static void collect_procs(struct page *page, struct list_head *tokill,
				int force_early)
{
	if (!page->mapping)
		return;

	if (PageAnon(page))
		collect_procs_anon(page, tokill, force_early);
	else
		collect_procs_file(page, tokill, force_early);
}
struct hwp_walk {
	struct to_kill tk;
	unsigned long pfn;
	int flags;
};

static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
{
	tk->addr = addr;
	tk->size_shift = shift;
}

static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
				unsigned long poisoned_pfn, struct to_kill *tk)
{
	unsigned long pfn = 0;

	if (pte_present(pte)) {
		pfn = pte_pfn(pte);
	} else {
		swp_entry_t swp = pte_to_swp_entry(pte);

		if (is_hwpoison_entry(swp))
			pfn = hwpoison_entry_to_pfn(swp);
	}

	if (!pfn || pfn != poisoned_pfn)
		return 0;

	set_to_kill(tk, addr, shift);
	return 1;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwp_walk *hwp)
{
	pmd_t pmd = *pmdp;
	unsigned long pfn;
	unsigned long hwpoison_vaddr;

	if (!pmd_present(pmd))
		return 0;
	pfn = pmd_pfn(pmd);
	if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
		hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
		set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT);
		return 1;
	}
	return 0;
}
#else
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
				      struct hwp_walk *hwp)
{
	return 0;
}
#endif
static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
{
	struct hwp_walk *hwp = walk->private;
	int ret = 0;
	pte_t *ptep, *mapped_pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmdp, walk->vma);
	if (ptl) {
		ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmdp))
		goto out;

	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
						addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
					     hwp->pfn, &hwp->tk);
		if (ret == 1)
			break;
	}
	pte_unmap_unlock(mapped_pte, ptl);
out:
	cond_resched();
	return ret;
}
#ifdef CONFIG_HUGETLB_PAGE
static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
			    unsigned long addr, unsigned long end,
			    struct mm_walk *walk)
{
	struct hwp_walk *hwp = walk->private;
	pte_t pte = huge_ptep_get(ptep);
	struct hstate *h = hstate_vma(walk->vma);

	return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
				      hwp->pfn, &hwp->tk);
}
#else
#define hwpoison_hugetlb_range	NULL
#endif

static const struct mm_walk_ops hwp_walk_ops = {
	.pmd_entry = hwpoison_pte_range,
	.hugetlb_entry = hwpoison_hugetlb_range,
};
/*
 * Sends SIGBUS to the current process with error info.
 *
 * This function is intended to handle "Action Required" MCEs on already
 * hardware poisoned pages. They could happen, for example, when
 * memory_failure() failed to unmap the error page at the first call, or
 * when multiple local machine checks happened on different CPUs.
 *
 * The MCE handler currently has no easy access to the error virtual address,
 * so this function walks the page tables to find it. The returned virtual
 * address is correct in most cases, but it could be wrong when the
 * application process has multiple entries mapping the error page.
 */
static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
				  int flags)
{
	int ret;
	struct hwp_walk priv = {
		.pfn = pfn,
	};
	priv.tk.tsk = p;

	mmap_read_lock(p->mm);
	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
			      (void *)&priv);
	if (ret == 1 && priv.tk.addr)
		kill_proc(&priv.tk, pfn, flags);
	else
		ret = 0;
	mmap_read_unlock(p->mm);
	return ret > 0 ? -EHWPOISON : -EFAULT;
}
static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};

static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_SLAB]			= "kernel slab page",
	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_NON_PMD_HUGE]		= "non-pmd-sized huge page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_DAX]			= "dax page",
	[MF_MSG_UNSPLIT_THP]		= "unsplit thp",
	[MF_MSG_UNKNOWN]		= "unknown page",
};
/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or failed to remove from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(page_folio(p));

		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		put_page(p);
		return 0;
	}
	return -EIO;
}
static int truncate_error_page(struct page *p, unsigned long pfn,
				struct address_space *mapping)
{
	int ret = MF_FAILED;

	if (mapping->a_ops->error_remove_page) {
		int err = mapping->a_ops->error_remove_page(mapping, p);

		if (err != 0) {
			pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
		} else if (page_has_private(p) &&
			   !try_to_release_page(p, GFP_NOIO)) {
			pr_info("%#lx: failed to release buffers\n", pfn);
		} else {
			ret = MF_RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it, just invalidate.
		 * This fails on dirty pages or anything with private data.
		 */
		if (invalidate_inode_page(p))
			ret = MF_RECOVERED;
		else
			pr_info("%#lx: Failed to invalidate\n", pfn);
	}

	return ret;
}
struct page_state {
	unsigned long mask;
	unsigned long res;
	enum mf_action_page_type type;

	/* Callback ->action() has to unlock the relevant page inside it. */
	int (*action)(struct page_state *ps, struct page *p);
};

/*
 * Return true if the page is still referenced by others, otherwise return
 * false.
 *
 * extra_pins is true when one extra refcount is expected.
 */
static bool has_extra_refcount(struct page_state *ps, struct page *p,
			       bool extra_pins)
{
	int count = page_count(p) - 1;

	if (extra_pins)
		count -= 1;

	if (count > 0) {
		pr_err("%#lx: %s still referenced by %d users\n",
		       page_to_pfn(p), action_page_types[ps->type], count);
		return true;
	}

	return false;
}
/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page_state *ps, struct page *p)
{
	unlock_page(p);
	return MF_IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page_state *ps, struct page *p)
{
	pr_err("%#lx: Unknown page state\n", page_to_pfn(p));
	unlock_page(p);
	return MF_FAILED;
}
/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page_state *ps, struct page *p)
{
	int ret;
	struct address_space *mapping;
	bool extra_pins;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p)) {
		ret = MF_RECOVERED;
		goto out;
	}

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meanwhile
		 */
		ret = MF_FAILED;
		goto out;
	}

	/*
	 * The shmem page is kept in page cache instead of truncating,
	 * so it is expected to have an extra refcount after error-handling.
	 */
	extra_pins = shmem_mapping(mapping);

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_rwsem or not for this? Right now we don't.
	 */
	ret = truncate_error_page(p, page_to_pfn(p), mapping);
	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

out:
	unlock_page(p);

	return ret;
}
/*
 * Dirty pagecache page.
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page_state *ps, struct page *p)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will only be reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO errors:
		 * first through the AS_EIO flag in the address space,
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd before,
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, -EIO);
	}

	return me_pagecache_clean(ps, p);
}
/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. the page was freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *	- clear dirty bit to prevent IO
 *	- remove from LRU
 *	- but keep in the swap cache, so that when we return to it on
 *	  a later page fault, we know the application is accessing
 *	  corrupted data and shall be killed (we installed simple
 *	  interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page_state *ps, struct page *p)
{
	int ret;
	bool extra_pins = false;

	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
	unlock_page(p);

	if (ret == MF_DELAYED)
		extra_pins = true;

	if (has_extra_refcount(ps, p, extra_pins))
		ret = MF_FAILED;

	return ret;
}
static int me_swapcache_clean(struct page_state *ps, struct page *p)
{
	struct folio *folio = page_folio(p);
	int ret;

	delete_from_swap_cache(folio);

	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
	folio_unlock(folio);

	if (has_extra_refcount(ps, p, false))
		ret = MF_FAILED;

	return ret;
}
/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit).
 *   To narrow down the kill region to one page, we need to break up the pmd.
 */
static int me_huge_page(struct page_state *ps, struct page *p)
{
	int res;
	struct page *hpage = compound_head(p);
	struct address_space *mapping;

	if (!PageHuge(hpage))
		return MF_DELAYED;

	mapping = page_mapping(hpage);
	if (mapping) {
		res = truncate_error_page(hpage, page_to_pfn(p), mapping);
		unlock_page(hpage);
	} else {
		res = MF_FAILED;
		unlock_page(hpage);
		/*
		 * The migration entry prevents later access to the error
		 * hugepage, so we can free and dissolve it into buddy to
		 * save the healthy subpages.
		 */
		put_page(hpage);
		if (__page_handle_poison(p)) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		}
	}

	if (has_extra_refcount(ps, p, false))
		res = MF_FAILED;

	return res;
}
/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		((1UL << PG_swapcache) | (1UL << PG_swapbacked))
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define lru		(1UL << PG_lru)
#define head		(1UL << PG_head)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },

	{ head,		head,		MF_MSG_HUGE,		me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef lru
#undef head
#undef slab
#undef reserved
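/*
 * The matching rule, spelled out (an illustration, not extra logic): for
 * each entry the page flags are masked with ->mask and compared against
 * ->res, so a page with PG_lru|PG_dirty set matches MF_MSG_DIRTY_LRU, while
 * PG_lru alone falls through to MF_MSG_CLEAN_LRU; the { 0, 0, ... } catchall
 * matches anything, which is why it must stay last.
 */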
1167 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
1168 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
1170 static void action_result(unsigned long pfn, enum mf_action_page_type type,
1171 enum mf_result result)
1173 trace_memory_failure_event(pfn, type, result);
1175 num_poisoned_pages_inc();
1176 pr_err("%#lx: recovery action for %s: %s\n",
1177 pfn, action_page_types[type], action_name[result]);
1180 static int page_action(struct page_state *ps, struct page *p,
1185 /* page p should be unlocked after returning from ps->action(). */
1186 result = ps->action(ps, p);
1188 action_result(pfn, ps->type, result);
1190 /* Could do more checks here if page looks ok */
1192 * Could adjust zone counters here to correct for the missing page.
1195 return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
static inline bool PageHWPoisonTakenOff(struct page *page)
{
	return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON;
}

void SetPageHWPoisonTakenOff(struct page *page)
{
	set_page_private(page, MAGIC_HWPOISON);
}

void ClearPageHWPoisonTakenOff(struct page *page)
{
	if (PageHWPoison(page))
		set_page_private(page, 0);
}
/*
 * Return true if the page type of a given page is supported by the hwpoison
 * mechanism (while handling could still fail), otherwise false. This function
 * does not return true for hugetlb or device memory pages, so it's assumed
 * to be called only in contexts where such pages never appear.
 */
static inline bool HWPoisonHandlable(struct page *page, unsigned long flags)
{
	/* Soft offline could migrate non-LRU movable pages */
	if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page))
		return true;

	return PageLRU(page) || is_free_buddy_page(page);
}
static int __get_hwpoison_page(struct page *page, unsigned long flags)
{
	struct page *head = compound_head(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_huge_page(head, &hugetlb);
	if (hugetlb)
		return ret;

	/*
	 * This check prevents calling get_page_unless_zero() for any
	 * unsupported type of page in order to reduce the risk of unexpected
	 * races caused by taking a page refcount.
	 */
	if (!HWPoisonHandlable(head, flags))
		return -EBUSY;

	if (get_page_unless_zero(head)) {
		if (head == compound_head(page))
			return 1;

		pr_info("%#lx cannot catch tail\n", page_to_pfn(page));
		put_page(head);
	}

	return 0;
}
static int get_any_page(struct page *p, unsigned long flags)
{
	int ret = 0, pass = 0;
	bool count_increased = false;

	if (flags & MF_COUNT_INCREASED)
		count_increased = true;

try_again:
	if (!count_increased) {
		ret = __get_hwpoison_page(p, flags);
		if (!ret) {
			if (page_count(p)) {
				/* We raced with an allocation, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EBUSY;
			} else if (!PageHuge(p) && !is_free_buddy_page(p)) {
				/* We raced with put_page, retry. */
				if (pass++ < 3)
					goto try_again;
				ret = -EIO;
			}
			goto out;
		} else if (ret == -EBUSY) {
			/*
			 * We raced with (possibly temporary) unhandlable
			 * page, retry.
			 */
			if (pass++ < 3) {
				shake_page(p);
				goto try_again;
			}
			ret = -EIO;
			goto out;
		}
	}

	if (PageHuge(p) || HWPoisonHandlable(p, flags)) {
		ret = 1;
	} else {
		/*
		 * A page we cannot handle. Check whether we can turn
		 * it into something we can handle.
		 */
		if (pass++ < 3) {
			put_page(p);
			shake_page(p);
			count_increased = false;
			goto try_again;
		}
		put_page(p);
		ret = -EIO;
	}
out:
	if (ret == -EIO)
		pr_err("%#lx: unhandlable page.\n", page_to_pfn(p));

	return ret;
}
static int __get_unpoison_page(struct page *page)
{
	struct page *head = compound_head(page);
	int ret = 0;
	bool hugetlb = false;

	ret = get_hwpoison_huge_page(head, &hugetlb);
	if (hugetlb)
		return ret;

	/*
	 * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison,
	 * but also isolated from the buddy freelist, so we need to identify
	 * that state and cancel both operations to unpoison.
	 */
	if (PageHWPoisonTakenOff(page))
		return -EHWPOISON;

	return get_page_unless_zero(page) ? 1 : 0;
}
/**
 * get_hwpoison_page() - Get refcount for memory error handling
 * @p:		Raw error page (hit by memory error)
 * @flags:	Flags controlling behavior of error handling
 *
 * get_hwpoison_page() takes a page refcount of an error page to handle memory
 * error on it, after checking that the error page is in a well-defined state
 * (defined as a page type on which we can successfully handle the memory
 * error, such as LRU page and hugetlb page).
 *
 * Memory error handling could be triggered at any time on any type of page,
 * so it's prone to race with typical memory management lifecycle (like
 * allocation and free). So to avoid such races, get_hwpoison_page() takes
 * extra care for the error page's state (as done in __get_hwpoison_page()),
 * and has some retry logic in get_any_page().
 *
 * When called from unpoison_memory(), the caller should already ensure that
 * the given page has PG_hwpoison. So it's never reused for other page
 * allocations, and __get_unpoison_page() never races with them.
 *
 * Return: 0 on failure,
 *         1 on success for in-use pages in a well-defined state,
 *         -EIO for pages on which we can not handle memory errors,
 *         -EBUSY when get_hwpoison_page() has raced with page lifecycle
 *         operations like allocation and free,
 *         -EHWPOISON when the page is hwpoisoned and taken off from buddy.
 */
static int get_hwpoison_page(struct page *p, unsigned long flags)
{
	int ret;

	zone_pcp_disable(page_zone(p));
	if (flags & MF_UNPOISON)
		ret = __get_unpoison_page(p);
	else
		ret = get_any_page(p, flags);
	zone_pcp_enable(page_zone(p));

	return ret;
}
/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int flags, struct page *hpage)
{
	struct folio *folio = page_folio(hpage);
	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	bool unmap_success;
	int kill = 1, forcekill;
	bool mlocked = PageMlocked(hpage);

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p))
		return true;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return true;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return true;

	if (PageKsm(p)) {
		pr_err("%#lx: can't handle KSM pages.\n", pfn);
		return false;
	}

	if (PageSwapCache(p)) {
		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
	    mapping_can_writeback(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	if (PageHuge(hpage) && !PageAnon(hpage)) {
		/*
		 * For hugetlb pages in shared mappings, try_to_unmap
		 * could potentially call huge_pmd_unshare. Because of
		 * this, take semaphore in write mode here and set
		 * TTU_RMAP_LOCKED to indicate we have taken the lock
		 * at this higher level.
		 */
		mapping = hugetlb_page_mapping_lock_write(hpage);
		if (mapping) {
			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
			i_mmap_unlock_write(mapping);
		} else
			pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
	} else {
		try_to_unmap(folio, ttu);
	}

	unmap_success = !page_mapped(hpage);
	if (!unmap_success)
		pr_err("%#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(hpage));

	/*
	 * try_to_unmap() might put mlocked page in lru cache, so call
	 * shake_page() again to ensure that it's flushed.
	 */
	if (mlocked)
		shake_page(hpage);

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps are done we can decide if
	 * killing is needed or not. Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
	kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);

	return unmap_success;
}
static int identify_page_state(unsigned long pfn, struct page *p,
				unsigned long page_flags)
{
	struct page_state *ps;

	/*
	 * The first check uses the current page flags which may not have any
	 * relevant information. The second check with the saved page flags is
	 * carried out only if the first check can't determine the page status.
	 */
	for (ps = error_states;; ps++)
		if ((p->flags & ps->mask) == ps->res)
			break;

	page_flags |= (p->flags & (1UL << PG_dirty));

	if (!ps->mask)
		for (ps = error_states;; ps++)
			if ((page_flags & ps->mask) == ps->res)
				break;
	return page_action(ps, p, pfn);
}
static int try_to_split_thp_page(struct page *page, const char *msg)
{
	lock_page(page);
	if (unlikely(split_huge_page(page))) {
		unsigned long pfn = page_to_pfn(page);

		unlock_page(page);
		pr_info("%s: %#lx: thp split failed\n", msg, pfn);
		put_page(page);
		return -EBUSY;
	}
	unlock_page(page);

	return 0;
}
static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
		struct address_space *mapping, pgoff_t index, int flags)
{
	struct to_kill *tk;
	unsigned long size = 0;

	list_for_each_entry(tk, to_kill, nd)
		if (tk->size_shift)
			size = max(size, 1UL << tk->size_shift);

	if (size) {
		/*
		 * Unmap the largest mapping to avoid breaking up device-dax
		 * mappings which are constant size. The actual size of the
		 * mapping being torn down is communicated in siginfo, see
		 * kill_proc().
		 */
		loff_t start = (index << PAGE_SHIFT) & ~(size - 1);

		unmap_mapping_range(mapping, start, size, 0);
	}

	kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
}
static int mf_generic_kill_procs(unsigned long long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	struct page *page = pfn_to_page(pfn);
	LIST_HEAD(to_kill);
	dax_entry_t cookie;
	int rc = 0;

	/*
	 * Pages instantiated by device-dax (not filesystem-dax)
	 * may be compound pages.
	 */
	page = compound_head(page);

	/*
	 * Prevent the inode from being freed while we are interrogating
	 * the address_space, typically this would be handled by
	 * lock_page(), but dax pages do not use the page lock. This
	 * also prevents changes to the mapping of this pfn until
	 * poison signaling is complete.
	 */
	cookie = dax_lock_page(page);
	if (!cookie)
		return -EBUSY;

	if (hwpoison_filter(page)) {
		rc = -EOPNOTSUPP;
		goto unlock;
	}

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_COHERENT:
		/*
		 * TODO: Handle device pages which may need coordination
		 * with device-side memory.
		 */
		rc = -ENXIO;
		goto unlock;
	default:
		break;
	}

	/*
	 * Use this flag as an indication that the dax page has been
	 * remapped UC to prevent speculative consumption of poison.
	 */
	SetPageHWPoison(page);

	/*
	 * Unlike System-RAM there is no possibility to swap in a
	 * different physical page at a given virtual address, so all
	 * userspace consumption of ZONE_DEVICE memory necessitates
	 * SIGBUS (i.e. MF_MUST_KILL)
	 */
	flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
	collect_procs(page, &to_kill, true);

	unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
unlock:
	dax_unlock_page(page, cookie);
	return rc;
}
#ifdef CONFIG_FS_DAX
/**
 * mf_dax_kill_procs - Collect and kill processes who are using this file range
 * @mapping:	address_space of the file in use
 * @index:	start pgoff of the range within the file
 * @count:	length of the range, in unit of PAGE_SIZE
 * @mf_flags:	memory failure flags
 */
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
		unsigned long count, int mf_flags)
{
	LIST_HEAD(to_kill);
	dax_entry_t cookie;
	struct page *page;
	size_t end = index + count;

	mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;

	for (; index < end; index++) {
		page = NULL;
		cookie = dax_lock_mapping_entry(mapping, index, &page);
		if (!cookie)
			return -EBUSY;
		if (!page)
			goto unlock;

		SetPageHWPoison(page);

		collect_procs_fsdax(page, mapping, index, &to_kill);
		unmap_and_kill(&to_kill, page_to_pfn(page), mapping,
				index, mf_flags);
unlock:
		dax_unlock_mapping_entry(mapping, index, cookie);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
#endif /* CONFIG_FS_DAX */
/*
 * Called from hugetlb code with hugetlb_lock held.
 *
 * Return values:
 *   0  - free hugepage
 *   1  - in-use hugepage
 *   2  - not a hugepage
 *   -EBUSY  - the hugepage is busy (try to retry)
 *   -EHWPOISON  - the hugepage is already hwpoisoned
 */
int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	struct page *page = pfn_to_page(pfn);
	struct page *head = compound_head(page);
	int ret = 2;	/* fallback to normal page handling */
	bool count_increased = false;

	if (!PageHeadHuge(head))
		goto out;

	if (flags & MF_COUNT_INCREASED) {
		ret = 1;
		count_increased = true;
	} else if (HPageFreed(head)) {
		ret = 0;
	} else if (HPageMigratable(head)) {
		ret = get_page_unless_zero(head);
		if (ret)
			count_increased = true;
	} else {
		ret = -EBUSY;
		goto out;
	}

	if (TestSetPageHWPoison(head)) {
		ret = -EHWPOISON;
		goto out;
	}

	return ret;
out:
	if (count_increased)
		put_page(head);
	return ret;
}
#ifdef CONFIG_HUGETLB_PAGE
/*
 * Taking a refcount of a hugetlb page needs extra care about race conditions
 * with basic operations like hugepage allocation/free/demotion.
 * So some of the prechecks for hwpoison (pinning, and testing/setting
 * PageHWPoison) should be done within a single hugetlb_lock range.
 */
static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	int res;
	struct page *p = pfn_to_page(pfn);
	struct page *head;
	unsigned long page_flags;
	bool retry = true;

	*hugetlb = 1;
retry:
	res = get_huge_page_for_hwpoison(pfn, flags);
	if (res == 2) { /* fallback to normal page handling */
		*hugetlb = 0;
		return 0;
	} else if (res == -EHWPOISON) {
		pr_err("%#lx: already hardware poisoned\n", pfn);
		if (flags & MF_ACTION_REQUIRED) {
			head = compound_head(p);
			res = kill_accessing_process(current, page_to_pfn(head), flags);
		}
		return res;
	} else if (res == -EBUSY) {
		if (retry) {
			retry = false;
			goto retry;
		}
		action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
		return res;
	}

	head = compound_head(p);
	lock_page(head);

	if (hwpoison_filter(p)) {
		ClearPageHWPoison(head);
		res = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * Handling free hugepage. The possible race with hugepage allocation
	 * or demotion can be prevented by the PageHWPoison flag.
	 */
	if (res == 0) {
		unlock_page(head);
		res = MF_FAILED;
		if (__page_handle_poison(p)) {
			page_ref_inc(p);
			res = MF_RECOVERED;
		}
		action_result(pfn, MF_MSG_FREE_HUGE, res);
		return res == MF_RECOVERED ? 0 : -EBUSY;
	}

	page_flags = head->flags;

	/*
	 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
	 * simply disable it. In order to make it work properly, we need
	 * to make sure that:
	 *  - conversion of a pud that maps an error hugetlb into hwpoison
	 *    entry properly works, and
	 *  - other mm code walking over page table is aware of pud-aligned
	 *    hwpoison entries.
	 */
	if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
		action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	if (!hwpoison_user_mappings(p, pfn, flags, head)) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

	return identify_page_state(pfn, p, page_flags);
out:
	unlock_page(head);
	return res;
}

#else
static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
	return 0;
}
#endif	/* CONFIG_HUGETLB_PAGE */
static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
		struct dev_pagemap *pgmap)
{
	struct page *page = pfn_to_page(pfn);
	int rc = -ENXIO;

	if (flags & MF_COUNT_INCREASED)
		/*
		 * Drop the extra refcount in case we come from madvise().
		 */
		put_page(page);

	/* device metadata space is not recoverable */
	if (!pgmap_pfn_valid(pgmap, pfn))
		goto out;

	/*
	 * Call driver's implementation to handle the memory failure, otherwise
	 * fall back to generic handler.
	 */
	if (pgmap->ops->memory_failure) {
		rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
		/*
		 * Fall back to generic handler too if operation is not
		 * supported inside the driver/device/filesystem.
		 */
		if (rc != -EOPNOTSUPP)
			goto out;
	}

	rc = mf_generic_kill_procs(pfn, flags, pgmap);
out:
	/* drop pgmap ref acquired in caller */
	put_dev_pagemap(pgmap);
	action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
	return rc;
}
static DEFINE_MUTEX(mf_mutex);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 *
 * Return: 0 if the memory error was successfully handled,
 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
 *         < 0 (except -EOPNOTSUPP) on failure.
 */
int memory_failure(unsigned long pfn, int flags)
{
	struct page *p;
	struct page *hpage;
	struct dev_pagemap *pgmap;
	int res = 0;
	unsigned long page_flags;
	bool retry = true;
	int hugetlb = 0;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure on page %lx", pfn);

	mutex_lock(&mf_mutex);

	if (!(flags & MF_SW_SIMULATED))
		hw_memory_failure = true;

	p = pfn_to_online_page(pfn);
	if (!p) {
		res = arch_memory_failure(pfn, flags);
		if (res == 0)
			goto unlock_mutex;

		if (pfn_valid(pfn)) {
			pgmap = get_dev_pagemap(pfn, NULL);
			if (pgmap) {
				res = memory_failure_dev_pagemap(pfn, flags,
								 pgmap);
				goto unlock_mutex;
			}
		}
		pr_err("%#lx: memory outside kernel control\n", pfn);
		res = -ENXIO;
		goto unlock_mutex;
	}

try_again:
	res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
	if (hugetlb)
		goto unlock_mutex;

	if (TestSetPageHWPoison(p)) {
		pr_err("%#lx: already hardware poisoned\n", pfn);
		res = -EHWPOISON;
		if (flags & MF_ACTION_REQUIRED)
			res = kill_accessing_process(current, pfn, flags);
		if (flags & MF_COUNT_INCREASED)
			put_page(p);
		goto unlock_mutex;
	}

	hpage = compound_head(p);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hand:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up page count from 0,
	 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED)) {
		res = get_hwpoison_page(p, flags);
		if (!res) {
			if (is_free_buddy_page(p)) {
				if (take_page_off_buddy(p)) {
					page_ref_inc(p);
					res = MF_RECOVERED;
				} else {
					/* We lost the race, try again */
					if (retry) {
						ClearPageHWPoison(p);
						retry = false;
						goto try_again;
					}
					res = MF_FAILED;
				}
				action_result(pfn, MF_MSG_BUDDY, res);
				res = res == MF_RECOVERED ? 0 : -EBUSY;
			} else {
				action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
				res = -EBUSY;
			}
			goto unlock_mutex;
		} else if (res < 0) {
			action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
			res = -EBUSY;
			goto unlock_mutex;
		}
	}

	if (PageTransHuge(hpage)) {
		/*
		 * The flag must be set after the refcount is bumped
		 * otherwise it may race with THP split.
		 * And the flag can't be set in get_hwpoison_page() since
		 * it is called by soft offline too and it is just called
		 * for !MF_COUNT_INCREASED. So here seems to be the best
		 * place to set the flag.
		 *
		 * Don't need to care about the above error handling paths
		 * for get_hwpoison_page() since they handle either free page
		 * or unhandlable page. The refcount is bumped iff the
		 * page is a valid handlable page.
		 */
		SetPageHasHWPoisoned(hpage);
		if (try_to_split_thp_page(p, "Memory Failure") < 0) {
			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
			res = -EBUSY;
			goto unlock_mutex;
		}
		VM_BUG_ON_PAGE(!page_count(p), p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	shake_page(p);

	lock_page(p);

	/*
	 * We're only intended to deal with the non-Compound page here.
	 * However, the page could have changed compound pages due to
	 * race window. If this happens, we could try again to hopefully
	 * handle the page next round.
	 */
	if (PageCompound(p)) {
		if (retry) {
			ClearPageHWPoison(p);
			unlock_page(p);
			put_page(p);
			flags &= ~MF_COUNT_INCREASED;
			retry = false;
			goto try_again;
		}
		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

	/*
	 * We use page flags to determine what action should be taken, but
	 * the flags can be modified by the error containment action. One
	 * example is an mlocked page, where PG_mlocked is cleared by
	 * page_remove_rmap() in try_to_unmap_one(). So to determine page
	 * status correctly, we save a copy of the page flags at this time.
	 */
	page_flags = p->flags;

	if (hwpoison_filter(p)) {
		TestClearPageHWPoison(p);
		unlock_page(p);
		put_page(p);
		res = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	/*
	 * __munlock_pagevec may clear a writeback page's LRU flag without
	 * the page lock. We need to wait for writeback completion for this
	 * page or it may trigger a vfs BUG while evicting an inode.
	 */
	if (!PageLRU(p) && !PageWriteback(p))
		goto identify_page_state;

	/*
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
	 * Abort on fail: __filemap_remove_folio() assumes unmapped page.
	 */
	if (!hwpoison_user_mappings(p, pfn, flags, p)) {
		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
		res = -EBUSY;
		goto unlock_page;
	}

identify_page_state:
	res = identify_page_state(pfn, p, page_flags);
	mutex_unlock(&mf_mutex);
	return res;
unlock_page:
	unlock_page(p);
unlock_mutex:
	mutex_unlock(&mf_mutex);
	return res;
}
EXPORT_SYMBOL_GPL(memory_failure);
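/*
 * For testing, memory_failure() is typically reached via the hwpoison
 * injectors rather than a real machine check; a sketch of the common paths
 * (userspace side, see also Documentation/vm/hwpoison.rst):
 *
 *	madvise(addr, len, MADV_HWPOISON);	// CAP_SYS_ADMIN, "hard" poison
 *	echo $pfn > /sys/kernel/debug/hwpoison/corrupt-pfn
 */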
#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

struct memory_failure_entry {
	unsigned long pfn;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * the recovering of the error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn =		pfn,
		.flags =	flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);
static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = container_of(work, struct memory_failure_cpu, work);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(entry.pfn, entry.flags);
		else
			memory_failure(entry.pfn, entry.flags);
	}
}
/*
 * Process memory_failure work queued on the specified CPU.
 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
 */
void memory_failure_queue_kick(int cpu)
{
	struct memory_failure_cpu *mf_cpu;

	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
	cancel_work_sync(&mf_cpu->work);
	memory_failure_work_func(&mf_cpu->work);
}
static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	return 0;
}
core_initcall(memory_failure_init);
#undef pr_fmt
#define pr_fmt(fmt)	"" fmt
#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})
/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int ret = -EBUSY;
	int freeit = 0;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	mutex_lock(&mf_mutex);

	if (hw_memory_failure) {
		unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
				 pfn, &unpoison_rs);
		ret = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (page_count(page) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (page_mapped(page)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (page_mapping(page)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (PageSlab(page) || PageTable(page))
		goto unlock_mutex;

	ret = get_hwpoison_page(p, MF_UNPOISON);
	if (!ret) {
		ret = TestClearPageHWPoison(page) ? 0 : -EBUSY;
	} else if (ret < 0) {
		if (ret == -EHWPOISON) {
			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
		} else
			unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
					 pfn, &unpoison_rs);
	} else {
		freeit = !!TestClearPageHWPoison(p);

		put_page(page);
		if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1)) {
			put_page(page);
			ret = 0;
		}
	}

unlock_mutex:
	mutex_unlock(&mf_mutex);
	if (!ret || freeit) {
		num_poisoned_pages_dec();
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 page_to_pfn(p), &unpoison_rs);
	}
	return ret;
}
EXPORT_SYMBOL(unpoison_memory);
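/*
 * Paired with the injection side: with the hwpoison-inject module loaded,
 * a previously software-poisoned pfn can typically be cleared again via the
 * debugfs knob it registers:
 *
 *	echo $pfn > /sys/kernel/debug/hwpoison/unpoison-pfn
 */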
static bool isolate_page(struct page *page, struct list_head *pagelist)
{
	bool isolated = false;
	bool lru = PageLRU(page);

	if (PageHuge(page)) {
		isolated = !isolate_hugetlb(page, pagelist);
	} else {
		if (lru)
			isolated = !isolate_lru_page(page);
		else
			isolated = !isolate_movable_page(page, ISOLATE_UNEVICTABLE);

		if (isolated)
			list_add(&page->lru, pagelist);
	}

	if (isolated && lru)
		inc_node_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_lru(page));

	/*
	 * If we succeed to isolate the page, we grabbed another refcount on
	 * the page, so we can safely drop the one we got from get_any_page().
	 * If we failed to isolate the page, it means that we cannot go further
	 * and we will return an error, so drop the reference we got from
	 * get_any_page() as well.
	 */
	put_page(page);
	return isolated;
}
/*
 * __soft_offline_page handles hugetlb pages and non-hugetlb pages.
 * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
 * If the page is mapped, it migrates the contents over.
 */
static int __soft_offline_page(struct page *page)
{
	long ret = 0;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	char const *msg_page[] = {"page", "hugepage"};
	bool huge = PageHuge(page);
	LIST_HEAD(pagelist);
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	lock_page(page);
	if (!PageHuge(page))
		wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return 0;
	}

	if (!PageHuge(page) && PageLRU(page) && !PageSwapCache(page))
		/*
		 * Try to invalidate first. This should work for
		 * non dirty unmapped page cache pages.
		 */
		ret = invalidate_inode_page(page);
	unlock_page(page);

	if (ret) {
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		page_handle_poison(page, false, true);
		return 0;
	}

	if (isolate_page(hpage, &pagelist)) {
		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
		if (!ret) {
			bool release = !huge;

			if (!page_handle_poison(page, huge, release))
				ret = -EBUSY;
		} else {
			if (!list_empty(&pagelist))
				putback_movable_pages(&pagelist);

			pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
				pfn, msg_page[huge], ret, &page->flags);
			if (ret > 0)
				ret = -EBUSY;
		}
	} else {
		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
			pfn, msg_page[huge], page_count(page), &page->flags);
		ret = -EBUSY;
	}
	return ret;
}
static int soft_offline_in_use_page(struct page *page)
{
	struct page *hpage = compound_head(page);

	if (!PageHuge(page) && PageTransHuge(hpage))
		if (try_to_split_thp_page(page, "soft offline") < 0)
			return -EBUSY;
	return __soft_offline_page(page);
}

static int soft_offline_free_page(struct page *page)
{
	int rc = 0;

	if (!page_handle_poison(page, true, false))
		rc = -EBUSY;

	return rc;
}

static void put_ref_page(struct page *page)
{
	if (page)
		put_page(page);
}
/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success,
 *         -EOPNOTSUPP if hwpoison_filter() filtered the error event,
 *         < 0 otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	bool try_again = true;
	struct page *page, *ref_page = NULL;

	WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));

	if (!pfn_valid(pfn))
		return -ENXIO;
	if (flags & MF_COUNT_INCREASED)
		ref_page = pfn_to_page(pfn);

	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page) {
		put_ref_page(ref_page);
		return -EIO;
	}

	mutex_lock(&mf_mutex);

	if (PageHWPoison(page)) {
		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
		put_ref_page(ref_page);
		mutex_unlock(&mf_mutex);
		return 0;
	}

retry:
	get_online_mems();
	ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
	put_online_mems();

	if (hwpoison_filter(page)) {
		if (ret > 0)
			put_page(page);
		else
			put_ref_page(ref_page);

		mutex_unlock(&mf_mutex);
		return -EOPNOTSUPP;
	}

	if (ret > 0) {
		ret = soft_offline_in_use_page(page);
	} else if (ret == 0) {
		if (soft_offline_free_page(page) && try_again) {
			try_again = false;
			flags &= ~MF_COUNT_INCREASED;
			goto retry;
		}
	}

	mutex_unlock(&mf_mutex);

	return ret;
}
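/*
 * The usual entry into soft_offline_page() is corrected-error threshold
 * handling (e.g. a RAS daemon); for testing it can be reached from userspace
 * via madvise, and typically also via the sysfs memory-block interface.
 * Illustrative calls (paths/privileges are assumptions of the test setup):
 *
 *	madvise(addr, pagesize, MADV_SOFT_OFFLINE);
 *	echo $phys_addr > /sys/devices/system/memory/soft_offline_page
 */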
void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages. But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}