mm, page_poison: use static key more efficiently
[linux-block.git] / mm / page_poison.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
6a11f75b 2#include <linux/kernel.h>
8c5fb8ea 3#include <linux/string.h>
6a11f75b 4#include <linux/mm.h>
64212ec5 5#include <linux/highmem.h>
e30825f1 6#include <linux/page_ext.h>
6a11f75b 7#include <linux/poison.h>
77311139 8#include <linux/ratelimit.h>
4117992d 9#include <linux/kasan.h>
6a11f75b 10
8db26a3d
VB
/*
 * Set during early boot from the "page_poison" kernel parameter (see
 * early_page_poison_param() below), before jump labels are usable.
 * NOTE(review): presumably copied into the static key below by init code
 * elsewhere (not visible in this file) -- confirm against mm init path.
 */
bool _page_poisoning_enabled_early;
EXPORT_SYMBOL(_page_poisoning_enabled_early);
/* Static key form of the flag, checked via __kernel_poison_pages() callers. */
DEFINE_STATIC_KEY_FALSE(_page_poisoning_enabled);
EXPORT_SYMBOL(_page_poisoning_enabled);
e30825f1 15
/*
 * Parse the "page_poison" boot parameter into
 * _page_poisoning_enabled_early; accepts the usual kstrtobool spellings
 * (e.g. "on"/"off", "1"/"0"). Returns 0 on success, negative errno from
 * kstrtobool() on malformed input.
 */
static int __init early_page_poison_param(char *buf)
{
	return kstrtobool(buf, &_page_poisoning_enabled_early);
}
early_param("page_poison", early_page_poison_param);
21
6a11f75b
AM
22static void poison_page(struct page *page)
23{
64212ec5 24 void *addr = kmap_atomic(page);
6a11f75b 25
4117992d
QC
26 /* KASAN still think the page is in-use, so skip it. */
27 kasan_disable_current();
6a11f75b 28 memset(addr, PAGE_POISON, PAGE_SIZE);
4117992d 29 kasan_enable_current();
64212ec5 30 kunmap_atomic(addr);
6a11f75b
AM
31}
32
8db26a3d 33void __kernel_poison_pages(struct page *page, int n)
6a11f75b
AM
34{
35 int i;
36
37 for (i = 0; i < n; i++)
38 poison_page(page + i);
39}
40
/* Return true iff @a and @b differ in exactly one bit. */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char diff = a ^ b;
	int bits = 0;

	/* Count the differing bits; a single-bit flip sets exactly one. */
	while (diff) {
		bits += diff & 1;
		diff >>= 1;
	}
	return bits == 1;
}
47
48static void check_poison_mem(unsigned char *mem, size_t bytes)
49{
77311139 50 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
6a11f75b
AM
51 unsigned char *start;
52 unsigned char *end;
53
8823b1db
LA
54 if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
55 return;
56
8c5fb8ea
AM
57 start = memchr_inv(mem, PAGE_POISON, bytes);
58 if (!start)
6a11f75b
AM
59 return;
60
61 for (end = mem + bytes - 1; end > start; end--) {
62 if (*end != PAGE_POISON)
63 break;
64 }
65
77311139 66 if (!__ratelimit(&ratelimit))
6a11f75b
AM
67 return;
68 else if (start == end && single_bit_flip(*start, PAGE_POISON))
8823b1db 69 pr_err("pagealloc: single bit error\n");
6a11f75b 70 else
8823b1db 71 pr_err("pagealloc: memory corruption\n");
6a11f75b
AM
72
73 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
74 end - start + 1, 1);
75 dump_stack();
76}
77
6a11f75b
AM
78static void unpoison_page(struct page *page)
79{
64212ec5
AM
80 void *addr;
81
64212ec5 82 addr = kmap_atomic(page);
bd33ef36
VM
83 /*
84 * Page poisoning when enabled poisons each and every page
85 * that is freed to buddy. Thus no extra check is done to
dbf7684e 86 * see if a page was poisoned.
bd33ef36 87 */
64212ec5 88 check_poison_mem(addr, PAGE_SIZE);
64212ec5 89 kunmap_atomic(addr);
6a11f75b
AM
90}
91
8db26a3d 92void __kernel_unpoison_pages(struct page *page, int n)
6a11f75b
AM
93{
94 int i;
95
96 for (i = 0; i < n; i++)
97 unpoison_page(page + i);
98}
99
8823b1db
LA
#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
/*
 * Stub for architectures without DEBUG_PAGEALLOC mapping support: the
 * poison/verify work above already covers these pages, so no mapping
 * change is required here.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif