// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (e.g. to allocate memory) while they hold
 * this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
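
/*
 * Usage sketch (an assumed caller pattern, not code from this file):
 * text patchers hold the mutex across the whole patch operation, e.g.
 *
 *	mutex_lock(&text_mutex);
 *	...modify kernel text...
 *	mutex_unlock(&text_mutex);
 */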

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];
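
/*
 * How entries get into __ex_table (an illustrative sketch; the concrete
 * macro is arch-specific, e.g. _ASM_EXTABLE() on x86): a potentially
 * faulting instruction is paired with the address of its fixup code,
 * roughly:
 *
 *	asm volatile("1:	mov %1, %0\n"
 *		     "2:\n"
 *		     _ASM_EXTABLE(1b, 2b)
 *		     : "=r" (val) : "m" (*uaddr));
 *
 * The linker gathers all such pairs into the section bounded by
 * __start___ex_table and __stop___ex_table.
 */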

/* Cleared by build-time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}
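
/*
 * Note: the table must be kept sorted by address; search_extable()
 * relies on that to do a binary search.
 */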

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___ex_table,
			   __stop___ex_table - __start___ex_table, addr);
	if (!e)
		e = search_module_extables(addr);
	return e;
}
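
/*
 * Usage sketch (an assumed caller; the fixup helpers are arch-specific):
 * a trap handler that hits a fault in kernel mode asks whether the
 * faulting instruction registered a fixup, and if so resumes there
 * instead of oopsing:
 *
 *	e = search_exception_tables(instruction_pointer(regs));
 *	if (e) {
 *		// redirect execution to the fixup code
 *		...
 *	}
 */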
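
/* Is @addr inside the kernel's init text, [_sinittext, _einittext)? */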
int init_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_sinittext &&
	    addr < (unsigned long)_einittext)
		return 1;
	return 0;
}
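
/*
 * Is @addr inside the core kernel's text section, [_stext, _etext)?
 * Init text also counts, but only while booting: it is freed once
 * the system reaches SYSTEM_RUNNING.
 */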
int notrace core_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_stext &&
	    addr < (unsigned long)_etext)
		return 1;

	if (system_state < SYSTEM_RUNNING &&
	    init_kernel_text(addr))
		return 1;
	return 0;
}

/**
 * core_kernel_data - tell if addr points to kernel data
 * @addr: address to test
 *
 * Returns true if @addr is within the core kernel's data section.
 *
 * Note: On some architectures this may return true for core RODATA
 * and false on others, but it will always be true for core RW data.
 */
int core_kernel_data(unsigned long addr)
{
	if (addr >= (unsigned long)_sdata &&
	    addr < (unsigned long)_edata)
		return 1;
	return 0;
}
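
/*
 * Like kernel_text_address(), but additionally accepts init text so
 * that init symbols in saved stack traces can still be resolved.
 */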
int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (init_kernel_text(addr))
		return 1;
	return 0;
}
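
/*
 * Does @addr point at executable kernel text of any kind: core kernel
 * text, module text, ftrace trampolines, kprobe slots or BPF images?
 */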
int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be notified to start watching again. This can
	 * happen either by tracing that triggers a stack trace, or a
	 * WARN() that happens while coming back from idle, or while a
	 * CPU is going on- or offline.
	 *
	 * is_module_text_address() as well as the kprobe slots
	 * and is_bpf_text_address() require RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		rcu_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		rcu_nmi_exit();

	return ret;
}

/*
 * On some architectures (PPC64, IA64) function pointers are actually
 * only tokens pointing to some data (a function descriptor) that holds
 * the real function address. As a result, to find out whether a
 * function pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;

	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
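
/*
 * Usage sketch (an assumed caller, not code from this file):
 * sanity-check a callback before invoking it from generic code:
 *
 *	if (WARN_ON(!func_ptr_is_kernel_text(cb)))
 *		return;
 *	cb();
 */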