kernel/extable.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

*/
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * mutex protecting text section modification (dynamic code patching).
 * some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
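
/*
 * Illustrative sketch, not part of this file: code-patching paths are
 * expected to take text_mutex around the actual modification, roughly
 *
 *	mutex_lock(&text_mutex);
 *	text_poke(addr, opcode, len);	// x86 helper; needs text_mutex held
 *	mutex_unlock(&text_mutex);
 *
 * text_poke() is the x86 primitive; other architectures use their own
 * patching helpers under the same lock.
 */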

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed &&
	    &__stop___ex_table > &__start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	if (!e)
		e = search_bpf_extables(addr);
	return e;
}

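/*
 * Illustrative sketch, not part of this file: the page-fault path uses the
 * lookup above to recover from faults in whitelisted instructions
 * (uaccess helpers, BPF programs, ...), roughly
 *
 *	fixup = search_exception_tables(instruction_pointer(regs));
 *	if (fixup)
 *		... redirect execution to the fixup code ...
 *	else
 *		... genuine kernel bug: oops ...
 *
 * The arch fault handlers wrap this in fixup_exception(); the exact
 * signature is arch-specific.
 */
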
int notrace core_kernel_text(unsigned long addr)
{
	if (is_kernel_text(addr))
		return 1;

	if (system_state < SYSTEM_FREEING_INITMEM &&
	    is_kernel_inittext(addr))
		return 1;
	return 0;
}

int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (is_kernel_inittext(addr))
		return 1;
	return 0;
}

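/*
 * Illustrative sketch, not part of this file: stack dumpers typically use
 * the check above to decide whether a word found on the stack looks like
 * a return address before printing it, roughly
 *
 *	if (__kernel_text_address(word))
 *		printk("%pS\n", (void *)word);
 *
 * This is an assumption about typical callers, not code from extable.c.
 */
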
int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be notified that it requires to start
	 * watching again. This can happen either by tracing that
	 * triggers a stack trace, or a WARN() that happens during
	 * coming back from idle, or cpu on or offlining.
	 *
	 * is_module_text_address() as well as the kprobe slots,
	 * is_bpf_text_address() and is_bpf_image_address() require
	 * RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		ct_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		ct_nmi_exit();

	return ret;
}

/*
 * On some architectures (PPC64, IA64, PARISC) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
void *dereference_function_descriptor(void *ptr)
{
	func_desc_t *desc = ptr;
	void *p;

	if (!get_kernel_nofault(p, (void *)&desc->addr))
		ptr = p;
	return ptr;
}
EXPORT_SYMBOL_GPL(dereference_function_descriptor);

void *dereference_kernel_function_descriptor(void *ptr)
{
	if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif
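
/*
 * Illustrative sketch, not part of this file: on PPC64 ELFv1, for example,
 * a function "pointer" refers to an entry in the .opd section that looks
 * roughly like
 *
 *	struct sketch_func_desc {
 *		unsigned long addr;	// real entry point in .text
 *		unsigned long toc;	// TOC base for the callee
 *		unsigned long env;	// environment pointer
 *	};
 *
 * (field and struct names here are illustrative; the kernel's real type is
 * func_desc_t). dereference_function_descriptor() reads the address word
 * so the result can be compared against the kernel text range.
 */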

int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;
	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
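
/*
 * Illustrative sketch, not part of this file: a sanity check in core code
 * might use this helper to verify that a callback it is about to register
 * really points into kernel or module text, roughly
 *
 *	if (WARN_ON(!func_ptr_is_kernel_text(cb->fn)))
 *		return -EINVAL;
 *
 * cb->fn is a hypothetical callback field used only for illustration.
 */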