Commit | Line | Data |
---|---|---|
540adea3 MH |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // error-inject.c: Function-level error injection table | |
3 | #include <linux/error-injection.h> | |
4 | #include <linux/debugfs.h> | |
5 | #include <linux/kallsyms.h> | |
6 | #include <linux/kprobes.h> | |
7 | #include <linux/module.h> | |
8 | #include <linux/mutex.h> | |
9 | #include <linux/list.h> | |
10 | #include <linux/slab.h> | |
f2ec8d9a | 11 | #include <asm/sections.h> |
540adea3 MH |
12 | |
/* Whitelist of symbols that can be overridden for error injection. */
static LIST_HEAD(error_injection_list);
/* Serializes all access to error_injection_list (lookup and populate/unload). */
static DEFINE_MUTEX(ei_mutex);
/*
 * One whitelisted function on error_injection_list.
 * [start_addr, end_addr) spans the function's text as reported by kallsyms.
 */
struct ei_entry {
	struct list_head list;		/* link on error_injection_list */
	unsigned long start_addr;	/* function entry address */
	unsigned long end_addr;		/* start_addr + symbol size */
	int etype;			/* EI_ETYPE_* injectable error type */
	void *priv;			/* owning module, or NULL for vmlinux */
};
23 | ||
24 | bool within_error_injection_list(unsigned long addr) | |
25 | { | |
26 | struct ei_entry *ent; | |
27 | bool ret = false; | |
28 | ||
29 | mutex_lock(&ei_mutex); | |
30 | list_for_each_entry(ent, &error_injection_list, list) { | |
31 | if (addr >= ent->start_addr && addr < ent->end_addr) { | |
32 | ret = true; | |
33 | break; | |
34 | } | |
35 | } | |
36 | mutex_unlock(&ei_mutex); | |
37 | return ret; | |
38 | } | |
39 | ||
663faf9f MH |
40 | int get_injectable_error_type(unsigned long addr) |
41 | { | |
42 | struct ei_entry *ent; | |
43 | ||
44 | list_for_each_entry(ent, &error_injection_list, list) { | |
45 | if (addr >= ent->start_addr && addr < ent->end_addr) | |
46 | return ent->etype; | |
47 | } | |
48 | return EI_ETYPE_NONE; | |
49 | } | |
50 | ||
540adea3 MH |
51 | /* |
52 | * Lookup and populate the error_injection_list. | |
53 | * | |
54 | * For safety reasons we only allow certain functions to be overridden with | |
55 | * bpf_error_injection, so we need to populate the list of the symbols that have | |
56 | * been marked as safe for overriding. | |
57 | */ | |
663faf9f MH |
58 | static void populate_error_injection_list(struct error_injection_entry *start, |
59 | struct error_injection_entry *end, | |
60 | void *priv) | |
540adea3 | 61 | { |
663faf9f | 62 | struct error_injection_entry *iter; |
540adea3 MH |
63 | struct ei_entry *ent; |
64 | unsigned long entry, offset = 0, size = 0; | |
65 | ||
66 | mutex_lock(&ei_mutex); | |
67 | for (iter = start; iter < end; iter++) { | |
f2ec8d9a | 68 | entry = (unsigned long)dereference_symbol_descriptor((void *)iter->addr); |
540adea3 MH |
69 | |
70 | if (!kernel_text_address(entry) || | |
71 | !kallsyms_lookup_size_offset(entry, &size, &offset)) { | |
72 | pr_err("Failed to find error inject entry at %p\n", | |
73 | (void *)entry); | |
74 | continue; | |
75 | } | |
76 | ||
77 | ent = kmalloc(sizeof(*ent), GFP_KERNEL); | |
78 | if (!ent) | |
79 | break; | |
80 | ent->start_addr = entry; | |
81 | ent->end_addr = entry + size; | |
663faf9f | 82 | ent->etype = iter->etype; |
540adea3 MH |
83 | ent->priv = priv; |
84 | INIT_LIST_HEAD(&ent->list); | |
85 | list_add_tail(&ent->list, &error_injection_list); | |
86 | } | |
87 | mutex_unlock(&ei_mutex); | |
88 | } | |
89 | ||
90 | /* Markers of the _error_inject_whitelist section */ | |
663faf9f MH |
91 | extern struct error_injection_entry __start_error_injection_whitelist[]; |
92 | extern struct error_injection_entry __stop_error_injection_whitelist[]; | |
540adea3 MH |
93 | |
94 | static void __init populate_kernel_ei_list(void) | |
95 | { | |
96 | populate_error_injection_list(__start_error_injection_whitelist, | |
97 | __stop_error_injection_whitelist, | |
98 | NULL); | |
99 | } | |
100 | ||
101 | #ifdef CONFIG_MODULES | |
102 | static void module_load_ei_list(struct module *mod) | |
103 | { | |
104 | if (!mod->num_ei_funcs) | |
105 | return; | |
106 | ||
107 | populate_error_injection_list(mod->ei_funcs, | |
108 | mod->ei_funcs + mod->num_ei_funcs, mod); | |
109 | } | |
110 | ||
111 | static void module_unload_ei_list(struct module *mod) | |
112 | { | |
113 | struct ei_entry *ent, *n; | |
114 | ||
115 | if (!mod->num_ei_funcs) | |
116 | return; | |
117 | ||
118 | mutex_lock(&ei_mutex); | |
119 | list_for_each_entry_safe(ent, n, &error_injection_list, list) { | |
120 | if (ent->priv == mod) { | |
121 | list_del_init(&ent->list); | |
122 | kfree(ent); | |
123 | } | |
124 | } | |
125 | mutex_unlock(&ei_mutex); | |
126 | } | |
127 | ||
128 | /* Module notifier call back, checking error injection table on the module */ | |
129 | static int ei_module_callback(struct notifier_block *nb, | |
130 | unsigned long val, void *data) | |
131 | { | |
132 | struct module *mod = data; | |
133 | ||
134 | if (val == MODULE_STATE_COMING) | |
135 | module_load_ei_list(mod); | |
136 | else if (val == MODULE_STATE_GOING) | |
137 | module_unload_ei_list(mod); | |
138 | ||
139 | return NOTIFY_DONE; | |
140 | } | |
141 | ||
/* Notifier hooking module load/unload into the whitelist (default priority). */
static struct notifier_block ei_module_nb = {
	.notifier_call = ei_module_callback,
	.priority = 0
};
146 | ||
/* Register the module notifier; returns 0 on success. */
static __init int module_ei_init(void)
{
	return register_module_notifier(&ei_module_nb);
}
#else /* !CONFIG_MODULES */
/* No module support: nothing to register, report success. */
#define module_ei_init() (0)
#endif
154 | ||
155 | /* | |
156 | * error_injection/whitelist -- shows which functions can be overridden for | |
157 | * error injection. | |
158 | */ | |
/*
 * Begin a seq_file walk of the whitelist.  Takes ei_mutex so the list
 * cannot change during the dump; the lock is dropped in ei_seq_stop().
 */
static void *ei_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ei_mutex);
	return seq_list_start(&error_injection_list, *pos);
}
164 | ||
/* End of a seq_file walk pass: release the lock taken in ei_seq_start(). */
static void ei_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&ei_mutex);
}
169 | ||
/* Advance to the next whitelist entry (ei_mutex is held by start/stop). */
static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &error_injection_list, pos);
}
174 | ||
663faf9f MH |
175 | static const char *error_type_string(int etype) |
176 | { | |
177 | switch (etype) { | |
178 | case EI_ETYPE_NULL: | |
179 | return "NULL"; | |
180 | case EI_ETYPE_ERRNO: | |
181 | return "ERRNO"; | |
182 | case EI_ETYPE_ERRNO_NULL: | |
183 | return "ERRNO_NULL"; | |
537cd894 BP |
184 | case EI_ETYPE_TRUE: |
185 | return "TRUE"; | |
663faf9f MH |
186 | default: |
187 | return "(unknown)"; | |
188 | } | |
189 | } | |
190 | ||
540adea3 MH |
191 | static int ei_seq_show(struct seq_file *m, void *v) |
192 | { | |
193 | struct ei_entry *ent = list_entry(v, struct ei_entry, list); | |
194 | ||
d75f773c | 195 | seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr, |
663faf9f | 196 | error_type_string(ent->etype)); |
540adea3 MH |
197 | return 0; |
198 | } | |
199 | ||
/*
 * seq_file iterator over error_injection_list; ei_mutex is held from
 * ei_seq_start() through ei_seq_stop().
 */
static const struct seq_operations ei_seq_ops = {
	.start = ei_seq_start,
	.next = ei_seq_next,
	.stop = ei_seq_stop,
	.show = ei_seq_show,
};
206 | ||
/* debugfs open: attach the whitelist iterator to the file. */
static int ei_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &ei_seq_ops);
}
211 | ||
/* File operations for <debugfs>/error_injection/list (read-only seq_file). */
static const struct file_operations debugfs_ei_ops = {
	.open = ei_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
218 | ||
219 | static int __init ei_debugfs_init(void) | |
220 | { | |
221 | struct dentry *dir, *file; | |
222 | ||
223 | dir = debugfs_create_dir("error_injection", NULL); | |
224 | if (!dir) | |
225 | return -ENOMEM; | |
226 | ||
227 | file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops); | |
228 | if (!file) { | |
229 | debugfs_remove(dir); | |
230 | return -ENOMEM; | |
231 | } | |
232 | ||
233 | return 0; | |
234 | } | |
235 | ||
236 | static int __init init_error_injection(void) | |
237 | { | |
238 | populate_kernel_ei_list(); | |
239 | ||
240 | if (!module_ei_init()) | |
241 | ei_debugfs_init(); | |
242 | ||
243 | return 0; | |
244 | } | |
245 | late_initcall(init_error_injection); |