// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);
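/*
 * Find the klp_ops associated with the given original function address.
 * All klp_func structs that patch the same function share one klp_ops
 * (and thus one ftrace_ops); they are stacked on ops->func_stack in
 * enable order.
 */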
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}
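/*
 * The ftrace handler that every call to a patched function is routed
 * through. It normally redirects execution to the newest klp_func on the
 * func_stack; while a transition is in progress it consults the current
 * task's patch state to decide between the new and the previous (or
 * original) implementation.
 */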
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
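/*
 * Remove a func from its func_stack. If it is the last one there, also
 * unregister the ftrace handler, drop the ftrace filter and free the
 * klp_ops.
 */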
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}
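/*
 * Start redirecting an original function. The first patch of a given
 * function allocates the klp_ops and registers the ftrace handler at the
 * function's ftrace location; subsequent patches of the same function are
 * only pushed onto the existing func_stack, where the handler finds the
 * newest entry.
 */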
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
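/*
 * Unpatch the functions of an object. With nops_only set, only the
 * dynamically allocated NOP functions (added by atomic-replace patches to
 * revert functions that are no longer patched) are removed.
 */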
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}
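/* Unpatch all patched functions of the object. */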
void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}
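/*
 * Patch all functions of the object; on the first failure, unpatch the
 * functions that were already patched and return the error.
 */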
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}
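/* Unpatch every patched object of the patch, honoring nops_only. */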
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}
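/* Unpatch all patched objects of the patch. */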
void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}
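/*
 * Remove only the dynamically allocated NOP functions of the patch, used
 * by the atomic-replace support once they are no longer needed.
 */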
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}
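/*
 * For context, a minimal sketch of the consumer side that drives the code
 * above, modeled on samples/livepatch/livepatch-sample.c in the kernel
 * tree. It assumes the one-call klp_enable_patch() API (v5.1+); the
 * replacement function and the patched symbol name are illustrative.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>
#include <linux/seq_file.h>

/* Replacement that klp_ftrace_handler() will redirect execution to. */
static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name being NULL means vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	/* Registers the patch and triggers klp_patch_object() above. */
	return klp_enable_patch(&patch);
}

static void livepatch_exit(void)
{
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");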