// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include <linux/static_call.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

#define MAX_STACK_ENTRIES	100
static DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries);

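/*
 * Note: the stack-trace buffer is per-CPU rather than on-stack; with
 * MAX_STACK_ENTRIES == 100 an on-stack array would cost 800 bytes on 64-bit.
 * klp_check_stack() only uses it with preemption disabled, see the
 * lockdep_assert_preemption_disabled() check there.
 */
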
#define STACK_ERR_BUF_SIZE	128

#define SIGNALS_TIMEOUT		15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_TRANSITION_IDLE;

static unsigned int klp_signals_cnt;

/*
 * When a livepatch is in progress, enable klp stack checking in
 * schedule().  This helps CPU-bound kthreads get patched.
 */

DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key);

#define klp_resched_enable()	static_branch_enable(&klp_sched_try_switch_key)
#define klp_resched_disable()	static_branch_disable(&klp_sched_try_switch_key)

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

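/*
 * The work is re-armed from klp_try_complete_transition() below, roughly once
 * per second, for as long as some tasks are still stuck in the old state:
 *
 *	schedule_delayed_work(&klp_transition_work, round_jiffies_relative(HZ));
 */
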
/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization. Instead, hard-force the sched synchronization.
 *
 * This approach allows RCU functions to be used for manipulating
 * func_stack safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

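/*
 * Note: schedule_on_each_cpu() queues the empty klp_sync() work on every
 * online CPU and waits for all of them to finish.  By the time it returns,
 * every CPU has gone through the scheduler at least once, which is what makes
 * this a stronger substitute for synchronize_rcu() here.
 */
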
/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_TRANSITION_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_TRANSITION_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_TRANSITION_UNPATCHED so
		 * we can now remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_TRANSITION_IDLE state */
	if (klp_target_state == KLP_TRANSITION_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_TRANSITION_IDLE;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_TRANSITION_IDLE;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_TRANSITION_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_TRANSITION_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_TRANSITION_IDLE;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_TRANSITION_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barriers are in
	 *    klp_init_transition() and klp_reverse_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

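/*
 * For reference: on architectures using the generic entry code, the "switch
 * at kernel exit" mechanism referred to elsewhere in this file boils down to
 * roughly the following check in exit_to_user_mode_loop() (simplified sketch,
 * not the exact upstream code):
 *
 *	if (ti_work & _TIF_PATCH_PENDING)
 *		klp_update_patch_state(current);
 */
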
/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	if (klp_target_state == KLP_TRANSITION_UNPATCHED) {
		/*
		 * Check for the to-be-unpatched function
		 * (the func itself).
		 */
		func_addr = (unsigned long)func->new_func;
		func_size = func->new_size;
	} else {
		/*
		 * Check for the to-be-patched function
		 * (the previous func).
		 */
		ops = klp_find_ops(func->old_func);

		if (list_is_singular(&ops->func_stack)) {
			/* original function */
			func_addr = (unsigned long)func->old_func;
			func_size = func->old_size;
		} else {
			/* previously patched function */
			struct klp_func *prev;

			prev = list_next_entry(func, stack_node);
			func_addr = (unsigned long)prev->new_func;
			func_size = prev->new_size;
		}
	}

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, const char **oldname)
{
	unsigned long *entries = this_cpu_ptr(klp_stack_entries);
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	/* Protect 'klp_stack_entries' */
	lockdep_assert_preemption_disabled();

	ret = stack_trace_save_tsk_reliable(task, entries, MAX_STACK_ENTRIES);
	if (ret < 0)
		return -EINVAL;
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				*oldname = func->old_name;
				return -EADDRINUSE;
			}
		}
	}

	return 0;
}

static int klp_check_and_switch_task(struct task_struct *task, void *arg)
{
	int ret;

	if (task_curr(task) && task != current)
		return -EBUSY;

	ret = klp_check_stack(task, arg);
	if (ret)
		return ret;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;
	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	const char *old_name;
	int ret;

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	if (task == current)
		ret = klp_check_and_switch_task(current, &old_name);
	else
		ret = task_call_func(task, klp_check_and_switch_task, &old_name);

	switch (ret) {
	case 0:		/* success */
		break;

	case -EBUSY:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is running\n",
			 __func__, task->comm, task->pid);
		break;
	case -EINVAL:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		break;
	case -EADDRINUSE: /* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is sleeping on function %s\n",
			 __func__, task->comm, task->pid, old_name);
		break;

	default:
		pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
			 __func__, ret, task->comm, task->pid);
		break;
	}

	return !ret;
}

void __klp_sched_try_switch(void)
{
	/*
	 * This function is called from __schedule() while a context switch is
	 * about to happen.  Preemption is already disabled and klp_mutex
	 * can't be acquired.
	 * Disabled preemption is used to prevent racing with other callers of
	 * klp_try_switch_task().  Thanks to task_call_func() they won't be
	 * able to switch to this task while it's running.
	 */
	lockdep_assert_preemption_disabled();

	if (likely(!klp_patch_pending(current)))
		return;

	/*
	 * Enforce the order of the TIF_PATCH_PENDING read above and the
	 * klp_target_state read in klp_try_switch_task().  The corresponding
	 * write barriers are in klp_init_transition() and
	 * klp_reverse_transition().
	 */
	smp_rmb();

	klp_try_switch_task(current);
}

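/*
 * For reference: the scheduler only calls into __klp_sched_try_switch() while
 * klp_sched_try_switch_key is enabled.  The gate on the scheduler side looks
 * roughly like this (simplified sketch, not the exact upstream code):
 *
 *	if (static_branch_unlikely(&klp_sched_try_switch_key))
 *		__klp_sched_try_switch();
 */
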
/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless.  It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to all non-kthread tasks which
			 * have still not been migrated.
			 */
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task)) {
				complete = false;
				/* Make idle task go through the main loop. */
				wake_up_if_idle(cpu);
			}
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	cpus_read_unlock();

	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* Done!  Now cleanup the data structures. */
	klp_resched_disable();
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition() but it is also called from
	 * klp_cancel_transition().
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_resched_enable();

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_IDLE);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of
	 * KLP_TRANSITION_IDLE.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and
	 * __klp_sched_try_switch() don't set a task->patch_state to
	 * KLP_TRANSITION_IDLE.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

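/*
 * For reference: the enable path in core.c is expected to drive the transition
 * machinery in roughly this order (simplified sketch, error handling and
 * callback details omitted):
 *
 *	klp_init_transition(patch, KLP_TRANSITION_PATCHED);
 *	klp_for_each_object(patch, obj)
 *		if (klp_is_object_loaded(obj))
 *			klp_patch_object(obj);	// makes funcs visible to ftrace
 *	klp_start_transition();
 *	klp_try_complete_transition();
 *
 * The disable path is analogous, with KLP_TRANSITION_UNPATCHED and without
 * the klp_patch_object() step.
 */
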
/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_TRANSITION_PATCHED ? "patching to unpatching" :
							      "unpatching to patching");

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() or __klp_sched_try_switch() running in
	 * parallel with the reverse transition.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/*
	 * Make sure all existing invocations of klp_update_patch_state() and
	 * __klp_sched_try_switch() see the cleared TIF_PATCH_PENDING before
	 * starting the reverse transition.
	 */
	klp_synchronize_transition();

	/*
	 * All patching has stopped, now re-initialize the global variables to
	 * prepare for the reverse transition.
	 */
	klp_transition_patch->enabled = !klp_transition_patch->enabled;
	klp_target_state = !klp_target_state;

	/*
	 * Enforce the order of the klp_target_state write and the
	 * TIF_PATCH_PENDING writes in klp_start_transition() to ensure
	 * klp_update_patch_state() and __klp_sched_try_switch() don't set
	 * task->patch_state to the wrong value.
	 */
	smp_wmb();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	/*
	 * The parent process may have gone through a KLP transition since
	 * the thread flag was copied in setup_thread_stack() earlier.  Bring
	 * the task flag up to date with the parent here.
	 *
	 * The operation is serialized against all klp_*_transition()
	 * operations by the tasklist_lock.  The only exceptions are
	 * klp_update_patch_state(current) and __klp_sched_try_switch(), but we
	 * cannot race with them because we are current.
	 */
	if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
		set_tsk_thread_flag(child, TIF_PATCH_PENDING);
	else
		clear_tsk_thread_flag(child, TIF_PATCH_PENDING);

	child->patch_state = current->patch_state;
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one who can trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	/* Set forced flag for patches being removed. */
	if (klp_target_state == KLP_TRANSITION_UNPATCHED)
		klp_transition_patch->forced = true;
	else if (klp_transition_patch->replace) {
		klp_for_each_patch(patch) {
			if (patch != klp_transition_patch)
				patch->forced = true;
		}
	}
}