// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include <linux/tracehook.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

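/*
 * At most MAX_STACK_ENTRIES entries of a task's stack are examined in
 * klp_check_stack().
 */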
#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

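/*
 * A fake signal is sent once every SIGNALS_TIMEOUT unsuccessful retries of
 * klp_try_complete_transition(), i.e. roughly every SIGNALS_TIMEOUT seconds.
 */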
#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

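/*
 * The state (KLP_PATCHED or KLP_UNPATCHED) that all tasks are being moved
 * toward; KLP_UNDEFINED while no transition is in progress.
 */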
static int klp_target_state = KLP_UNDEFINED;

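/* Number of retries of the current transition, paces klp_send_signals(). */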
static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
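/* Re-run klp_try_complete_transition() roughly once per second until done. */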
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub used to implement a hard force of
 * synchronize_rcu(), which requires synchronizing even with tasks
 * in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching of functions where RCU is not watching,
 * e.g. before user_exit(). We cannot rely on the RCU infrastructure
 * to do the synchronization there. Instead, hard-force the sched
 * synchronization.
 *
 * This approach allows RCU functions to be used for manipulating
 * func_stack safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

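	/*
	 * A successfully applied atomic replace patch obsoletes all previously
	 * installed patches as well as its own no-op entries: unpatch the
	 * replaced patches and free the nops.
	 */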
	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, const char **oldname)
{
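	/*
	 * The large entries buffer is static to keep it off the kernel stack;
	 * a single buffer suffices since all callers run under klp_mutex.
	 */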
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	if (ret < 0)
		return -EINVAL;
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				*oldname = func->old_name;
				return -EADDRINUSE;
			}
		}
	}

	return 0;
}

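/*
 * Invoked via task_call_func(), which pins the task in its current scheduling
 * state (blocking wakeups and runqueue operations), so the task_curr() check
 * and the stack walk below see a stable picture of the task.
 */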
static int klp_check_and_switch_task(struct task_struct *task, void *arg)
{
	int ret;

	if (task_curr(task) && task != current)
		return -EBUSY;

	ret = klp_check_stack(task, arg);
	if (ret)
		return ret;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;
	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	const char *old_name;
	int ret;

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	ret = task_call_func(task, klp_check_and_switch_task, &old_name);
	switch (ret) {
	case 0:		/* success */
		break;

	case -EBUSY:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is running\n",
			 __func__, task->comm, task->pid);
		break;
	case -EINVAL:	/* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		break;
	case -EADDRINUSE: /* klp_check_and_switch_task() */
		pr_debug("%s: %s:%d is sleeping on function %s\n",
			 __func__, task->comm, task->pid, old_name);
		break;

	default:
		pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
			 __func__, ret, task->comm, task->pid);
		break;
	}

	return !ret;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless.  It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to all non-kthread tasks which
			 * are still not migrated.
			 */
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	cpus_read_unlock();

	if (!complete) {
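		/* Prod the stragglers every SIGNALS_TIMEOUT retries. */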
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;

		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();

	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition() but it is also called from
	 * klp_cancel_transition().
	 */
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
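	/* KLP_UNPATCHED == 0 and KLP_PATCHED == 1, so '!' flips between them. */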
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  That is not the case here, so the consistency model could be
 * broken.  The administrator, who is the only one who can execute
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

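	/*
	 * The consistency model may have been broken by the forced switches
	 * above, so mark all patches as forced: their modules can no longer
	 * be removed safely and will stay pinned.
	 */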
	klp_for_each_patch(patch)
		patch->forced = true;
}