/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES	100
#define STACK_ERR_BUF_SIZE	128

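/*
 * The patch currently being applied or reverted, or NULL when no transition
 * is in progress.
 */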
struct klp_patch *klp_transition_patch;

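/*
 * The patch state (KLP_PATCHED or KLP_UNPATCHED) that all tasks are being
 * migrated towards; KLP_UNDEFINED while no transition is in progress.
 */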
static int klp_target_state = KLP_UNDEFINED;

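/*
 * Set when the administrator forces an unfinished transition to complete.
 * Once set, klp_complete_transition() no longer drops the patch module's
 * reference, so the module stays pinned.
 */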
static bool klp_forced = false;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_sched().  This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We allow patching of functions where RCU is not watching, e.g. before
 * user_exit().  We cannot rely on the RCU infrastructure to do the
 * synchronization.  Instead, hard-force the sched synchronization.
 *
 * This approach allows RCU functions to be used for manipulating the
 * func_stack safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

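	/*
	 * All tasks are now in the target patch state.  Reset the per-task
	 * state to KLP_UNDEFINED so a future transition can re-initialize it.
	 */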
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * When klp_forced is set, the module reference is intentionally never
	 * dropped, which implies an unbounded increase of the module's ref
	 * count if the module is disabled/enabled in a loop.
	 */
	if (!klp_forced && klp_target_state == KLP_UNPATCHED)
		module_put(klp_transition_patch->mod);

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_addr);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
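	/*
	 * save_stack_trace_tsk_reliable() returns -ENOSYS when the
	 * architecture provides no reliable stacktrace implementation; any
	 * other non-zero value means this particular stack could not be
	 * walked reliably.
	 */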
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	klp_complete_transition();
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
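	/*
	 * KLP_UNPATCHED (0) and KLP_PATCHED (1) are logical opposites, so the
	 * initial state is simply the negation of the target state.
	 */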
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.  Only the administrator
 * can currently request this action.
 */
void klp_send_signals(void)
{
	struct task_struct *g, *task;

	pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless.  It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which is sleeping interruptibly
			 * and has not yet been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send a fake signal to a non-kthread task which has
			 * not yet been migrated.
			 */
			spin_lock_irq(&task->sighand->siglock);
			signal_wake_up(task, 0);
			spin_unlock_irq(&task->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on the administrator's request.  This
 * forces an existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one who can trigger
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	klp_forced = true;
}