// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include <linux/static_call.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

#define MAX_STACK_ENTRIES  100
static DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries);
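
/*
 * Note: the trace buffer is per-CPU rather than on-stack because
 * MAX_STACK_ENTRIES longs would be a rather large stack allocation.  It is
 * only used with preemption disabled; see the lockdep assertion in
 * klp_check_stack().
 */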

#define STACK_ERR_BUF_SIZE 128

#define SIGNALS_TIMEOUT 15

struct klp_patch *klp_transition_patch;

static int klp_target_state = KLP_TRANSITION_IDLE;

static unsigned int klp_signals_cnt;

/*
 * When a livepatch is in progress, enable klp stack checking in
 * schedule().  This helps CPU-bound kthreads get patched.
 */

DEFINE_STATIC_KEY_FALSE(klp_sched_try_switch_key);

#define klp_resched_enable() static_branch_enable(&klp_sched_try_switch_key)
#define klp_resched_disable() static_branch_disable(&klp_sched_try_switch_key)
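
/*
 * Note: while the key is enabled, the scheduler calls
 * __klp_sched_try_switch() (below) on the context-switch path, so the check
 * runs at most once per reschedule; once the key is disabled again it costs
 * only a patched-out branch.
 */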

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
        mutex_lock(&klp_mutex);

        if (klp_transition_patch)
                klp_try_complete_transition();

        mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_rcu(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit().  We cannot rely on the RCU infrastructure
 * to do the synchronization.  Instead, hard-force the sched synchronization.
 *
 * This approach makes it possible to use RCU functions for manipulating
 * func_stack safely.
 */
static void klp_synchronize_transition(void)
{
        schedule_on_each_cpu(klp_sync);
}
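
/*
 * Note: schedule_on_each_cpu() runs the empty klp_sync() work on every CPU
 * and waits for it to finish, which guarantees each CPU has passed through
 * the scheduler at least once.  Since klp_ftrace_handler() and
 * klp_update_patch_state() run with preemption disabled, any invocation that
 * started before the caller's updates has completed by the time this
 * returns, even in contexts where RCU is not watching.
 */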

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
        struct klp_object *obj;
        struct klp_func *func;
        struct task_struct *g, *task;
        unsigned int cpu;

        pr_debug("'%s': completing %s transition\n",
                 klp_transition_patch->mod->name,
                 klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

        if (klp_transition_patch->replace && klp_target_state == KLP_TRANSITION_PATCHED) {
                klp_unpatch_replaced_patches(klp_transition_patch);
                klp_discard_nops(klp_transition_patch);
        }

        if (klp_target_state == KLP_TRANSITION_UNPATCHED) {
                /*
                 * All tasks have transitioned to KLP_TRANSITION_UNPATCHED so we can now
                 * remove the new functions from the func_stack.
                 */
                klp_unpatch_objects(klp_transition_patch);

                /*
                 * Make sure klp_ftrace_handler() can no longer see functions
                 * from this patch on the ops->func_stack.  Otherwise, after
                 * func->transition gets cleared, the handler may choose a
                 * removed function.
                 */
                klp_synchronize_transition();
        }

        klp_for_each_object(klp_transition_patch, obj)
                klp_for_each_func(obj, func)
                        func->transition = false;

        /* Prevent klp_ftrace_handler() from seeing KLP_TRANSITION_IDLE state */
        if (klp_target_state == KLP_TRANSITION_PATCHED)
                klp_synchronize_transition();

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_TRANSITION_IDLE;
        }
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
                task->patch_state = KLP_TRANSITION_IDLE;
        }

        klp_for_each_object(klp_transition_patch, obj) {
                if (!klp_is_object_loaded(obj))
                        continue;
                if (klp_target_state == KLP_TRANSITION_PATCHED)
                        klp_post_patch_callback(obj);
                else if (klp_target_state == KLP_TRANSITION_UNPATCHED)
                        klp_post_unpatch_callback(obj);
        }

        pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
                  klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

        klp_target_state = KLP_TRANSITION_IDLE;
        klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
        if (WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_PATCHED))
                return;

        pr_debug("'%s': canceling patching transition, going to unpatch\n",
                 klp_transition_patch->mod->name);

        klp_target_state = KLP_TRANSITION_UNPATCHED;
        klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
        /*
         * A variant of synchronize_rcu() is used to allow patching functions
         * where RCU is not watching, see klp_synchronize_transition().
         */
        preempt_disable_notrace();

        /*
         * This test_and_clear_tsk_thread_flag() call also serves as a read
         * barrier (smp_rmb) for two cases:
         *
         * 1) Enforce the order of the TIF_PATCH_PENDING read and the
         *    klp_target_state read.  The corresponding write barriers are in
         *    klp_init_transition() and klp_reverse_transition().
         *
         * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
         *    of func->transition, if klp_ftrace_handler() is called later on
         *    the same CPU.  See __klp_disable_patch().
         */
        if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
                task->patch_state = READ_ONCE(klp_target_state);

        preempt_enable_notrace();
}
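
/*
 * Note: besides the callers in this file, klp_update_patch_state() also runs
 * when a task with TIF_PATCH_PENDING set leaves the kernel (e.g. from the
 * exit-to-usermode work loop), so a user task that was missed by the stack
 * check migrates on its next return to userspace.
 */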

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
                                unsigned int nr_entries)
{
        unsigned long func_addr, func_size, address;
        struct klp_ops *ops;
        int i;

        if (klp_target_state == KLP_TRANSITION_UNPATCHED) {
                /*
                 * Check for the to-be-unpatched function
                 * (the func itself).
                 */
                func_addr = (unsigned long)func->new_func;
                func_size = func->new_size;
        } else {
                /*
                 * Check for the to-be-patched function
                 * (the previous func).
                 */
                ops = klp_find_ops(func->old_func);

                if (list_is_singular(&ops->func_stack)) {
                        /* original function */
                        func_addr = (unsigned long)func->old_func;
                        func_size = func->old_size;
                } else {
                        /* previously patched function */
                        struct klp_func *prev;

                        prev = list_next_entry(func, stack_node);
                        func_addr = (unsigned long)prev->new_func;
                        func_size = prev->new_size;
                }
        }

        for (i = 0; i < nr_entries; i++) {
                address = entries[i];

                if (address >= func_addr && address < func_addr + func_size)
                        return -EAGAIN;
        }

        return 0;
}
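
/*
 * Note: the address range checked is always the version of the function the
 * task could currently be executing: when unpatching, this patch's new_func;
 * when patching, whatever currently sits in front of it on ops->func_stack
 * (the original function, or the new_func of a previous patch).  A return
 * address inside that range means the task is mid-call and must not be
 * switched yet.
 */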

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, const char **oldname)
{
        unsigned long *entries = this_cpu_ptr(klp_stack_entries);
        struct klp_object *obj;
        struct klp_func *func;
        int ret, nr_entries;

        /* Protect 'klp_stack_entries' */
        lockdep_assert_preemption_disabled();

        ret = stack_trace_save_tsk_reliable(task, entries, MAX_STACK_ENTRIES);
        if (ret < 0)
                return -EINVAL;
        nr_entries = ret;

        klp_for_each_object(klp_transition_patch, obj) {
                if (!obj->patched)
                        continue;
                klp_for_each_func(obj, func) {
                        ret = klp_check_stack_func(func, entries, nr_entries);
                        if (ret) {
                                *oldname = func->old_name;
                                return -EADDRINUSE;
                        }
                }
        }

        return 0;
}

static int klp_check_and_switch_task(struct task_struct *task, void *arg)
{
        int ret;

        if (task_curr(task) && task != current)
                return -EBUSY;

        ret = klp_check_stack(task, arg);
        if (ret)
                return ret;

        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        task->patch_state = klp_target_state;
        return 0;
}
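
/*
 * Note: when this runs via task_call_func() below, the scheduler holds the
 * task's locks, so a blocked task cannot wake up and start running while its
 * stack is being walked; the task_curr() test rejects tasks already on a CPU
 * (other than current).
 */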

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
        const char *old_name;
        int ret;

        /* check if this task has already switched over */
        if (task->patch_state == klp_target_state)
                return true;

        /*
         * For arches which don't have reliable stack traces, we have to rely
         * on other methods (e.g., switching tasks at kernel exit).
         */
        if (!klp_have_reliable_stack())
                return false;

        /*
         * Now try to check the stack for any to-be-patched or to-be-unpatched
         * functions.  If all goes well, switch the task to the target patch
         * state.
         */
        if (task == current)
                ret = klp_check_and_switch_task(current, &old_name);
        else
                ret = task_call_func(task, klp_check_and_switch_task, &old_name);

        switch (ret) {
        case 0:         /* success */
                break;

        case -EBUSY:    /* klp_check_and_switch_task() */
                pr_debug("%s: %s:%d is running\n",
                         __func__, task->comm, task->pid);
                break;
        case -EINVAL:   /* klp_check_and_switch_task() */
                pr_debug("%s: %s:%d has an unreliable stack\n",
                         __func__, task->comm, task->pid);
                break;
        case -EADDRINUSE: /* klp_check_and_switch_task() */
                pr_debug("%s: %s:%d is sleeping on function %s\n",
                         __func__, task->comm, task->pid, old_name);
                break;

        default:
                pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
                         __func__, ret, task->comm, task->pid);
                break;
        }

        return !ret;
}

void __klp_sched_try_switch(void)
{
        /*
         * This function is called from __schedule() while a context switch is
         * about to happen.  Preemption is already disabled and klp_mutex
         * can't be acquired.
         * Disabled preemption is used to prevent racing with other callers of
         * klp_try_switch_task().  Thanks to task_call_func() they won't be
         * able to switch to this task while it's running.
         */
        lockdep_assert_preemption_disabled();

        if (likely(!klp_patch_pending(current)))
                return;

        /*
         * Enforce the order of the TIF_PATCH_PENDING read above and the
         * klp_target_state read in klp_try_switch_task().  The corresponding
         * write barriers are in klp_init_transition() and
         * klp_reverse_transition().
         */
        smp_rmb();

        klp_try_switch_task(current);
}
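
/*
 * Note: this hook mainly exists for CPU-bound kthreads, which may neither
 * sleep long enough for the stack walk nor ever return to userspace.
 * Checking current's stack here is safe because the task is about to be
 * switched out and, with preemption disabled, nobody else can switch it
 * concurrently.
 */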

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
        struct task_struct *g, *task;

        if (klp_signals_cnt == SIGNALS_TIMEOUT)
                pr_notice("signaling remaining tasks\n");

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                if (!klp_patch_pending(task))
                        continue;

                /*
                 * There is a small race here.  We could see TIF_PATCH_PENDING
                 * set and decide to wake up a kthread or send a fake signal.
                 * Meanwhile the task could migrate itself and the action
                 * would be meaningless.  It is not serious though.
                 */
                if (task->flags & PF_KTHREAD) {
                        /*
                         * Wake up a kthread which sleeps interruptibly and
                         * still has not been migrated.
                         */
                        wake_up_state(task, TASK_INTERRUPTIBLE);
                } else {
                        /*
                         * Send a fake signal to all non-kthread tasks which
                         * are still not migrated.
                         */
                        set_notify_signal(task);
                }
        }
        read_unlock(&tasklist_lock);
}
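
/*
 * Note: set_notify_signal() does not queue a real signal; it only makes the
 * task take the signal-handling slow path on its way back to usermode, where
 * the pending TIF_PATCH_PENDING flag gets processed.  Userspace never sees
 * anything.  Likewise, waking an interruptibly-sleeping kthread just forces
 * it around its main loop, where well-behaved kthreads tolerate spurious
 * wakeups.
 */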

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;
        struct klp_patch *patch;
        bool complete = true;

        WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE);

        /*
         * Try to switch the tasks to the target patch state by walking their
         * stacks and looking for any to-be-patched or to-be-unpatched
         * functions.  If such functions are found on a stack, or if the stack
         * is deemed unreliable, the task can't be switched yet.
         *
         * Usually this will transition most (or all) of the tasks on a system
         * unless the patch includes changes to a very common function.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (!klp_try_switch_task(task))
                        complete = false;
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        cpus_read_lock();
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (cpu_online(cpu)) {
                        if (!klp_try_switch_task(task)) {
                                complete = false;
                                /* Make idle task go through the main loop. */
                                wake_up_if_idle(cpu);
                        }
                } else if (task->patch_state != klp_target_state) {
                        /* offline idle tasks can be switched immediately */
                        clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
                        task->patch_state = klp_target_state;
                }
        }
        cpus_read_unlock();

        if (!complete) {
                if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
                        klp_send_signals();
                klp_signals_cnt++;

                /*
                 * Some tasks weren't able to be switched over.  Try again
                 * later and/or wait for other methods like kernel exit
                 * switching.
                 */
                schedule_delayed_work(&klp_transition_work,
                                      round_jiffies_relative(HZ));
                return;
        }

        /* Done!  Now cleanup the data structures. */
        klp_resched_disable();
        patch = klp_transition_patch;
        klp_complete_transition();

        /*
         * It would make more sense to free the unused patches in
         * klp_complete_transition() but it is called also
         * from klp_cancel_transition().
         */
        if (!patch->enabled)
                klp_free_patch_async(patch);
        else if (patch->replace)
                klp_free_replaced_patches_async(patch);
}
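
/*
 * Note: the retry cadence this sets up is roughly one pass per second
 * (round_jiffies_relative(HZ)), with klp_send_signals() kicking the
 * remaining tasks every SIGNALS_TIMEOUT (15) passes, i.e. about every 15
 * seconds until the transition completes or is reversed.
 */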

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
        struct task_struct *g, *task;
        unsigned int cpu;

        WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE);

        pr_notice("'%s': starting %s transition\n",
                  klp_transition_patch->mod->name,
                  klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

        /*
         * Mark all normal tasks as needing a patch state update.  They'll
         * switch either in klp_try_complete_transition() or as they exit the
         * kernel.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        /*
         * Mark all idle tasks as needing a patch state update.  They'll switch
         * either in klp_try_complete_transition() or at the idle loop switch
         * point.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                if (task->patch_state != klp_target_state)
                        set_tsk_thread_flag(task, TIF_PATCH_PENDING);
        }

        klp_resched_enable();

        klp_signals_cnt = 0;
}
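
/*
 * Note: from this point on a task can be migrated by any one of four
 * mechanisms: the stack check in klp_try_complete_transition(), the
 * exit-to-usermode hook, the scheduler hook enabled via klp_resched_enable()
 * above, or the idle loop switch point for the swapper tasks.
 */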

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
        struct task_struct *g, *task;
        unsigned int cpu;
        struct klp_object *obj;
        struct klp_func *func;
        int initial_state = !state;

        WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_IDLE);

        klp_transition_patch = patch;

        /*
         * Set the global target patch state which tasks will switch to.  This
         * has no effect until the TIF_PATCH_PENDING flags get set later.
         */
        klp_target_state = state;

        pr_debug("'%s': initializing %s transition\n", patch->mod->name,
                 klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");

        /*
         * Initialize all tasks to the initial patch state to prepare them for
         * switching to the target state.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
                WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE);
                task->patch_state = initial_state;
        }
        read_unlock(&tasklist_lock);

        /*
         * Ditto for the idle "swapper" tasks.
         */
        for_each_possible_cpu(cpu) {
                task = idle_task(cpu);
                WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE);
                task->patch_state = initial_state;
        }

        /*
         * Enforce the order of the task->patch_state initializations and the
         * func->transition updates to ensure that klp_ftrace_handler() doesn't
         * see a func in transition with a task->patch_state of KLP_TRANSITION_IDLE.
         *
         * Also enforce the order of the klp_target_state write and future
         * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and
         * __klp_sched_try_switch() don't set a task->patch_state to
         * KLP_TRANSITION_IDLE.
         */
        smp_wmb();

        /*
         * Set the func transition states so klp_ftrace_handler() will know to
         * switch to the transition logic.
         *
         * When patching, the funcs aren't yet in the func_stack and will be
         * made visible to the ftrace handler shortly by the calls to
         * klp_patch_object().
         *
         * When unpatching, the funcs are already in the func_stack and so are
         * already visible to the ftrace handler.
         */
        klp_for_each_object(patch, obj)
                klp_for_each_func(obj, func)
                        func->transition = true;
}
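
/*
 * Note on the barrier pairing: the smp_wmb() above is matched by the implied
 * read barrier of test_and_clear_tsk_thread_flag() in klp_update_patch_state()
 * and by the explicit smp_rmb() in __klp_sched_try_switch().  A task that
 * observes TIF_PATCH_PENDING is thus guaranteed to also observe the final
 * klp_target_state rather than KLP_TRANSITION_IDLE.
 */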

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
        unsigned int cpu;
        struct task_struct *g, *task;

        pr_debug("'%s': reversing transition from %s\n",
                 klp_transition_patch->mod->name,
                 klp_target_state == KLP_TRANSITION_PATCHED ? "patching to unpatching" :
                                                              "unpatching to patching");

        /*
         * Clear all TIF_PATCH_PENDING flags to prevent races caused by
         * klp_update_patch_state() or __klp_sched_try_switch() running in
         * parallel with the reverse transition.
         */
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu)
                clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

        /*
         * Make sure all existing invocations of klp_update_patch_state() and
         * __klp_sched_try_switch() see the cleared TIF_PATCH_PENDING before
         * starting the reverse transition.
         */
        klp_synchronize_transition();

        /*
         * All patching has stopped, now re-initialize the global variables to
         * prepare for the reverse transition.
         */
        klp_transition_patch->enabled = !klp_transition_patch->enabled;
        klp_target_state = !klp_target_state;

        /*
         * Enforce the order of the klp_target_state write and the
         * TIF_PATCH_PENDING writes in klp_start_transition() to ensure
         * klp_update_patch_state() and __klp_sched_try_switch() don't set
         * task->patch_state to the wrong value.
         */
        smp_wmb();

        klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
        /*
         * The parent process may have gone through a KLP transition since
         * the thread flag was copied in setup_thread_stack earlier.  Bring
         * the task flag up to date with the parent here.
         *
         * The operation is serialized against all klp_*_transition()
         * operations by the tasklist_lock.  The only exceptions are
         * klp_update_patch_state(current) and __klp_sched_try_switch(), but we
         * cannot race with them because we are current.
         */
        if (test_tsk_thread_flag(current, TIF_PATCH_PENDING))
                set_tsk_thread_flag(child, TIF_PATCH_PENDING);
        else
                clear_tsk_thread_flag(child, TIF_PATCH_PENDING);

        child->patch_state = current->patch_state;
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one who can invoke
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
        struct klp_patch *patch;
        struct task_struct *g, *task;
        unsigned int cpu;

        pr_warn("forcing remaining tasks to the patched state\n");

        read_lock(&tasklist_lock);
        for_each_process_thread(g, task)
                klp_update_patch_state(task);
        read_unlock(&tasklist_lock);

        for_each_possible_cpu(cpu)
                klp_update_patch_state(idle_task(cpu));

        /* Set forced flag for patches being removed. */
        if (klp_target_state == KLP_TRANSITION_UNPATCHED)
                klp_transition_patch->forced = true;
        else if (klp_transition_patch->replace) {
                klp_for_each_patch(patch) {
                        if (patch != klp_transition_patch)
                                patch->forced = true;
                }
        }
}