// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        /* Entries are sorted by key. */
        if (jump_entry_key(jea) < jump_entry_key(jeb))
                return -1;
        if (jump_entry_key(jea) > jump_entry_key(jeb))
                return 1;

        /*
         * In batching mode, entries are also sorted by code address within
         * the already key-sorted list, enabling a bsearch in the vector.
         */
        if (jump_entry_code(jea) < jump_entry_code(jeb))
                return -1;
        if (jump_entry_code(jea) > jump_entry_code(jeb))
                return 1;

        return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
        long delta = (unsigned long)a - (unsigned long)b;
        struct jump_entry *jea = a;
        struct jump_entry *jeb = b;
        struct jump_entry tmp = *jea;

        jea->code   = jeb->code - delta;
        jea->target = jeb->target - delta;
        jea->key    = jeb->key - delta;

        jeb->code   = tmp.code + delta;
        jeb->target = tmp.target + delta;
        jeb->key    = tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        int size;
        void *swapfn = NULL;

        if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
                swapfn = jump_label_swap;
        size = (((unsigned long)stop - (unsigned long)start)
                / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}
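
/*
 * Resulting invariant, sketched with made-up addresses: after sorting, all
 * entries for a given key are contiguous and code-ordered, e.g.
 *
 *      { key=&kA, code=0xf100 }, { key=&kA, code=0xf180 },
 *      { key=&kB, code=0xf040 }, ...
 *
 * so __jump_label_update() can walk one key's run linearly, and the batching
 * arch code can bsearch by code address within that run.
 */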

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in
 * jump_label.h. The use of 'atomic_read()' requires atomic.h, and that is
 * problematic for some kernel headers such as kernel.h and others. Since
 * static_key_count() is not used in the branch statements as it is for the
 * !CONFIG_JUMP_LABEL case, it's OK to have it be a function here. The same
 * goes for 'static_key_enable()' and 'static_key_disable()', which require
 * bug.h. This should allow jump_label.h to be included from most/all places
 * for CONFIG_JUMP_LABEL.
 */

int static_key_count(struct static_key *key)
{
        /*
         * -1 means the first static_key_slow_inc() is in progress.
         * static_key_enabled() must return true, so return 1 here.
         */
        int n = atomic_read(&key->enabled);

        return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
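
/*
 * A quick sketch of the mapping above (illustrative values only, no extra
 * API assumed): enabled == 0 yields a count of 0, enabled == 3 yields 3,
 * and the transient enabled == -1 set during the first
 * static_key_slow_inc() already reports 1, so static_key_enabled()
 * observers treat the key as on while its sites are being patched.
 */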

/**
 * static_key_fast_inc_not_disabled - adds a user for a static key
 * @key: static key that must be already enabled
 *
 * The caller must make sure that the static key can't get disabled while
 * in this function. It doesn't patch jump labels, only adds a user to
 * an already enabled static key.
 *
 * Returns true if the increment was done. Unlike refcount_t the ref counter
 * is not saturated, but will fail to increment on overflow.
 */
bool static_key_fast_inc_not_disabled(struct static_key *key)
{
        int v;

        STATIC_KEY_CHECK_USE(key);
        /*
         * Negative key->enabled has a special meaning: it sends
         * static_key_slow_inc/dec() down the slow path, and it is non-zero
         * so it counts as "enabled" in jump_label_update().
         *
         * The INT_MAX overflow condition is either used by the networking
         * code to reset or detected in the slow path of
         * static_key_slow_inc_cpuslocked().
         */
        v = atomic_read(&key->enabled);
        do {
                if (v <= 0 || v == INT_MAX)
                        return false;
        } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));

        return true;
}
EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);
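
/*
 * Illustrative caller sketch (hypothetical names, not an API defined in
 * this file): a path that already holds a reference, and therefore knows
 * the key cannot be disabled underneath it, may take another reference
 * without patching any text:
 *
 *      if (!static_key_fast_inc_not_disabled(&net_key))
 *              return -EBUSY;  // counter would overflow INT_MAX
 */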

bool static_key_slow_inc_cpuslocked(struct static_key *key)
{
        lockdep_assert_cpus_held();

        /*
         * Careful if we get concurrent static_key_slow_inc/dec() calls;
         * later calls must wait for the first one to _finish_ the
         * jump_label_update() process. At the same time, however,
         * the jump_label_update() call below wants to see
         * static_key_enabled(&key) for jumps to be updated properly.
         */
        if (static_key_fast_inc_not_disabled(key))
                return true;

        guard(mutex)(&jump_label_mutex);
        /* Try to mark it as 'enabling in progress'. */
        if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
                jump_label_update(key);
                /*
                 * Ensure that when static_key_fast_inc_not_disabled() or
                 * static_key_slow_try_dec() observe the positive value,
                 * they must also observe all the text changes.
                 */
                atomic_set_release(&key->enabled, 1);
        } else {
                /*
                 * While holding the mutex this should never observe
                 * anything else than a value >= 1 and succeed.
                 */
                if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
                        return false;
        }
        return true;
}

bool static_key_slow_inc(struct static_key *key)
{
        guard(cpus_read_lock)();
        return static_key_slow_inc_cpuslocked(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
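
/*
 * Typical usage sketch (standard static-branch API from
 * <linux/jump_label.h>; 'feature_key' and 'do_feature' are placeholder
 * names for illustration):
 *
 *      static DEFINE_STATIC_KEY_FALSE(feature_key);
 *
 *      if (static_branch_unlikely(&feature_key))
 *              do_feature();
 *
 *      static_branch_inc(&feature_key);        // wraps static_key_slow_inc()
 *      static_branch_dec(&feature_key);        // patches back on last user
 */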

void static_key_enable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        lockdep_assert_cpus_held();

        if (atomic_read(&key->enabled) > 0) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
                return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                /* See static_key_slow_inc(). */
                atomic_set_release(&key->enabled, 1);
        }
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
        guard(cpus_read_lock)();
        static_key_enable_cpuslocked(key);
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        lockdep_assert_cpus_held();

        if (atomic_read(&key->enabled) != 1) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
                return;
        }

        jump_label_lock();
        if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
                jump_label_update(key);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
        guard(cpus_read_lock)();
        static_key_disable_cpuslocked(key);
}
EXPORT_SYMBOL_GPL(static_key_disable);
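
/*
 * The two write-side styles, sketched side by side (hypothetical key 'sk';
 * both expand to the functions above): enable/disable force the state to
 * exactly 1/0 for boolean-style users, while inc/dec keep a reference
 * count and patch only on the 0 <-> 1 transitions. Mixing the two styles
 * on one key risks tripping the WARN_ON_ONCE() checks above.
 *
 *      static_key_enable(&sk);         // state := 1, regardless of users
 *      static_key_slow_inc(&sk);       // ref++, patches only on 0 -> 1
 */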

static bool static_key_slow_try_dec(struct static_key *key)
{
        int v;

        /*
         * Go into the slow path if key::enabled is less than or equal to
         * one. One is valid to shut down the key, anything less than one
         * is an imbalance, which is handled at the call site.
         *
         * That includes the special case of '-1' which is set in
         * static_key_slow_inc_cpuslocked(), but that's harmless as it is
         * fully serialized in the slow path below. By the time this task
         * acquires the jump label lock the value is back to one and the
         * retry under the lock must succeed.
         */
        v = atomic_read(&key->enabled);
        do {
                /*
                 * Warn about the '-1' case though, since that means a
                 * decrement is concurrent with a first (0->1) increment. IOW
                 * people are trying to disable something that wasn't yet fully
                 * enabled. This suggests an ordering problem on the user side.
                 */
                WARN_ON_ONCE(v < 0);
                if (v <= 1)
                        return false;
        } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));

        return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
        lockdep_assert_cpus_held();

        if (static_key_slow_try_dec(key))
                return;

        guard(mutex)(&jump_label_mutex);
        if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
                jump_label_update(key);
        else
                WARN_ON_ONCE(!static_key_slow_try_dec(key));
}

static void __static_key_slow_dec(struct static_key *key)
{
        guard(cpus_read_lock)();
        __static_key_slow_dec_cpuslocked(key);
}

void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);

        __static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
                                    struct delayed_work *work,
                                    unsigned long timeout)
{
        STATIC_KEY_CHECK_USE(key);

        if (static_key_slow_try_dec(key))
                return;

        schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
        STATIC_KEY_CHECK_USE(key);
        flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
                           unsigned long rl)
{
        STATIC_KEY_CHECK_USE(key);
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
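
/*
 * Usage sketch for the rate-limited form (helpers from
 * <linux/jump_label_ratelimit.h>; 'key' is a placeholder name):
 *
 *      static struct static_key_deferred key;
 *
 *      jump_label_rate_limit(&key, HZ);        // defer dec-side patching
 *      static_key_slow_inc(&key.key);
 *      static_key_slow_dec_deferred(&key);     // real dec after ~timeout
 */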

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (jump_entry_code(entry) <= (unsigned long)end &&
            jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end, bool init)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (init || !jump_entry_is_init(iter)) {
                        if (addr_conflict(iter, start, end))
                                return 1;
                }
                iter++;
        }

        return 0;
}

#ifndef arch_jump_label_transform_static
static void arch_jump_label_transform_static(struct jump_entry *entry,
                                             enum jump_label_type type)
{
        /* nothing to do on most architectures */
}
#endif

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
        WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
        return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
        return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
        return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
        key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
        key->type |= JUMP_TYPE_LINKED;
}

/*
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
                                   struct jump_entry *entries)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->entries = entries;
        key->type |= type;
}
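
/*
 * For illustration, the low-bit encoding this relies on (values as defined
 * by the JUMP_TYPE_* constants in <linux/jump_label.h>):
 *
 *      key->type = pointer | [JUMP_TYPE_LINKED] | [JUMP_TYPE_TRUE]
 *      bit 0 (JUMP_TYPE_TRUE):   initial branch direction
 *      bit 1 (JUMP_TYPE_LINKED): 'next' points at a static_key_mod list
 *
 * which is why the entries/mod pointers stored here must be at least
 * 4-byte aligned.
 */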

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
        bool branch = jump_entry_is_branch(entry);

        /* See the comment in linux/jump_label.h */
        return enabled ^ branch;
}
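
/*
 * Worked out, since the XOR is easy to misread (a sketch consistent with
 * the table in <linux/jump_label.h>, where JUMP_LABEL_NOP == 0 and
 * JUMP_LABEL_JMP == 1):
 *
 *      enabled  branch bit  ->  desired code
 *      false    false           NOP  (0 ^ 0)
 *      false    true            JMP  (0 ^ 1)
 *      true     false           JMP  (1 ^ 0)
 *      true     true            NOP  (1 ^ 1)
 */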

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
        /* Cannot update code that was in an init text area. */
        if (!init && jump_entry_is_init(entry))
                return false;

        if (!kernel_text_address(jump_entry_code(entry))) {
                /*
                 * This skips patching built-in __exit, which
                 * is part of init_section_contains() but is
                 * not part of kernel_text_address().
                 *
                 * Skipping built-in __exit is fine since it
                 * will never be executed.
                 */
                WARN_ONCE(!jump_entry_is_init(entry),
                          "can't patch jump_label at %pS",
                          (void *)jump_entry_code(entry));
                return false;
        }

        return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop,
                                bool init)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                if (jump_label_can_update(entry, init))
                        arch_jump_label_transform(entry, jump_label_type(entry));
        }
}
#else
static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop,
                                bool init)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                if (!jump_label_can_update(entry, init))
                        continue;

                if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
                        /* Queue is full: apply the current queue and try again. */
                        arch_jump_label_transform_apply();
                        BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
                }
        }
        arch_jump_label_transform_apply();
}
#endif

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        /*
         * Since we are initializing the static_key.enabled field with
         * the 'raw' int values (to avoid pulling in atomic.h) in
         * jump_label.h, let's make sure that is safe. There are only two
         * cases to check since we initialize to 0 or 1.
         */
        BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
        BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

        if (static_key_initialized)
                return;

        cpus_read_lock();
        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;
                bool in_init;

                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

                in_init = init_section_contains((void *)jump_entry_code(iter), 1);
                jump_entry_set_init(iter, in_init);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                static_key_set_entries(key, iter);
        }
        static_key_initialized = true;
        jump_label_unlock();
        cpus_read_unlock();
}

static inline bool static_key_sealed(struct static_key *key)
{
        return (key->type & JUMP_TYPE_LINKED) && !(key->type & ~JUMP_TYPE_MASK);
}

static inline void static_key_seal(struct static_key *key)
{
        unsigned long type = key->type & JUMP_TYPE_TRUE;

        key->type = JUMP_TYPE_LINKED | type;
}

void jump_label_init_ro(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct jump_entry *iter;

        if (WARN_ON_ONCE(!static_key_initialized))
                return;

        cpus_read_lock();
        jump_label_lock();

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk = jump_entry_key(iter);

                if (!is_kernel_ro_after_init((unsigned long)iterk))
                        continue;
                if (static_key_sealed(iterk))
                        continue;
                static_key_seal(iterk);
        }

        jump_label_unlock();
        cpus_read_unlock();
}

#ifdef CONFIG_MODULES

enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
        bool branch = jump_entry_is_branch(entry);

        /* See the comment in linux/jump_label.h */
        return type ^ branch;
}

struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
        WARN_ON_ONCE(!static_key_linked(key));
        return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/*
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
                               struct static_key_mod *mod)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->next = mod;
        key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;
        int ret;

        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
        if (!try_module_get(mod))
                mod = NULL;
        preempt_enable();

        if (!mod)
                return 0;

        ret = __jump_label_text_reserved(mod->jump_entries,
                        mod->jump_entries + mod->num_jump_entries,
                        start, end, mod->state == MODULE_STATE_COMING);

        module_put(mod);
        return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
        struct static_key_mod *mod;

        for (mod = static_key_mod(key); mod; mod = mod->next) {
                struct jump_entry *stop;
                struct module *m;

                /*
                 * NULL if the static_key is defined in a module
                 * that does not use it.
                 */
                if (!mod->entries)
                        continue;

                m = mod->mod;
                if (!m)
                        stop = __stop___jump_table;
                else
                        stop = m->jump_entries + m->num_jump_entries;
                __jump_label_update(key, mod->entries, stop,
                                    m && m->state == MODULE_STATE_COMING);
        }
}

static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, *jlm2;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;
                bool in_init;

                in_init = within_module_init(jump_entry_code(iter), mod);
                jump_entry_set_init(iter, in_init);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                if (within_module((unsigned long)key, mod)) {
                        static_key_set_entries(key, iter);
                        continue;
                }

                /*
                 * If the key was sealed at init, then there's no need to keep
                 * a reference to its module entries - just patch them now and
                 * be done with it.
                 */
                if (static_key_sealed(key))
                        goto do_poke;

                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                if (!static_key_linked(key)) {
                        jlm2 = kzalloc(sizeof(struct static_key_mod),
                                       GFP_KERNEL);
                        if (!jlm2) {
                                kfree(jlm);
                                return -ENOMEM;
                        }
                        preempt_disable();
                        jlm2->mod = __module_address((unsigned long)key);
                        preempt_enable();
                        jlm2->entries = static_key_entries(key);
                        jlm2->next = NULL;
                        static_key_set_mod(key, jlm2);
                        static_key_set_linked(key);
                }
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = static_key_mod(key);
                static_key_set_mod(key, jlm);
                static_key_set_linked(key);

                /* Only update if we've changed from our initial state */
do_poke:
                if (jump_label_type(iter) != jump_label_init_type(iter))
                        __jump_label_update(key, iter, iter_stop, true);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (jump_entry_key(iter) == key)
                        continue;

                key = jump_entry_key(iter);
                if (within_module((unsigned long)key, mod))
                        continue;

                /* No @jlm allocated because key was sealed at init. */
                if (static_key_sealed(key))
                        continue;

                /* No memory during module load */
                if (WARN_ON(!static_key_linked(key)))
                        continue;

                prev = &key->next;
                jlm = static_key_mod(key);
                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                /* No memory during module load */
                if (WARN_ON(!jlm))
                        continue;

                if (prev == &key->next)
                        static_key_set_mod(key, jlm->next);
                else
                        *prev = jlm->next;

                kfree(jlm);

                jlm = static_key_mod(key);
                /* if only one entry is left, fold it back into the static_key */
                if (jlm->next == NULL) {
                        static_key_set_entries(key, jlm->entries);
                        static_key_clear_linked(key);
                        kfree(jlm);
                }
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        cpus_read_lock();
        jump_label_lock();

        switch (val) {
        case MODULE_STATE_COMING:
                ret = jump_label_add_module(mod);
                if (ret) {
                        WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
                        jump_label_del_module(mod);
                }
                break;
        case MODULE_STATE_GOING:
                jump_label_del_module(mod);
                break;
        }

        jump_label_unlock();
        cpus_read_unlock();

        return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/**
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        bool init = system_state < SYSTEM_RUNNING;
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end, init);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

static void jump_label_update(struct static_key *key)
{
        struct jump_entry *stop = __stop___jump_table;
        bool init = system_state < SYSTEM_RUNNING;
        struct jump_entry *entry;
#ifdef CONFIG_MODULES
        struct module *mod;

        if (static_key_linked(key)) {
                __jump_label_mod_update(key);
                return;
        }

        preempt_disable();
        mod = __module_address((unsigned long)key);
        if (mod) {
                stop = mod->jump_entries + mod->num_jump_entries;
                init = mod->state == MODULE_STATE_COMING;
        }
        preempt_enable();
#endif
        entry = static_key_entries(key);
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop, init);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                WARN_ON(static_key_enabled(&sk_true.key) != true);
                WARN_ON(static_key_enabled(&sk_false.key) != false);

                WARN_ON(!static_branch_likely(&sk_true));
                WARN_ON(!static_branch_unlikely(&sk_true));
                WARN_ON(static_branch_likely(&sk_false));
                WARN_ON(static_branch_unlikely(&sk_false));

                static_branch_disable(&sk_true);
                static_branch_enable(&sk_false);

                WARN_ON(static_key_enabled(&sk_true.key) == true);
                WARN_ON(static_key_enabled(&sk_false.key) == false);

                WARN_ON(static_branch_likely(&sk_true));
                WARN_ON(static_branch_unlikely(&sk_true));
                WARN_ON(!static_branch_likely(&sk_false));
                WARN_ON(!static_branch_unlikely(&sk_false));

                static_branch_enable(&sk_true);
                static_branch_disable(&sk_false);
        }

        return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */