jump_label: remove bug.h, atomic.h dependencies for HAVE_JUMP_LABEL
kernel/jump_label.c [linux-2.6-block.git]
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

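/*
 * Sort comparator: order entries by their struct static_key pointer so that
 * all entries belonging to one key end up contiguous in the table, which is
 * what __jump_label_update() relies on when it walks forward from
 * key->entries.
 */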
static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_enable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (!count)
		static_key_slow_inc(key);
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable(struct static_key *key)
{
	int count = static_key_count(key);

	WARN_ON_ONCE(count < 0 || count > 1);

	if (count)
		static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_disable);

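/*
 * Illustration only, not part of the original file: static_key_enable() and
 * static_key_disable() force a key to exactly 0 or 1, while the slow_inc()/
 * slow_dec() pair below is reference counted. A hypothetical refcounted user
 * might look like:
 *
 *	static struct static_key hypothetical_key = STATIC_KEY_INIT_FALSE;
 *
 *	register_feature()   -> static_key_slow_inc(&hypothetical_key);
 *	unregister_feature() -> static_key_slow_dec(&hypothetical_key);
 *
 * The key stays enabled as long as at least one registration is live.
 */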
void static_key_slow_inc(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		atomic_set(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
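
/*
 * Illustration only, not part of the original file: a hypothetical user of
 * the deferred/rate-limited variant might do:
 *
 *	static struct static_key_deferred hypothetical_dkey;
 *
 *	jump_label_rate_limit(&hypothetical_dkey, HZ);
 *	...
 *	static_key_slow_inc(&hypothetical_dkey.key);
 *	...
 *	static_key_slow_dec_deferred(&hypothetical_dkey);
 *
 * so that rapid enable/disable cycles batch the expensive code patching
 * into at most one update per timeout.
 */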

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

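/*
 * Two pointers double as bitfields below: the low JUMP_TYPE_MASK bits of
 * static_key::entries encode the key's type, and bit 0 of jump_entry::key
 * carries the 'branch' flag that the arch_static_branch*() macros recorded
 * for that site (see the table in linux/jump_label.h). The helpers mask
 * these bits off or extract them.
 */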
static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return (unsigned long)key->entries & JUMP_TYPE_MASK;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
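	/*
	 * Added for illustration: with JUMP_LABEL_NOP = 0 and
	 * JUMP_LABEL_JMP = 1, 'enabled ^ branch' expands to:
	 *
	 *   enabled  branch   patched instruction
	 *   -------  ------   -------------------
	 *      0        0     NOP
	 *      0        1     JMP
	 *      1        0     JMP
	 *      1        1     NOP
	 */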
	return enabled ^ branch;
}

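/*
 * Patch every jump site in [entry, stop) that belongs to @key. The table is
 * sorted by key (see jump_label_sort_entries()), so we can stop at the first
 * entry whose key differs.
 */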
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * entry->code set to 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}

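/*
 * Boot-time initialization of the core kernel's jump table: sort it, write
 * out the ideal NOPs and point each static_key at its first jump_entry.
 */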
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve the type bits
		 * (JUMP_TYPE_MASK).
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	static_key_initialized = true;
	jump_label_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

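/*
 * When a static_key defined in one place (core kernel or a module) is also
 * used by jump sites in other modules, each such module gets a
 * static_key_mod chained off key->next, recording that module's slice of
 * jump entries for the key.
 */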
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	mod = __module_text_address((unsigned long)start);
	if (!mod)
		return 0;

	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = key->next; mod; mod = mod->next) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
				    m->jump_entries + m->num_jump_entries);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_static_branch(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

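/*
 * Hook up a freshly loaded module's jump entries: sort them, make keys the
 * module itself defines point at their entries, chain a static_key_mod onto
 * keys owned elsewhere, and patch any site whose required state already
 * differs from how the module image was built.
 */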
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			/*
			 * Set key->entries to iter, but preserve the type bits
			 * (JUMP_TYPE_MASK).
			 */
			*((unsigned long *)&key->entries) += (unsigned long)iter;
			key->next = NULL;
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

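/*
 * On module unload, walk the module's jump entries and unlink/free the
 * static_key_mod nodes it contributed to keys it does not own itself.
 */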
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}

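/*
 * Once a module's init sections are freed (MODULE_STATE_LIVE), zero out
 * entry->code for jump entries that lived in init text so that later
 * updates skip them; see the check in __jump_label_update().
 */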
static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

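/*
 * Patch every code site attached to @key: the core kernel entries it points
 * at, plus any module entries chained through key->next.
 */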
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry = static_key_entries(key);
#ifdef CONFIG_MODULES
	struct module *mod;

	__jump_label_mod_update(key);

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */