/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/jump_label.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

bool jump_label_enabled(struct jump_label_key *key)
{
	return !!atomic_read(&key->enabled);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct jump_label_key *key, int enable);

void jump_label_inc(struct jump_label_key *key)
{
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0)
		jump_label_update(key, JUMP_LABEL_ENABLE);
	atomic_inc(&key->enabled);
	jump_label_unlock();
}

void jump_label_dec(struct jump_label_key *key)
{
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
		return;

	jump_label_update(key, JUMP_LABEL_DISABLE);
	jump_label_unlock();
}
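
/*
 * Illustrative usage of the reference-counted key API above (a sketch;
 * "my_key" and do_unlikely_work() are hypothetical names, not part of this
 * file):
 *
 *	static struct jump_label_key my_key;
 *
 *	if (static_branch(&my_key))
 *		do_unlikely_work();
 *
 *	jump_label_inc(&my_key);	enabled 0 -> 1: branch sites patched in
 *	jump_label_dec(&my_key);	enabled 1 -> 0: branch sites patched out
 */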

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

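/*
 * The __jump_table entries were sorted by key (see jump_label_sort_entries()),
 * so all entries for a given key are contiguous: walk them until the key
 * changes or @stop is reached.
 */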
static void __jump_label_update(struct jump_label_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop, int enable)
{
	for (; (entry < stop) &&
	      (entry->key == (jump_label_t)(unsigned long)key);
	      entry++) {
		/*
		 * entry->code set to 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, enable);
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_label_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct jump_label_key *iterk;

		iterk = (struct jump_label_key *)(unsigned long)iter->key;
		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
						 JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
		if (iterk == key)
			continue;

		key = iterk;
		key->entries = iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	jump_label_unlock();
}

#ifdef CONFIG_MODULES

struct jump_label_mod {
	struct jump_label_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
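
/*
 * Each module that uses a key defined outside of it gets a jump_label_mod
 * node chained off key->next, so that jump_label_update() can also patch the
 * entries located in that module's __jump_table section (see
 * jump_label_add_module() and __jump_label_mod_update() below).
 */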

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	mod = __module_text_address((unsigned long)start);
	if (!mod)
		return 0;

	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct jump_label_key *key, int enable)
{
	struct jump_label_mod *mod = key->next;

	while (mod) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
				    m->jump_entries + m->num_jump_entries,
				    enable);
		mod = mod->next;
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with the ideal nop
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch its jump label entries to the ideal nop via
 * arch_jump_label_transform_static(), which the arch specific jump label
 * code can override.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++)
		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct jump_label_key *key = NULL;
	struct jump_label_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct jump_label_key *)(unsigned long)iter->key;

		if (__module_address(iter->key) == mod) {
			atomic_set(&key->enabled, 0);
			key->entries = iter;
			key->next = NULL;
			continue;
		}

		jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;

		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		if (jump_label_enabled(key))
			__jump_label_update(key, iter, iter_stop,
					    JUMP_LABEL_ENABLE);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct jump_label_key *key = NULL;
	struct jump_label_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct jump_label_key *)(unsigned long)iter->key;

		if (__module_address(iter->key) == mod)
			continue;

		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}

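/*
 * After a module finishes loading (MODULE_STATE_LIVE) its init sections are
 * freed, so jump entries located in init text must never be patched again.
 * Zeroing entry->code here makes __jump_label_update() skip them.
 */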
static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text address range between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
					__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
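
/*
 * Illustrative caller of the check above (a sketch; "patch_addr" and
 * "patch_len" are hypothetical names for the text range being modified):
 *
 *	jump_label_lock();
 *	if (!jump_label_text_reserved(patch_addr, patch_addr + patch_len))
 *		... safe to modify this text range ...
 *	jump_label_unlock();
 */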
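/*
 * key->entries points at the first entry for @key in either the core kernel
 * __jump_table or the defining module's table; bound the walk by the end of
 * whichever table contains it. Entries in modules that merely use the key
 * are handled separately via the key->next chain in __jump_label_mod_update().
 */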
static void jump_label_update(struct jump_label_key *key, int enable)
{
	struct jump_entry *entry = key->entries, *stop = __stop___jump_table;

#ifdef CONFIG_MODULES
	struct module *mod = __module_address((jump_label_t)key);

	__jump_label_mod_update(key, enable);

	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, enable);
}

#endif