x86/static_call: Add inline static call implementation for x86-64
kernel/static_call.c
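For context, here is a minimal usage sketch of the static_call API that this file backs. The macro names (DEFINE_STATIC_CALL, static_call, static_call_update) are the kernel's documented interface; my_name, func_a, func_b and the arguments are purely illustrative placeholders.

/* Define a static call 'my_name', initially bound to func_a(). */
DEFINE_STATIC_CALL(my_name, func_a);

/* Call through it; with inline static calls this becomes a direct call. */
static_call(my_name)(arg1, arg2);

/* Re-bind the trampoline and every inline call site to func_b(). */
static_call_update(my_name, &func_b);

The file below implements the generic (non-arch) half of this machinery: tracking call sites and re-patching them when a key is updated.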
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/static_call.h>
#include <linux/bug.h>
#include <linux/smp.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/processor.h>
#include <asm/sections.h>

extern struct static_call_site __start_static_call_sites[],
                               __stop_static_call_sites[];

static bool static_call_initialized;

#define STATIC_CALL_INIT 1UL

/* mutex to protect key modules/sites */
static DEFINE_MUTEX(static_call_mutex);

static void static_call_lock(void)
{
        mutex_lock(&static_call_mutex);
}

static void static_call_unlock(void)
{
        mutex_unlock(&static_call_mutex);
}

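/*
 * Each struct static_call_site stores its call-site address and its key
 * pointer as offsets relative to the field's own location, which keeps
 * the entries valid wherever the kernel (or a module) is loaded. The
 * low bit of the key offset is borrowed to flag call sites that live in
 * init text (STATIC_CALL_INIT above).
 */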
static inline void *static_call_addr(struct static_call_site *site)
{
        return (void *)((long)site->addr + (long)&site->addr);
}


static inline struct static_call_key *static_call_key(const struct static_call_site *site)
{
        return (struct static_call_key *)
                (((long)site->key + (long)&site->key) & ~STATIC_CALL_INIT);
}

/* These assume the key is word-aligned. */
static inline bool static_call_is_init(struct static_call_site *site)
{
        return ((long)site->key + (long)&site->key) & STATIC_CALL_INIT;
}

static inline void static_call_set_init(struct static_call_site *site)
{
        site->key = ((long)static_call_key(site) | STATIC_CALL_INIT) -
                    (long)&site->key;
}

static int static_call_site_cmp(const void *_a, const void *_b)
{
        const struct static_call_site *a = _a;
        const struct static_call_site *b = _b;
        const struct static_call_key *key_a = static_call_key(a);
        const struct static_call_key *key_b = static_call_key(b);

        if (key_a < key_b)
                return -1;

        if (key_a > key_b)
                return 1;

        return 0;
}

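/*
 * sort() swap callback. Because addr/key are self-relative, swapping two
 * entries must also re-bias each stored offset by the distance between
 * the two slots, otherwise the values would resolve to the wrong
 * addresses after the move.
 */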
static void static_call_site_swap(void *_a, void *_b, int size)
{
        long delta = (unsigned long)_a - (unsigned long)_b;
        struct static_call_site *a = _a;
        struct static_call_site *b = _b;
        struct static_call_site tmp = *a;

        a->addr = b->addr - delta;
        a->key  = b->key  - delta;

        b->addr = tmp.addr + delta;
        b->key  = tmp.key  + delta;
}

static inline void static_call_sort_entries(struct static_call_site *start,
                                            struct static_call_site *stop)
{
        sort(start, stop - start, sizeof(struct static_call_site),
             static_call_site_cmp, static_call_site_swap);
}

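/*
 * Re-target a static call: patch the out-of-line trampoline first, so
 * callers that still go through it immediately reach the new function,
 * then walk every run of inline call sites attached to this key (one
 * static_call_mod record per object) and patch each call instruction
 * in place.
 */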
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
        struct static_call_site *site, *stop;
        struct static_call_mod *site_mod;

        cpus_read_lock();
        static_call_lock();

        if (key->func == func)
                goto done;

        key->func = func;

        arch_static_call_transform(NULL, tramp, func);

        /*
         * If uninitialized, we'll not update the callsites, but they still
         * point to the trampoline and we just patched that.
         */
        if (WARN_ON_ONCE(!static_call_initialized))
                goto done;

        for (site_mod = key->mods; site_mod; site_mod = site_mod->next) {
                struct module *mod = site_mod->mod;

                if (!site_mod->sites) {
                        /*
                         * This can happen if the static call key is defined in
                         * a module which doesn't use it.
                         */
                        continue;
                }

                stop = __stop_static_call_sites;

#ifdef CONFIG_MODULES
                if (mod) {
                        stop = mod->static_call_sites +
                               mod->num_static_call_sites;
                }
#endif

                for (site = site_mod->sites;
                     site < stop && static_call_key(site) == key; site++) {
                        void *site_addr = static_call_addr(site);

                        if (static_call_is_init(site)) {
                                /*
                                 * Don't write to call sites which were in
                                 * initmem and have since been freed.
                                 */
                                if (!mod && system_state >= SYSTEM_RUNNING)
                                        continue;
                                if (mod && !within_module_init((unsigned long)site_addr, mod))
                                        continue;
                        }

                        if (!kernel_text_address((unsigned long)site_addr)) {
                                WARN_ONCE(1, "can't patch static call site at %pS",
                                          site_addr);
                                continue;
                        }

                        arch_static_call_transform(site_addr, NULL, func);
                }
        }

done:
        static_call_unlock();
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(__static_call_update);

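/*
 * One-time pass over a static_call_site table (vmlinux at boot, or a
 * module at load time): sort it so sites sharing a key are adjacent,
 * mark init-section sites, hook a static_call_mod record onto each
 * key's ->mods list so later updates can find these sites, and point
 * every site at its key's current target.
 */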
static int __static_call_init(struct module *mod,
                              struct static_call_site *start,
                              struct static_call_site *stop)
{
        struct static_call_site *site;
        struct static_call_key *key, *prev_key = NULL;
        struct static_call_mod *site_mod;

        if (start == stop)
                return 0;

        static_call_sort_entries(start, stop);

        for (site = start; site < stop; site++) {
                void *site_addr = static_call_addr(site);

                if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
                    (!mod && init_section_contains(site_addr, 1)))
                        static_call_set_init(site);

                key = static_call_key(site);
                if (key != prev_key) {
                        prev_key = key;

                        site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
                        if (!site_mod)
                                return -ENOMEM;

                        site_mod->mod = mod;
                        site_mod->sites = site;
                        site_mod->next = key->mods;
                        key->mods = site_mod;
                }

                arch_static_call_transform(site_addr, NULL, key->func);
        }

        return 0;
}

static int addr_conflict(struct static_call_site *site, void *start, void *end)
{
        unsigned long addr = (unsigned long)static_call_addr(site);

        if (addr <= (unsigned long)end &&
            addr + CALL_INSN_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __static_call_text_reserved(struct static_call_site *iter_start,
                                       struct static_call_site *iter_stop,
                                       void *start, void *end)
{
        struct static_call_site *iter = iter_start;

        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

#ifdef CONFIG_MODULES

static int __static_call_mod_text_reserved(void *start, void *end)
{
        struct module *mod;
        int ret;

        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
        if (!try_module_get(mod))
                mod = NULL;
        preempt_enable();

        if (!mod)
                return 0;

        ret = __static_call_text_reserved(mod->static_call_sites,
                        mod->static_call_sites + mod->num_static_call_sites,
                        start, end);

        module_put(mod);

        return ret;
}

static int static_call_add_module(struct module *mod)
{
        return __static_call_init(mod, mod->static_call_sites,
                                  mod->static_call_sites + mod->num_static_call_sites);
}

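/*
 * Module unload: unlink this module's static_call_mod records from
 * every key they were attached to, so later updates no longer try to
 * patch text that is about to be freed.
 */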
static void static_call_del_module(struct module *mod)
{
        struct static_call_site *start = mod->static_call_sites;
        struct static_call_site *stop = mod->static_call_sites +
                                        mod->num_static_call_sites;
        struct static_call_key *key, *prev_key = NULL;
        struct static_call_mod *site_mod, **prev;
        struct static_call_site *site;

        for (site = start; site < stop; site++) {
                key = static_call_key(site);
                if (key == prev_key)
                        continue;

                prev_key = key;

                for (prev = &key->mods, site_mod = key->mods;
                     site_mod && site_mod->mod != mod;
                     prev = &site_mod->next, site_mod = site_mod->next)
                        ;

                if (!site_mod)
                        continue;

                *prev = site_mod->next;
                kfree(site_mod);
        }
}

static int static_call_module_notify(struct notifier_block *nb,
                                     unsigned long val, void *data)
{
        struct module *mod = data;
        int ret = 0;

        cpus_read_lock();
        static_call_lock();

        switch (val) {
        case MODULE_STATE_COMING:
                ret = static_call_add_module(mod);
                if (ret) {
                        WARN(1, "Failed to allocate memory for static calls");
                        static_call_del_module(mod);
                }
                break;
        case MODULE_STATE_GOING:
                static_call_del_module(mod);
                break;
        }

        static_call_unlock();
        cpus_read_unlock();

        return notifier_from_errno(ret);
}

static struct notifier_block static_call_module_nb = {
        .notifier_call = static_call_module_notify,
};

#else

static inline int __static_call_mod_text_reserved(void *start, void *end)
{
        return 0;
}

#endif /* CONFIG_MODULES */

int static_call_text_reserved(void *start, void *end)
{
        int ret = __static_call_text_reserved(__start_static_call_sites,
                        __stop_static_call_sites, start, end);

        if (ret)
                return ret;

        return __static_call_mod_text_reserved(start, end);
}

static int __init static_call_init(void)
{
        int ret;

        if (static_call_initialized)
                return 0;

        cpus_read_lock();
        static_call_lock();
        ret = __static_call_init(NULL, __start_static_call_sites,
                                 __stop_static_call_sites);
        static_call_unlock();
        cpus_read_unlock();

        if (ret) {
                pr_err("Failed to allocate memory for static_call!\n");
                BUG();
        }

        static_call_initialized = true;

#ifdef CONFIG_MODULES
        register_module_notifier(&static_call_module_nb);
#endif
        return 0;
}
early_initcall(static_call_init);
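For reference, the generic code above drives a single architecture hook. The sketch below only restates the contract as it is used in this version of the file (three-argument calls with either site or tramp NULL); the declaration and semantics are inferred from those call sites, and the real x86-64 implementation lives in arch/x86/kernel/static_call.c and is not reproduced here.

/*
 * Contract assumed by the calls above:
 *   site == NULL:  patch the out-of-line trampoline @tramp to jump to @func.
 *   tramp == NULL: patch the call instruction at inline call site @site so
 *                  that it calls @func directly.
 */
void arch_static_call_transform(void *site, void *tramp, void *func);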