// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
static struct latch_tree_root image_tree __cacheline_aligned;

/* serializes access to trampoline_table and image_tree */
static DEFINE_MUTEX(trampoline_mutex);

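/* Allocate a single executable page to hold a trampoline image. */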
static void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

static __always_inline bool image_tree_less(struct latch_tree_node *a,
					    struct latch_tree_node *b)
{
	struct bpf_image *ia = container_of(a, struct bpf_image, tnode);
	struct bpf_image *ib = container_of(b, struct bpf_image, tnode);

	return ia < ib;
}

static __always_inline int image_tree_comp(void *addr, struct latch_tree_node *n)
{
	void *image = container_of(n, struct bpf_image, tnode);

	if (addr < image)
		return -1;
	if (addr >= image + PAGE_SIZE)
		return 1;
	return 0;
}

static const struct latch_tree_ops image_tree_ops = {
	.less	= image_tree_less,
	.comp	= image_tree_comp,
};

static void *__bpf_image_alloc(bool lock)
{
	struct bpf_image *image;

	image = bpf_jit_alloc_exec_page();
	if (!image)
		return NULL;

	if (lock)
		mutex_lock(&trampoline_mutex);
	latch_tree_insert(&image->tnode, &image_tree, &image_tree_ops);
	if (lock)
		mutex_unlock(&trampoline_mutex);
	return image->data;
}

void *bpf_image_alloc(void)
{
	return __bpf_image_alloc(true);
}

bool is_bpf_image_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = latch_tree_find((void *) addr, &image_tree, &image_tree_ops) != NULL;
	rcu_read_unlock();
	return ret;
}

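/* Find the trampoline associated with the given key, or allocate a fresh
 * one (including its executable image page) if none exists yet.
 */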
struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	void *image;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	/* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
	image = __bpf_image_alloc(false);
	if (!image) {
		kfree(tr);
		tr = NULL;
		goto out;
	}

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
	tr->image = image;
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
	return ret;
}

static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist)
			*progs++ = aux->prog;
	}
	return tprogs;
}

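/* The trampoline image page is split in two halves; tr->selector picks the
 * half that is currently live. A new trampoline is generated into the unused
 * half and the fentry/ftrace call site is then redirected to it.
 */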
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	void *old_image = tr->image + ((tr->selector + 1) & 1) * BPF_IMAGE_SIZE/2;
	void *new_image = tr->image + (tr->selector & 1) * BPF_IMAGE_SIZE/2;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

	if (total == 0) {
		err = unregister_fentry(tr, old_image);
		tr->selector = 0;
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	/* Though the second half of trampoline page is unused a task could be
	 * preempted in the middle of the first half of trampoline and two
	 * updates to trampoline would change the code from underneath the
	 * preempted task. Hence wait for tasks to voluntarily schedule or go
	 * to userspace.
	 */
	synchronize_rcu_tasks();

	err = arch_prepare_bpf_trampoline(new_image, new_image + BPF_IMAGE_SIZE / 2,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

	if (tr->selector)
		/* progs already running at this address */
		err = modify_fentry(tr, old_image, new_image);
	else
		/* first time registering */
		err = register_fentry(tr, new_image);
	if (err)
		goto out;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(enum bpf_attach_type t)
{
	switch (t) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

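/* Link @prog into its target's trampoline and rebuild the trampoline image. */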
int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_trampoline *tr;
	int err = 0;
	int cnt;

	tr = prog->aux->trampoline;
	kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(prog->aux->trampoline);
	if (err) {
		hlist_del(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_trampoline *tr;
	int err;

	tr = prog->aux->trampoline;
	kind = bpf_attach_type_to_tramp(prog->expected_attach_type);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(prog->aux->trampoline);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	struct bpf_image *image;

	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	image = container_of(tr->image, struct bpf_image, data);
	latch_tree_erase(&image->tnode, &image_tree, &image_tree_ops);
	/* wait for tasks to get out of trampoline before freeing it */
	synchronize_rcu_tasks();
	bpf_jit_free_exec(image);
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

/* The logic is similar to BPF_PROG_RUN, but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 */
u64 notrace __bpf_prog_enter(void)
{
	u64 start = 0;

	rcu_read_lock();
	migrate_disable();
	if (static_branch_unlikely(&bpf_stats_enabled_key))
		start = sched_clock();
	return start;
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter
	     * and disabled in __bpf_prog_exit.
	     * And vice versa.
	     * Hence check that 'start' is not zero.
	     */
	    start) {
		stats = this_cpu_ptr(prog->aux->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
	}
	migrate_enable();
	rcu_read_unlock();
}

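/* Architectures that support BPF trampolines override this __weak stub;
 * otherwise trampoline generation fails with -ENOTSUPP.
 */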
int __weak
arch_prepare_bpf_trampoline(void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);