bpf: offload: report device information for offloaded programs
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2c4cfeaa8d5e785f16758be08cb8a462766363d9..ebf0fb23e2373af86aae2dca1a557105f0800f0d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -905,9 +905,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
        return id > 0 ? 0 : id;
 }
 
-static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 {
-       /* cBPF to eBPF migrations are currently not in the idr store. */
+       /* cBPF to eBPF migrations are currently not in the idr store.
+        * Offloaded programs are removed from the store when their device
+        * disappears - even if someone grabs an fd to them they are unusable,
+        * simply waiting for refcnt to drop to be freed.
+        */
        if (!prog->aux->id)
                return;
 
@@ -917,6 +921,7 @@ static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
                __acquire(&prog_idr_lock);
 
        idr_remove(&prog_idr, prog->aux->id);
+       prog->aux->id = 0;
 
        if (do_idr_lock)
                spin_unlock_bh(&prog_idr_lock);
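
Making bpf_prog_free_id() non-static lets the offload code release a program's ID as soon as its backing netdev unregisters, which is what the expanded comment above describes. A minimal sketch of such a caller, assuming it lives in kernel/bpf/offload.c; the name bpf_prog_offload_orphan is hypothetical and not part of this patch:

/* Hypothetical teardown helper: once the device is gone the program can
 * never run again, so drop its ID early. BPF_PROG_GET_NEXT_ID and
 * BPF_PROG_GET_FD_BY_ID stop finding it, while existing references simply
 * wait for the refcount to reach zero. */
static void bpf_prog_offload_orphan(struct bpf_prog *prog)
{
	/* do_idr_lock == true: this caller does not hold prog_idr_lock */
	bpf_prog_free_id(prog, true);
}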
@@ -937,10 +942,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
+               int i;
+
                trace_bpf_prog_put_rcu(prog);
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
+
+               for (i = 0; i < prog->aux->func_cnt; i++)
+                       bpf_prog_kallsyms_del(prog->aux->func[i]);
                bpf_prog_kallsyms_del(prog);
+
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
        }
 }
@@ -1151,6 +1162,8 @@ static int bpf_prog_load(union bpf_attr *attr)
        if (!prog)
                return -ENOMEM;
 
+       prog->aux->offload_requested = !!attr->prog_ifindex;
+
        err = security_bpf_prog_alloc(prog->aux);
        if (err)
                goto free_prog_nouncharge;
@@ -1172,7 +1185,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        atomic_set(&prog->aux->refcnt, 1);
        prog->gpl_compatible = is_gpl ? 1 : 0;
 
-       if (attr->prog_ifindex) {
+       if (bpf_prog_is_dev_bound(prog->aux)) {
                err = bpf_prog_offload_init(prog, attr);
                if (err)
                        goto free_prog;
@@ -1194,7 +1207,8 @@ static int bpf_prog_load(union bpf_attr *attr)
                goto free_used_maps;
 
        /* eBPF program is ready to be JITed */
-       prog = bpf_prog_select_runtime(prog, &err);
+       if (!prog->bpf_func)
+               prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;
 
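The load path now keys off bpf_prog_is_dev_bound() instead of re-reading attr->prog_ifindex, and it skips host JIT selection when the offload path has already installed prog->bpf_func. A sketch of the helper this relies on, assuming it is the usual one-liner in include/linux/bpf.h (not shown in this diff):

/* Presumed helper in include/linux/bpf.h: offload_requested is recorded
 * from attr->prog_ifindex at the top of bpf_prog_load(). */
static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}
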
@@ -1551,6 +1565,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
        return fd;
 }
 
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+                                             unsigned long addr)
+{
+       int i;
+
+       for (i = 0; i < prog->aux->used_map_cnt; i++)
+               if (prog->aux->used_maps[i] == (void *)addr)
+                       return prog->aux->used_maps[i];
+       return NULL;
+}
+
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+       const struct bpf_map *map;
+       struct bpf_insn *insns;
+       u64 imm;
+       int i;
+
+       insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+                       GFP_USER);
+       if (!insns)
+               return insns;
+
+       for (i = 0; i < prog->len; i++) {
+               if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+                       insns[i].code = BPF_JMP | BPF_CALL;
+                       insns[i].imm = BPF_FUNC_tail_call;
+                       /* fall-through */
+               }
+               if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+                   insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+                       if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+                               insns[i].code = BPF_JMP | BPF_CALL;
+                       if (!bpf_dump_raw_ok())
+                               insns[i].imm = 0;
+                       continue;
+               }
+
+               if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+                       continue;
+
+               imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+               map = bpf_map_from_imm(prog, imm);
+               if (map) {
+                       insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+                       insns[i].imm = map->id;
+                       insns[i + 1].imm = 0;
+                       continue;
+               }
+
+               if (!bpf_dump_raw_ok() &&
+                   imm == (unsigned long)prog->aux) {
+                       insns[i].imm = 0;
+                       insns[i + 1].imm = 0;
+                       continue;
+               }
+       }
+
+       return insns;
+}
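+
+From user space, the effect of bpf_insn_prepare_dump() is that an LD_IMM64 which held a raw map pointer comes back with src_reg set to BPF_PSEUDO_MAP_FD, the map ID in the low immediate, and the high immediate zeroed. A small sketch of a consumer walking a dumped xlated image to recover those IDs (print_map_ids is hypothetical, not a kernel or libbpf function):
+
+#include <linux/bpf.h>
+#include <stdio.h>
+
+/* Scan a buffer returned via xlated_prog_insns and print the map IDs
+ * the kernel substituted for raw map pointers. */
+static void print_map_ids(const struct bpf_insn *insns, unsigned int cnt)
+{
+	unsigned int i;
+
+	for (i = 0; i + 1 < cnt; i++) {
+		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+			continue;
+		if (insns[i].src_reg == BPF_PSEUDO_MAP_FD)
+			printf("insn %u loads map id %d\n", i, insns[i].imm);
+		i++;	/* LD_IMM64 uses two instruction slots */
+	}
+}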
+
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                                   const union bpf_attr *attr,
                                   union bpf_attr __user *uattr)
@@ -1601,21 +1676,43 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        ulen = info.jited_prog_len;
        info.jited_prog_len = prog->jited_len;
        if (info.jited_prog_len && ulen) {
-               uinsns = u64_to_user_ptr(info.jited_prog_insns);
-               ulen = min_t(u32, info.jited_prog_len, ulen);
-               if (copy_to_user(uinsns, prog->bpf_func, ulen))
-                       return -EFAULT;
+               if (bpf_dump_raw_ok()) {
+                       uinsns = u64_to_user_ptr(info.jited_prog_insns);
+                       ulen = min_t(u32, info.jited_prog_len, ulen);
+                       if (copy_to_user(uinsns, prog->bpf_func, ulen))
+                               return -EFAULT;
+               } else {
+                       info.jited_prog_insns = 0;
+               }
        }
 
        ulen = info.xlated_prog_len;
        info.xlated_prog_len = bpf_prog_insn_size(prog);
        if (info.xlated_prog_len && ulen) {
+               struct bpf_insn *insns_sanitized;
+               bool fault;
+
+               if (prog->blinded && !bpf_dump_raw_ok()) {
+                       info.xlated_prog_insns = 0;
+                       goto done;
+               }
+               insns_sanitized = bpf_insn_prepare_dump(prog);
+               if (!insns_sanitized)
+                       return -ENOMEM;
                uinsns = u64_to_user_ptr(info.xlated_prog_insns);
                ulen = min_t(u32, info.xlated_prog_len, ulen);
-               if (copy_to_user(uinsns, prog->insnsi, ulen))
+               fault = copy_to_user(uinsns, insns_sanitized, ulen);
+               kfree(insns_sanitized);
+               if (fault)
                        return -EFAULT;
        }
 
+       if (bpf_prog_is_dev_bound(prog->aux)) {
+               err = bpf_prog_offload_info_fill(&info, prog);
+               if (err)
+                       return err;
+       }
+
 done:
        if (copy_to_user(uinfo, &info, info_len) ||
            put_user(info_len, &uattr->info.info_len))
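
Tying it together, the new bpf_prog_offload_info_fill() call means a BPF_OBJ_GET_INFO_BY_FD query on a device-bound program now also reports which device it lives on. A minimal user-space sketch, assuming the uapi struct bpf_prog_info gained an ifindex field as part of this series (that hunk is not shown here):

#define _GNU_SOURCE
#include <linux/bpf.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Query program info through the bpf() syscall and report whether the
 * program is offloaded to a netdev. */
static int report_prog_device(int prog_fd)
{
	struct bpf_prog_info info = {};
	union bpf_attr attr = {};

	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return -1;

	if (info.ifindex)	/* assumed field, filled for offloaded programs */
		printf("prog id %u: offloaded to ifindex %u\n", info.id, info.ifindex);
	else
		printf("prog id %u: runs on the host\n", info.id);
	return 0;
}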