net/ipv4/bpf_tcp_ca.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

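/*
 * tcp_congestion_ops members that a BPF CA implementation may leave
 * unset.  Every other function pointer must be backed by a BPF prog,
 * which is enforced in bpf_tcp_ca_init_member() below.
 */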
static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};

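/* tcp_congestion_ops members that cannot be implemented in BPF */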
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

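/*
 * Resolve and cache the BTF type ids of "sock" and "tcp_sock" once,
 * when the tcp_congestion_ops struct_ops type is initialized against
 * vmlinux BTF.
 */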
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

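/*
 * A bpf_tcp_ca prog may only do aligned, read-only loads of its context
 * (the array of u64 arguments).  When an argument is known to be a
 * "struct sock *", promote it to "struct tcp_sock *" so the prog can
 * dereference tcp_sock fields directly.
 */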
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	if (!btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

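/*
 * Reads of BTF-described structs fall through to the generic
 * btf_struct_access().  Writes are restricted to a whitelist of
 * tcp_sock / inet_connection_sock fields and must stay within the
 * bounds of the written member.
 */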
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf *btf,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return NOT_INIT;
}

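/*
 * Kernel function exposed to bpf_tcp_ca progs as the BPF_FUNC_tcp_send_ack
 * helper: sends a pure ACK, with rcv_nxt used as the ack_seq of the
 * outgoing segment.
 */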
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

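/*
 * Helpers callable from a bpf_tcp_ca prog: bpf_tcp_send_ack() and the
 * sk_storage get/delete helpers, on top of the base helper set.
 */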
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

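/*
 * Copy or validate one tcp_congestion_ops member from the user-supplied
 * map value (udata) into the kernel copy (kdata).  Returning 1 means the
 * member is fully handled here, 0 defers to the generic struct_ops code
 * (e.g. for func ptrs), and a negative value rejects the map update.
 */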
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}

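/* Refuse to implement an unsupported tcp_congestion_ops member in BPF. */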
static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

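/*
 * kdata is the kernel copy of the struct_ops map value, i.e. a fully
 * formed tcp_congestion_ops, so (un)registration goes through the
 * regular TCP CA (un)register paths.
 */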
static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

/* Avoid sparse warning. It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

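/*
 * A minimal sketch of the BPF side (illustrative only; "simple_reno",
 * its trivial logic, and the libbpf SEC()/BPF_PROG conventions are
 * assumptions, not part of this file).  It implements just the three
 * compulsory members: ssthresh, cong_avoid and undo_cwnd:
 *
 *	SEC("struct_ops/simple_ssthresh")
 *	__u32 BPF_PROG(simple_ssthresh, struct sock *sk)
 *	{
 *		const struct tcp_sock *tp = (const struct tcp_sock *)sk;
 *		__u32 cwnd = tp->snd_cwnd >> 1;
 *
 *		return cwnd > 2 ? cwnd : 2;	// halve cwnd, floor of 2
 *	}
 *
 *	SEC("struct_ops/simple_cong_avoid")
 *	void BPF_PROG(simple_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 *	{
 *		struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		tp->snd_cwnd += acked;		// crude additive increase
 *	}
 *
 *	SEC("struct_ops/simple_undo_cwnd")
 *	__u32 BPF_PROG(simple_undo_cwnd, struct sock *sk)
 *	{
 *		return ((struct tcp_sock *)sk)->snd_cwnd;
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops simple_reno = {
 *		.ssthresh	= (void *)simple_ssthresh,
 *		.cong_avoid	= (void *)simple_cong_avoid,
 *		.undo_cwnd	= (void *)simple_undo_cwnd,
 *		.name		= "simple_reno",
 *	};
 *
 * The sock-to-tcp_sock casts work because bpf_tcp_ca_is_valid_access()
 * promotes the pointer, and the snd_cwnd write is one of the fields
 * whitelisted in bpf_tcp_ca_btf_struct_access().  Registering the
 * struct_ops map ends up in bpf_tcp_ca_reg() above.
 */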
struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.name = "tcp_congestion_ops",
};