// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

#include <net/ip.h>
#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

static DEFINE_IDA(reuseport_ida);
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
                               struct sock_reuseport *reuse, bool bind_inany);

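/* Record that the group contains connected sockets. Once this is set,
 * lookups can no longer return early on the first match, since a
 * connected socket may score higher on its 4-tuple; the flag is never
 * cleared for the lifetime of the group.
 */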
void reuseport_has_conns_set(struct sock *sk)
{
        struct sock_reuseport *reuse;

        if (!rcu_access_pointer(sk->sk_reuseport_cb))
                return;

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        if (likely(reuse))
                reuse->has_conns = 1;
        spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_has_conns_set);

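/* reuse->incoming_cpu counts the sockets in the group that have
 * sk_incoming_cpu set (>= 0). While it is non-zero, the hash-based
 * lookup prefers a socket whose sk_incoming_cpu matches the CPU
 * currently processing the packet.
 */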
static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse)
{
        /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
        WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1);
}

static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse)
{
        /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
        WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1);
}

static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
{
        if (sk->sk_incoming_cpu >= 0)
                __reuseport_get_incoming_cpu(reuse);
}

static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
{
        if (sk->sk_incoming_cpu >= 0)
                __reuseport_put_incoming_cpu(reuse);
}

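/* Called from the SO_INCOMING_CPU setsockopt() path to keep
 * sk->sk_incoming_cpu and the group-wide counter consistent under
 * reuseport_lock.
 */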
void reuseport_update_incoming_cpu(struct sock *sk, int val)
{
        struct sock_reuseport *reuse;
        int old_sk_incoming_cpu;

        if (unlikely(!rcu_access_pointer(sk->sk_reuseport_cb))) {
                /* Paired with READ_ONCE() in sk_incoming_cpu_update()
                 * and compute_score().
                 */
                WRITE_ONCE(sk->sk_incoming_cpu, val);
                return;
        }

        spin_lock_bh(&reuseport_lock);

        /* This must be done under reuseport_lock to avoid a race with
         * reuseport_grow(), which accesses sk->sk_incoming_cpu without
         * lock_sock() when detaching a shutdown()ed sk.
         *
         * Paired with READ_ONCE() in reuseport_select_sock_by_hash().
         */
        old_sk_incoming_cpu = sk->sk_incoming_cpu;
        WRITE_ONCE(sk->sk_incoming_cpu, val);

        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));

        /* reuseport_grow() has detached a closed sk. */
        if (!reuse)
                goto out;

        if (old_sk_incoming_cpu < 0 && val >= 0)
                __reuseport_get_incoming_cpu(reuse);
        else if (old_sk_incoming_cpu >= 0 && val < 0)
                __reuseport_put_incoming_cpu(reuse);

out:
        spin_unlock_bh(&reuseport_lock);
}

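/* Return the index of sk in reuse->socks[], or -1 if absent. The array
 * is split in two: listening sockets occupy [0, num_socks) from the
 * head, while closed (shutdown()ed) sockets occupy
 * [max_socks - num_closed_socks, max_socks) from the tail.
 */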
static int reuseport_sock_index(struct sock *sk,
                                const struct sock_reuseport *reuse,
                                bool closed)
{
        int left, right;

        if (!closed) {
                left = 0;
                right = reuse->num_socks;
        } else {
                left = reuse->max_socks - reuse->num_closed_socks;
                right = reuse->max_socks;
        }

        for (; left < right; left++)
                if (reuse->socks[left] == sk)
                        return left;
        return -1;
}

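/* The helpers below append to the listening (head) or closed (tail)
 * section of socks[] and detach by moving the section's most recently
 * added socket into the vacated slot, so each section stays contiguous.
 */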
static void __reuseport_add_sock(struct sock *sk,
                                 struct sock_reuseport *reuse)
{
        reuse->socks[reuse->num_socks] = sk;
        /* paired with smp_rmb() in reuseport_(select|migrate)_sock() */
        smp_wmb();
        reuse->num_socks++;
        reuseport_get_incoming_cpu(sk, reuse);
}

static bool __reuseport_detach_sock(struct sock *sk,
                                    struct sock_reuseport *reuse)
{
        int i = reuseport_sock_index(sk, reuse, false);

        if (i == -1)
                return false;

        reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
        reuse->num_socks--;
        reuseport_put_incoming_cpu(sk, reuse);

        return true;
}

static void __reuseport_add_closed_sock(struct sock *sk,
                                        struct sock_reuseport *reuse)
{
        reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
        /* paired with READ_ONCE() in inet_csk_bind_conflict() */
        WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
        reuseport_get_incoming_cpu(sk, reuse);
}

static bool __reuseport_detach_closed_sock(struct sock *sk,
                                           struct sock_reuseport *reuse)
{
        int i = reuseport_sock_index(sk, reuse, true);

        if (i == -1)
                return false;

        reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
        /* paired with READ_ONCE() in inet_csk_bind_conflict() */
        WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);
        reuseport_put_incoming_cpu(sk, reuse);

        return true;
}

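/* Allocate a group with room for max_socks entries in the trailing
 * socks[] array. GFP_ATOMIC is required since callers hold
 * reuseport_lock or run in the softirq receive path.
 */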
static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
        unsigned int size = sizeof(struct sock_reuseport) +
                            sizeof(struct sock *) * max_socks;
        struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

        if (!reuse)
                return NULL;

        reuse->max_socks = max_socks;

        RCU_INIT_POINTER(reuse->prog, NULL);
        return reuse;
}

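/* Create the reuseport group for sk, or update the existing one when
 * the setsockopt() path and the bind/hash path race. Returns 0 on
 * success or a negative errno.
 */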
int reuseport_alloc(struct sock *sk, bool bind_inany)
{
        struct sock_reuseport *reuse;
        int id, ret = 0;

        /* The bh-safe lock is used since this call may precede the hlist
         * lock taken in the softirq receive path or in setsockopt() from
         * process context.
         */
        spin_lock_bh(&reuseport_lock);

        /* Allocation attempts can occur concurrently via the setsockopt path
         * and the bind/hash path. Nothing to do when we lose the race.
         */
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        if (reuse) {
                if (reuse->num_closed_socks) {
                        /* sk was shutdown()ed before */
                        ret = reuseport_resurrect(sk, reuse, NULL, bind_inany);
                        goto out;
                }

                /* Only set reuse->bind_inany if bind_inany is true.
                 * Otherwise, it would overwrite the reuse->bind_inany
                 * already set by the bind/hash path.
                 */
                if (bind_inany)
                        reuse->bind_inany = bind_inany;
                goto out;
        }

        reuse = __reuseport_alloc(INIT_SOCKS);
        if (!reuse) {
                ret = -ENOMEM;
                goto out;
        }

        id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
        if (id < 0) {
                kfree(reuse);
                ret = id;
                goto out;
        }

        reuse->reuseport_id = id;
        reuse->bind_inany = bind_inany;
        reuse->socks[0] = sk;
        reuse->num_socks = 1;
        reuseport_get_incoming_cpu(sk, reuse);
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
        spin_unlock_bh(&reuseport_lock);

        return ret;
}
EXPORT_SYMBOL(reuseport_alloc);

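/* Double the socket array, capped at U16_MAX entries. At the cap, try
 * to make room by evicting one closed socket instead. Every member is
 * repointed to the new group under reuseport_lock; the old group is
 * freed after an RCU grace period.
 */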
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
        struct sock_reuseport *more_reuse;
        u32 more_socks_size, i;

        more_socks_size = reuse->max_socks * 2U;
        if (more_socks_size > U16_MAX) {
                if (reuse->num_closed_socks) {
                        /* Make room by removing a closed sk.
                         * The child has already been migrated.
                         * Only reqsk left at this point.
                         */
                        struct sock *sk;

                        sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
                        RCU_INIT_POINTER(sk->sk_reuseport_cb, NULL);
                        __reuseport_detach_closed_sock(sk, reuse);

                        return reuse;
                }

                return NULL;
        }

        more_reuse = __reuseport_alloc(more_socks_size);
        if (!more_reuse)
                return NULL;

        more_reuse->num_socks = reuse->num_socks;
        more_reuse->num_closed_socks = reuse->num_closed_socks;
        more_reuse->prog = reuse->prog;
        more_reuse->reuseport_id = reuse->reuseport_id;
        more_reuse->bind_inany = reuse->bind_inany;
        more_reuse->has_conns = reuse->has_conns;
        more_reuse->incoming_cpu = reuse->incoming_cpu;

        memcpy(more_reuse->socks, reuse->socks,
               reuse->num_socks * sizeof(struct sock *));
        memcpy(more_reuse->socks +
               (more_reuse->max_socks - more_reuse->num_closed_socks),
               reuse->socks + (reuse->max_socks - reuse->num_closed_socks),
               reuse->num_closed_socks * sizeof(struct sock *));
        more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

        for (i = 0; i < reuse->max_socks; ++i)
                rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
                                   more_reuse);

        /* Note: we use kfree_rcu here instead of reuseport_free_rcu so
         * that reuse and more_reuse can temporarily share a reference
         * to prog.
         */
        kfree_rcu(reuse, rcu);
        return more_reuse;
}

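/* RCU callback: drop the reference on the attached BPF prog, release
 * the group's ID, and free the group itself.
 */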
static void reuseport_free_rcu(struct rcu_head *head)
{
        struct sock_reuseport *reuse;

        reuse = container_of(head, struct sock_reuseport, rcu);
        sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
        ida_free(&reuseport_ida, reuse->reuseport_id);
        kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
        struct sock_reuseport *old_reuse, *reuse;

        if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
                int err = reuseport_alloc(sk2, bind_inany);

                if (err)
                        return err;
        }

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                              lockdep_is_held(&reuseport_lock));
        if (old_reuse && old_reuse->num_closed_socks) {
                /* sk was shutdown()ed before */
                int err = reuseport_resurrect(sk, old_reuse, reuse, reuse->bind_inany);

                spin_unlock_bh(&reuseport_lock);
                return err;
        }

        if (old_reuse && old_reuse->num_socks != 1) {
                spin_unlock_bh(&reuseport_lock);
                return -EBUSY;
        }

        if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
                reuse = reuseport_grow(reuse);
                if (!reuse) {
                        spin_unlock_bh(&reuseport_lock);
                        return -ENOMEM;
                }
        }

        __reuseport_add_sock(sk, reuse);
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

        spin_unlock_bh(&reuseport_lock);

        if (old_reuse)
                call_rcu(&old_reuse->rcu, reuseport_free_rcu);
        return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

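/* Bring a shutdown()ed sk back into service when it listen()s again:
 * either move it from the closed to the listening section of its old
 * group, join another group already listening on the port, or allocate
 * a fresh group and detach sk from the old one.
 */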
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
                               struct sock_reuseport *reuse, bool bind_inany)
{
        if (old_reuse == reuse) {
                /* If sk was in the same reuseport group, just pop sk out of
                 * the closed section and push sk into the listening section.
                 */
                __reuseport_detach_closed_sock(sk, old_reuse);
                __reuseport_add_sock(sk, old_reuse);
                return 0;
        }

        if (!reuse) {
                /* In bind()/listen() path, we cannot carry over the eBPF prog
                 * for the shutdown()ed socket. In setsockopt() path, we should
                 * not change the eBPF prog of listening sockets by attaching a
                 * prog to the shutdown()ed socket. Thus, we will allocate a new
                 * reuseport group and detach sk from the old group.
                 */
                int id;

                reuse = __reuseport_alloc(INIT_SOCKS);
                if (!reuse)
                        return -ENOMEM;

                id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
                if (id < 0) {
                        kfree(reuse);
                        return id;
                }

                reuse->reuseport_id = id;
                reuse->bind_inany = bind_inany;
        } else {
                /* Move sk from the old group to the new one if
                 * - all the other listeners in the old group were close()d or
                 *   shutdown()ed, and then sk2 has listen()ed on the same port
                 * OR
                 * - sk listen()ed without bind() (or with autobind), was
                 *   shutdown()ed, and then listen()s on another port which
                 *   sk2 listen()s on.
                 */
                if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
                        reuse = reuseport_grow(reuse);
                        if (!reuse)
                                return -ENOMEM;
                }
        }

        __reuseport_detach_closed_sock(sk, old_reuse);
        __reuseport_add_sock(sk, reuse);
        rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

        if (old_reuse->num_socks + old_reuse->num_closed_socks == 0)
                call_rcu(&old_reuse->rcu, reuseport_free_rcu);

        return 0;
}

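/* Remove sk from its group entirely, e.g. on close(). The group itself
 * is freed after an RCU grace period once its last socket is gone.
 */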
void reuseport_detach_sock(struct sock *sk)
{
        struct sock_reuseport *reuse;

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));

        /* reuseport_grow() has detached a closed sk */
        if (!reuse)
                goto out;

        /* Notify the bpf side. The sk may be added to a sockarray
         * map. If so, sockarray logic will remove it from the map.
         *
         * Other bpf map types that work with reuseport, like sockmap,
         * don't need an explicit callback from here. They override sk
         * unhash/close ops to remove the sk from the map before we
         * get to this point.
         */
        bpf_sk_reuseport_detach(sk);

        rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

        if (!__reuseport_detach_closed_sock(sk, reuse))
                __reuseport_detach_sock(sk, reuse);

        if (reuse->num_socks + reuse->num_closed_socks == 0)
                call_rcu(&reuse->rcu, reuseport_free_rcu);

out:
        spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

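/* Called when a listener stops listening. If the group can migrate
 * child sockets (via the tcp_migrate_req sysctl or a
 * SELECT_OR_MIGRATE BPF prog), keep sk in the closed section so its
 * pending requests can be migrated; otherwise detach it right away.
 */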
void reuseport_stop_listen_sock(struct sock *sk)
{
        if (sk->sk_protocol == IPPROTO_TCP) {
                struct sock_reuseport *reuse;
                struct bpf_prog *prog;

                spin_lock_bh(&reuseport_lock);

                reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                                  lockdep_is_held(&reuseport_lock));
                prog = rcu_dereference_protected(reuse->prog,
                                                 lockdep_is_held(&reuseport_lock));

                if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) ||
                    (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) {
                        /* Migration capable, move sk from the listening section
                         * to the closed section.
                         */
                        bpf_sk_reuseport_detach(sk);

                        __reuseport_detach_sock(sk, reuse);
                        __reuseport_add_closed_sock(sk, reuse);

                        spin_unlock_bh(&reuseport_lock);
                        return;
                }

                spin_unlock_bh(&reuseport_lock);
        }

        /* Not capable of migration; detach immediately */
        reuseport_detach_sock(sk);
}
EXPORT_SYMBOL(reuseport_stop_listen_sock);

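/* Run a socket-filter BPF program (SO_ATTACH_REUSEPORT_[CE]BPF); its
 * return value is used as an index into socks[]. A shared skb is
 * cloned first because the data pointer is temporarily advanced past
 * the protocol header.
 */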
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
                                   struct bpf_prog *prog, struct sk_buff *skb,
                                   int hdr_len)
{
        struct sk_buff *nskb = NULL;
        u32 index;

        if (skb_shared(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return NULL;
                skb = nskb;
        }

        /* temporarily advance data past protocol header */
        if (!pskb_pull(skb, hdr_len)) {
                kfree_skb(nskb);
                return NULL;
        }
        index = bpf_prog_run_save_cb(prog, skb);
        __skb_push(skb, hdr_len);

        consume_skb(nskb);

        if (index >= socks)
                return NULL;

        return reuse->socks[index];
}

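/* Pick a listening socket by hash, scanning at most one full round.
 * If any socket in the group uses SO_INCOMING_CPU, prefer one bound to
 * the current CPU; otherwise return the first non-ESTABLISHED socket
 * found.
 */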
static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
                                                  u32 hash, u16 num_socks)
{
        struct sock *first_valid_sk = NULL;
        int i, j;

        i = j = reciprocal_scale(hash, num_socks);
        do {
                struct sock *sk = reuse->socks[i];

                if (sk->sk_state != TCP_ESTABLISHED) {
                        /* Paired with WRITE_ONCE() in __reuseport_(get|put)_incoming_cpu(). */
                        if (!READ_ONCE(reuse->incoming_cpu))
                                return sk;

                        /* Paired with WRITE_ONCE() in reuseport_update_incoming_cpu(). */
                        if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                                return sk;

                        if (!first_valid_sk)
                                first_valid_sk = sk;
                }

                i++;
                if (i >= num_socks)
                        i = 0;
        } while (i != j);

        return first_valid_sk;
}

/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data. If
 * the skb does not yet point at the payload, this parameter represents
 * how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
                                   u32 hash,
                                   struct sk_buff *skb,
                                   int hdr_len)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *prog;
        struct sock *sk2 = NULL;
        u16 socks;

        rcu_read_lock();
        reuse = rcu_dereference(sk->sk_reuseport_cb);

        /* if memory allocation failed or add call is not yet complete */
        if (!reuse)
                goto out;

        prog = rcu_dereference(reuse->prog);
        socks = READ_ONCE(reuse->num_socks);
        if (likely(socks)) {
                /* paired with smp_wmb() in __reuseport_add_sock() */
                smp_rmb();

                if (!prog || !skb)
                        goto select_by_hash;

                if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
                        sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, NULL, hash);
                else
                        sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
                /* no bpf or invalid bpf result: fall back to hash usage */
                if (!sk2)
                        sk2 = reuseport_select_sock_by_hash(reuse, hash, socks);
        }

out:
        rcu_read_unlock();
        return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

/**
 * reuseport_migrate_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: close()ed or shutdown()ed socket in the group.
 * @migrating_sk: ESTABLISHED/SYN_RECV full socket in the accept queue or
 * NEW_SYN_RECV request socket during 3WHS.
 * @skb: skb to run through BPF filter.
 * Returns a socket (with sk_refcnt +1) that should accept the child socket
 * (or NULL on error).
 */
struct sock *reuseport_migrate_sock(struct sock *sk,
                                    struct sock *migrating_sk,
                                    struct sk_buff *skb)
{
        struct sock_reuseport *reuse;
        struct sock *nsk = NULL;
        bool allocated = false;
        struct bpf_prog *prog;
        u16 socks;
        u32 hash;

        rcu_read_lock();

        reuse = rcu_dereference(sk->sk_reuseport_cb);
        if (!reuse)
                goto out;

        socks = READ_ONCE(reuse->num_socks);
        if (unlikely(!socks))
                goto failure;

        /* paired with smp_wmb() in __reuseport_add_sock() */
        smp_rmb();

        hash = migrating_sk->sk_hash;
        prog = rcu_dereference(reuse->prog);
        if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
                if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req))
                        goto select_by_hash;
                goto failure;
        }

        if (!skb) {
                skb = alloc_skb(0, GFP_ATOMIC);
                if (!skb)
                        goto failure;
                allocated = true;
        }

        nsk = bpf_run_sk_reuseport(reuse, sk, prog, skb, migrating_sk, hash);

        if (allocated)
                kfree_skb(skb);

select_by_hash:
        if (!nsk)
                nsk = reuseport_select_sock_by_hash(reuse, hash, socks);

        if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) {
                nsk = NULL;
                goto failure;
        }

out:
        rcu_read_unlock();
        return nsk;

failure:
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
        goto out;
}
EXPORT_SYMBOL(reuseport_migrate_sock);

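/* Attach a BPF program to sk's group, replacing any existing one. An
 * unhashed socket with SO_REUSEPORT set gets its group allocated here
 * so that a prog can be attached before bind().
 */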
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *old_prog;

        if (sk_unhashed(sk)) {
                int err;

                if (!sk->sk_reuseport)
                        return -EINVAL;

                err = reuseport_alloc(sk, false);
                if (err)
                        return err;
        } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
                /* The socket wasn't bound with SO_REUSEPORT */
                return -EINVAL;
        }

        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        old_prog = rcu_dereference_protected(reuse->prog,
                                             lockdep_is_held(&reuseport_lock));
        rcu_assign_pointer(reuse->prog, prog);
        spin_unlock_bh(&reuseport_lock);

        sk_reuseport_prog_free(old_prog);
        return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);

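/* Detach the group's BPF program. Returns -ENOENT if no prog was
 * attached or sk has already been shutdown()ed, and -EINVAL if sk was
 * not bound with SO_REUSEPORT.
 */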
int reuseport_detach_prog(struct sock *sk)
{
        struct sock_reuseport *reuse;
        struct bpf_prog *old_prog;

        old_prog = NULL;
        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));

        /* reuse must be checked after acquiring the reuseport_lock
         * because reuseport_grow() can detach a closed sk.
         */
        if (!reuse) {
                spin_unlock_bh(&reuseport_lock);
                return sk->sk_reuseport ? -ENOENT : -EINVAL;
        }

        if (sk_unhashed(sk) && reuse->num_closed_socks) {
                spin_unlock_bh(&reuseport_lock);
                return -ENOENT;
        }

        old_prog = rcu_replace_pointer(reuse->prog, old_prog,
                                       lockdep_is_held(&reuseport_lock));
        spin_unlock_bh(&reuseport_lock);

        if (!old_prog)
                return -ENOENT;

        sk_reuseport_prog_free(old_prog);
        return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);