// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_sk*_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses
 *				if IPv6 only, and any IPv4 addresses
 *				if not IPv6 only
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 *				and 0.0.0.0 equals to 0.0.0.0 only
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
                                 const struct in6_addr *sk2_rcv_saddr6,
                                 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
                                 bool sk1_ipv6only, bool sk2_ipv6only,
                                 bool match_sk1_wildcard,
                                 bool match_sk2_wildcard)
{
        int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
        int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

        /* if both are mapped, treat as IPv4 */
        if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
                if (!sk2_ipv6only) {
                        if (sk1_rcv_saddr == sk2_rcv_saddr)
                                return true;
                        return (match_sk1_wildcard && !sk1_rcv_saddr) ||
                                (match_sk2_wildcard && !sk2_rcv_saddr);
                }
                return false;
        }

        if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
                return true;

        if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
            !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
                return true;

        if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
            !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
                return true;

        if (sk2_rcv_saddr6 &&
            ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
                return true;

        return false;
}
#endif

/* match_sk*_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				0.0.0.0 only equals to 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
                                 bool sk2_ipv6only, bool match_sk1_wildcard,
                                 bool match_sk2_wildcard)
{
        if (!sk2_ipv6only) {
                if (sk1_rcv_saddr == sk2_rcv_saddr)
                        return true;
                return (match_sk1_wildcard && !sk1_rcv_saddr) ||
                        (match_sk2_wildcard && !sk2_rcv_saddr);
        }
        return false;
}
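
/* Illustrative examples (not part of the original file): with wildcard
 * matching enabled on both sides, a socket bound to 0.0.0.0 compares equal
 * to one bound to, say, 192.0.2.1 on the same port, so the two count as
 * sharing an address for bind-conflict purposes; with wildcard matching
 * disabled, only identical addresses compare equal.  The IPv6 helper above
 * additionally treats IPv4-mapped addresses (::ffff:a.b.c.d) as IPv4
 * unless the other socket is IPv6-only.
 */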

bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
                          bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6)
                return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
                                            inet6_rcv_saddr(sk2),
                                            sk->sk_rcv_saddr,
                                            sk2->sk_rcv_saddr,
                                            ipv6_only_sock(sk),
                                            ipv6_only_sock(sk2),
                                            match_wildcard,
                                            match_wildcard);
#endif
        return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
                                    ipv6_only_sock(sk2), match_wildcard,
                                    match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);

bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6)
                return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
        return !sk->sk_rcv_saddr;
}

void inet_get_local_port_range(struct net *net, int *low, int *high)
{
        unsigned int seq;

        do {
                seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

                *low = net->ipv4.ip_local_ports.range[0];
                *high = net->ipv4.ip_local_ports.range[1];
        } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
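
/* Illustrative usage (not part of the original file): the seqlock retry
 * loop above guarantees that a concurrent
 *	sysctl -w net.ipv4.ip_local_port_range="32768 60999"
 * is never observed half-applied; a torn read of range[0]/range[1] fails
 * read_seqretry() and is simply redone.
 */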

static bool inet_use_bhash2_on_bind(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6) {
                int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);

                return addr_type != IPV6_ADDR_ANY &&
                        addr_type != IPV6_ADDR_MAPPED;
        }
#endif
        return sk->sk_rcv_saddr != htonl(INADDR_ANY);
}

static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
                               kuid_t sk_uid, bool relax,
                               bool reuseport_cb_ok, bool reuseport_ok)
{
        int bound_dev_if2;

        if (sk == sk2)
                return false;

        bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);

        if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
            sk->sk_bound_dev_if == bound_dev_if2) {
                if (sk->sk_reuse && sk2->sk_reuse &&
                    sk2->sk_state != TCP_LISTEN) {
                        if (!relax || (!reuseport_ok && sk->sk_reuseport &&
                                       sk2->sk_reuseport && reuseport_cb_ok &&
                                       (sk2->sk_state == TCP_TIME_WAIT ||
                                        uid_eq(sk_uid, sock_i_uid(sk2)))))
                                return true;
                } else if (!reuseport_ok || !sk->sk_reuseport ||
                           !sk2->sk_reuseport || !reuseport_cb_ok ||
                           (sk2->sk_state != TCP_TIME_WAIT &&
                            !uid_eq(sk_uid, sock_i_uid(sk2)))) {
                        return true;
                }
        }
        return false;
}
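
/* Rough decision summary (illustrative, not part of the original file):
 * sockets bound to different devices via SO_BINDTODEVICE never conflict
 * here.  Otherwise, SO_REUSEADDR on both sockets (with the existing one
 * not listening) and SO_REUSEPORT on both sockets (same uid, or the
 * existing one in TIME_WAIT) are the two escape hatches, with the
 * relax/reuseport_ok flags selecting how strictly each pass of the bind
 * checks applies them.
 */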

static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
                                   kuid_t sk_uid, bool relax,
                                   bool reuseport_cb_ok, bool reuseport_ok)
{
        if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
                return false;

        return inet_bind_conflict(sk, sk2, sk_uid, relax,
                                  reuseport_cb_ok, reuseport_ok);
}

static bool inet_bhash2_conflict(const struct sock *sk,
                                 const struct inet_bind2_bucket *tb2,
                                 kuid_t sk_uid,
                                 bool relax, bool reuseport_cb_ok,
                                 bool reuseport_ok)
{
        struct inet_timewait_sock *tw2;
        struct sock *sk2;

        sk_for_each_bound_bhash2(sk2, &tb2->owners) {
                if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
                                           reuseport_cb_ok, reuseport_ok))
                        return true;
        }

        twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
                sk2 = (struct sock *)tw2;

                if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
                                           reuseport_cb_ok, reuseport_ok))
                        return true;
        }

        return false;
}

/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
                                  const struct inet_bind_bucket *tb,
                                  const struct inet_bind2_bucket *tb2, /* may be null */
                                  bool relax, bool reuseport_ok)
{
        bool reuseport_cb_ok;
        struct sock_reuseport *reuseport_cb;
        kuid_t uid = sock_i_uid((struct sock *)sk);

        rcu_read_lock();
        reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
        /* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
        reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
        rcu_read_unlock();

        /*
         * Unlike other sk lookup places we do not check
         * for sk_net here, since _all_ the socks listed
         * in tb->owners and tb2->owners list belong
         * to the same net - the one this bucket belongs to.
         */

        if (!inet_use_bhash2_on_bind(sk)) {
                struct sock *sk2;

                sk_for_each_bound(sk2, &tb->owners)
                        if (inet_bind_conflict(sk, sk2, uid, relax,
                                               reuseport_cb_ok, reuseport_ok) &&
                            inet_rcv_saddr_equal(sk, sk2, true))
                                return true;

                return false;
        }

        /* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
         * ipv4) should have been checked already. We need to do these two
         * checks separately because their spinlocks have to be acquired/released
         * independently of each other, to prevent possible deadlocks.
         */
        return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
                                           reuseport_ok);
}
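
/* Illustrative note (not part of the original file): sockets bound to a
 * specific address take the bhash2 path above, where conflicts only need
 * to be checked against sockets sharing the same (address, port) pair;
 * only wildcard binds still have to walk the whole per-port tb->owners
 * list.
 */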

/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
 * INADDR_ANY (if ipv4) socket.
 *
 * Caller must hold bhash hashbucket lock with local bh disabled, to protect
 * against concurrent binds on the port for addr any.
 */
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
                                          bool relax, bool reuseport_ok)
{
        kuid_t uid = sock_i_uid((struct sock *)sk);
        const struct net *net = sock_net(sk);
        struct sock_reuseport *reuseport_cb;
        struct inet_bind_hashbucket *head2;
        struct inet_bind2_bucket *tb2;
        bool reuseport_cb_ok;

        rcu_read_lock();
        reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
        /* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
        reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
        rcu_read_unlock();

        head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);

        spin_lock(&head2->lock);

        inet_bind_bucket_for_each(tb2, &head2->chain)
                if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
                        break;

        if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
                                        reuseport_ok)) {
                spin_unlock(&head2->lock);
                return true;
        }

        spin_unlock(&head2->lock);
        return false;
}

/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket locks held if successful.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
                        struct inet_bind2_bucket **tb2_ret,
                        struct inet_bind_hashbucket **head2_ret, int *port_ret)
{
        struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
        int i, low, high, attempt_half, port, l3mdev;
        struct inet_bind_hashbucket *head, *head2;
        struct net *net = sock_net(sk);
        struct inet_bind2_bucket *tb2;
        struct inet_bind_bucket *tb;
        u32 remaining, offset;
        bool relax = false;

        l3mdev = inet_sk_bound_l3mdev(sk);
ports_exhausted:
        attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
        inet_get_local_port_range(net, &low, &high);
        high++;	/* [32768, 60999] -> [32768, 61000[ */
        if (high - low < 4)
                attempt_half = 0;
        if (attempt_half) {
                int half = low + (((high - low) >> 2) << 1);

                if (attempt_half == 1)
                        high = half;
                else
                        low = half;
        }
        remaining = high - low;
        if (likely(remaining > 1))
                remaining &= ~1U;

        offset = get_random_u32_below(remaining);
        /* __inet_hash_connect() favors ports having @low parity
         * We do the opposite to not pollute connect() users.
         */
        offset |= 1U;

other_parity_scan:
        port = low + offset;
        for (i = 0; i < remaining; i += 2, port += 2) {
                if (unlikely(port >= high))
                        port -= remaining;
                if (inet_is_local_reserved_port(net, port))
                        continue;
                head = &hinfo->bhash[inet_bhashfn(net, port,
                                                  hinfo->bhash_size)];
                spin_lock_bh(&head->lock);
                if (inet_use_bhash2_on_bind(sk)) {
                        if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
                                goto next_port;
                }

                head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
                spin_lock(&head2->lock);
                tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
                inet_bind_bucket_for_each(tb, &head->chain)
                        if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
                                if (!inet_csk_bind_conflict(sk, tb, tb2,
                                                            relax, false))
                                        goto success;
                                spin_unlock(&head2->lock);
                                goto next_port;
                        }
                tb = NULL;
                goto success;
next_port:
                spin_unlock_bh(&head->lock);
                cond_resched();
        }

        offset--;
        if (!(offset & 1))
                goto other_parity_scan;

        if (attempt_half == 1) {
                /* OK we now try the upper half of the range */
                attempt_half = 2;
                goto other_half_scan;
        }

        if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
                /* We still have a chance to connect to different destinations */
                relax = true;
                goto ports_exhausted;
        }
        return NULL;
success:
        *port_ret = port;
        *tb_ret = tb;
        *tb2_ret = tb2;
        *head2_ret = head2;
        return head;
}
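
/* Rough worked example of the scan above (not part of the original file):
 * with the default range [32768, 60999] and SK_CAN_REUSE set, the first
 * pass restricts itself to the lower half of the range (attempt_half == 1).
 * Starting from a random odd offset, the loop probes odd-numbered ports,
 * stepping by two and wrapping at the top; if they are all taken it
 * retries with the other parity, then the upper half, and finally (when
 * the ip_autobind_reuse sysctl is enabled) once more with relaxed
 * conflict rules before giving up.
 */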

static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
                                     struct sock *sk)
{
        kuid_t uid = sock_i_uid(sk);

        if (tb->fastreuseport <= 0)
                return 0;
        if (!sk->sk_reuseport)
                return 0;
        if (rcu_access_pointer(sk->sk_reuseport_cb))
                return 0;
        if (!uid_eq(tb->fastuid, uid))
                return 0;
        /* We only need to check the rcv_saddr if this tb was once marked
         * without fastreuseport and then was reset, as we can only know that
         * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
         * owners list.
         */
        if (tb->fastreuseport == FASTREUSEPORT_ANY)
                return 1;
#if IS_ENABLED(CONFIG_IPV6)
        if (tb->fast_sk_family == AF_INET6)
                return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
                                            inet6_rcv_saddr(sk),
                                            tb->fast_rcv_saddr,
                                            sk->sk_rcv_saddr,
                                            tb->fast_ipv6_only,
                                            ipv6_only_sock(sk), true, false);
#endif
        return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
                                    ipv6_only_sock(sk), true, false);
}

void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
                               struct sock *sk)
{
        kuid_t uid = sock_i_uid(sk);
        bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

        if (hlist_empty(&tb->owners)) {
                tb->fastreuse = reuse;
                if (sk->sk_reuseport) {
                        tb->fastreuseport = FASTREUSEPORT_ANY;
                        tb->fastuid = uid;
                        tb->fast_rcv_saddr = sk->sk_rcv_saddr;
                        tb->fast_ipv6_only = ipv6_only_sock(sk);
                        tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
                        tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
                } else {
                        tb->fastreuseport = 0;
                }
        } else {
                if (!reuse)
                        tb->fastreuse = 0;
                if (sk->sk_reuseport) {
                        /* We didn't match or we don't have fastreuseport set on
                         * the tb, but we have sk_reuseport set on this socket
                         * and we know that there are no bind conflicts with
                         * this socket in this tb, so reset our tb's reuseport
                         * settings so that any subsequent sockets that match
                         * our current socket will be put on the fast path.
                         *
                         * If we reset we need to set FASTREUSEPORT_STRICT so we
                         * do extra checking for all subsequent sk_reuseport
                         * socks.
                         */
                        if (!sk_reuseport_match(tb, sk)) {
                                tb->fastreuseport = FASTREUSEPORT_STRICT;
                                tb->fastuid = uid;
                                tb->fast_rcv_saddr = sk->sk_rcv_saddr;
                                tb->fast_ipv6_only = ipv6_only_sock(sk);
                                tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
                                tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
                        }
                } else {
                        tb->fastreuseport = 0;
                }
        }
}
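
/* Illustrative note (not part of the original file): tb->fastreuse and
 * tb->fastreuseport cache a summary of the sockets already bound to this
 * bucket.  While the cached state still matches a newly binding socket
 * (same uid, compatible address for SO_REUSEPORT), inet_csk_get_port()
 * can skip the full bind-conflict walk entirely; FASTREUSEPORT_STRICT
 * records that later binders must re-verify the address before trusting
 * the cache.
 */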

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
        struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
        bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
        bool found_port = false, check_bind_conflict = true;
        bool bhash_created = false, bhash2_created = false;
        int ret = -EADDRINUSE, port = snum, l3mdev;
        struct inet_bind_hashbucket *head, *head2;
        struct inet_bind2_bucket *tb2 = NULL;
        struct inet_bind_bucket *tb = NULL;
        bool head2_lock_acquired = false;
        struct net *net = sock_net(sk);

        l3mdev = inet_sk_bound_l3mdev(sk);

        if (!port) {
                head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
                if (!head)
                        return ret;

                head2_lock_acquired = true;

                if (tb && tb2)
                        goto success;
                found_port = true;
        } else {
                head = &hinfo->bhash[inet_bhashfn(net, port,
                                                  hinfo->bhash_size)];
                spin_lock_bh(&head->lock);
                inet_bind_bucket_for_each(tb, &head->chain)
                        if (inet_bind_bucket_match(tb, net, port, l3mdev))
                                break;
        }

        if (!tb) {
                tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
                                             head, port, l3mdev);
                if (!tb)
                        goto fail_unlock;
                bhash_created = true;
        }

        if (!found_port) {
                if (!hlist_empty(&tb->owners)) {
                        if (sk->sk_reuse == SK_FORCE_REUSE ||
                            (tb->fastreuse > 0 && reuse) ||
                            sk_reuseport_match(tb, sk))
                                check_bind_conflict = false;
                }

                if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
                        if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
                                goto fail_unlock;
                }

                head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
                spin_lock(&head2->lock);
                head2_lock_acquired = true;
                tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
        }

        if (!tb2) {
                tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
                                               net, head2, port, l3mdev, sk);
                if (!tb2)
                        goto fail_unlock;
                bhash2_created = true;
        }

        if (!found_port && check_bind_conflict) {
                if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
                        goto fail_unlock;
        }

success:
        inet_csk_update_fastreuse(tb, sk);

        if (!inet_csk(sk)->icsk_bind_hash)
                inet_bind_hash(sk, tb, tb2, port);
        WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
        WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
        ret = 0;

fail_unlock:
        if (ret) {
                if (bhash_created)
                        inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
                if (bhash2_created)
                        inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
                                                  tb2);
        }
        if (head2_lock_acquired)
                spin_unlock(&head2->lock);
        spin_unlock_bh(&head->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
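
/* Call-path sketch (illustrative, not part of the original file): a
 * userspace bind(fd, {AF_INET, INADDR_ANY, htons(8080)}, ...) on a TCP
 * socket reaches this function as sk->sk_prot->get_port(sk, 8080), while
 * bind() to port 0 takes the inet_csk_find_open_port() path and claims
 * an ephemeral port, visible afterwards via getsockname().
 */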

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        DEFINE_WAIT(wait);
        int err;

        /*
         * True wake-one mechanism for incoming connections: only
         * one process gets woken up, not the 'whole herd'.
         * Since we do not 'race & poll' for established sockets
         * anymore, the common case will execute the loop only once.
         *
         * Subtle issue: "add_wait_queue_exclusive()" will be added
         * after any current non-exclusive waiters, and we know that
         * it will always _stay_ after any new non-exclusive waiters
         * because all non-exclusive waiters are added at the
         * beginning of the wait-queue. As such, it's ok to "drop"
         * our exclusiveness temporarily when we get woken up without
         * having to remove and re-insert us on the wait queue.
         */
        for (;;) {
                prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                          TASK_INTERRUPTIBLE);
                release_sock(sk);
                if (reqsk_queue_empty(&icsk->icsk_accept_queue))
                        timeo = schedule_timeout(timeo);
                sched_annotate_sleep();
                lock_sock(sk);
                err = 0;
                if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
                        break;
                err = -EINVAL;
                if (sk->sk_state != TCP_LISTEN)
                        break;
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;
                err = -EAGAIN;
                if (!timeo)
                        break;
        }
        finish_wait(sk_sleep(sk), &wait);
        return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct request_sock *req;
        struct sock *newsk;
        int error;

        lock_sock(sk);

        /* We need to make sure that this socket is listening,
         * and that it has something pending.
         */
        error = -EINVAL;
        if (sk->sk_state != TCP_LISTEN)
                goto out_err;

        /* Find already established connection */
        if (reqsk_queue_empty(queue)) {
                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

                /* If this is a non-blocking socket don't sleep */
                error = -EAGAIN;
                if (!timeo)
                        goto out_err;

                error = inet_csk_wait_for_connect(sk, timeo);
                if (error)
                        goto out_err;
        }
        req = reqsk_queue_remove(queue, sk);
        newsk = req->sk;

        if (sk->sk_protocol == IPPROTO_TCP &&
            tcp_rsk(req)->tfo_listener) {
                spin_lock_bh(&queue->fastopenq.lock);
                if (tcp_rsk(req)->tfo_listener) {
                        /* We are still waiting for the final ACK from 3WHS
                         * so can't free req now. Instead, we set req->sk to
                         * NULL to signify that the child socket is taken
                         * so reqsk_fastopen_remove() will free the req
                         * when 3WHS finishes (or is aborted).
                         */
                        req->sk = NULL;
                        req = NULL;
                }
                spin_unlock_bh(&queue->fastopenq.lock);
        }

out:
        release_sock(sk);
        if (newsk && mem_cgroup_sockets_enabled) {
                int amt;

                /* atomically get the memory usage, set and charge the
                 * newsk->sk_memcg.
                 */
                lock_sock(newsk);

                /* The socket has not been accepted yet, no need to look at
                 * newsk->sk_wmem_queued.
                 */
                amt = sk_mem_pages(newsk->sk_forward_alloc +
                                   atomic_read(&newsk->sk_rmem_alloc));
                mem_cgroup_sk_alloc(newsk);
                if (newsk->sk_memcg && amt)
                        mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
                                                GFP_KERNEL | __GFP_NOFAIL);

                release_sock(newsk);
        }
        if (req)
                reqsk_put(req);
        return newsk;
out_err:
        newsk = NULL;
        req = NULL;
        *err = error;
        goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
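
/* Call-path sketch (illustrative, not part of the original file): a
 * blocking accept(fd, ...) ends up here via the protocol's accept
 * handler, sleeps in inet_csk_wait_for_connect() until a handshake
 * completes, and returns an already-established child socket.  For a
 * TCP Fast Open listener the request can outlive the accept: req->sk is
 * NULLed above and the request itself is freed only once the final ACK
 * arrives (or the connection is aborted).
 */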

/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
                               void (*retransmit_handler)(struct timer_list *t),
                               void (*delack_handler)(struct timer_list *t),
                               void (*keepalive_handler)(struct timer_list *t))
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
        timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
        timer_setup(&sk->sk_timer, keepalive_handler, 0);
        icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_pending = icsk->icsk_ack.pending = 0;

        sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
        sk_stop_timer(sk, &icsk->icsk_delack_timer);
        sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
        sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(const struct sock *sk,
                                     struct flowi4 *fl4,
                                     const struct request_sock *req)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct net *net = read_pnet(&ireq->ireq_net);
        struct ip_options_rcu *opt;
        struct rtable *rt;

        rcu_read_lock();
        opt = rcu_dereference(ireq->ireq_opt);

        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
                           htons(ireq->ir_num), sk->sk_uid);
        security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
        rcu_read_unlock();
        return &rt->dst;

route_err:
        ip_rt_put(rt);
no_route:
        rcu_read_unlock();
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
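
/* Illustrative note (not part of the original file): the flow built here
 * is keyed on the *request*, not on a full socket: the destination is the
 * client address (ir_rmt_addr, or the first hop of a source route), the
 * source is the local address the SYN arrived on, and the resulting route
 * is used to send the SYN+ACK before any child socket exists.
 */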

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                                            struct sock *newsk,
                                            const struct request_sock *req)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct net *net = read_pnet(&ireq->ireq_net);
        struct inet_sock *newinet = inet_sk(newsk);
        struct ip_options_rcu *opt;
        struct flowi4 *fl4;
        struct rtable *rt;

        opt = rcu_dereference(ireq->ireq_opt);
        fl4 = &newinet->cork.fl.u.ip4;

        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
                           htons(ireq->ir_num), sk->sk_uid);
        security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
        return &rt->dst;

route_err:
        ip_rt_put(rt);
no_route:
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

/* Decide when to expire the request and when to resend SYN-ACK */
static void syn_ack_recalc(struct request_sock *req,
                           const int max_syn_ack_retries,
                           const u8 rskq_defer_accept,
                           int *expire, int *resend)
{
        if (!rskq_defer_accept) {
                *expire = req->num_timeout >= max_syn_ack_retries;
                *resend = 1;
                return;
        }
        *expire = req->num_timeout >= max_syn_ack_retries &&
                  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
        /* Do not resend while waiting for data after ACK,
         * start to resend on end of deferring period to give
         * last chance for data or ACK to create established socket.
         */
        *resend = !inet_rsk(req)->acked ||
                  req->num_timeout >= rskq_defer_accept - 1;
}
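
/* Illustrative example (not part of the original file): with
 * TCP_DEFER_ACCEPT configured so that rskq_defer_accept == 3, a request
 * whose final ACK arrived but which has carried no data yet is neither
 * expired nor sent further SYN+ACKs for the first two timeouts
 * (num_timeout 0 and 1); from the third firing resending resumes, giving
 * the peer a last chance to send data before the request is dropped.
 */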

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
        int err = req->rsk_ops->rtx_syn_ack(parent, req);

        if (!err)
                req->num_retrans++;
        return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

static struct request_sock *inet_reqsk_clone(struct request_sock *req,
                                             struct sock *sk)
{
        struct sock *req_sk, *nreq_sk;
        struct request_sock *nreq;

        nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
        if (!nreq) {
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

                /* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
                sock_put(sk);
                return NULL;
        }

        req_sk = req_to_sk(req);
        nreq_sk = req_to_sk(nreq);

        memcpy(nreq_sk, req_sk,
               offsetof(struct sock, sk_dontcopy_begin));
        memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
               req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));

        sk_node_init(&nreq_sk->sk_node);
        nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
        nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;

        nreq->rsk_listener = sk;

        /* We need not acquire fastopenq->lock
         * because the child socket is locked in inet_csk_listen_stop().
         */
        if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
                rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);

        return nreq;
}
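
/* Illustrative note (not part of the original file): the two memcpy()
 * calls above copy everything except the [sk_dontcopy_begin,
 * sk_dontcopy_end) window of struct sock, the same exclusion window
 * sock_copy() relies on, so fields that must stay private to the old
 * request (hash-node linkage, queue mappings, refcount) are skipped and
 * then re-initialized by hand.
 */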

static void reqsk_queue_migrated(struct request_sock_queue *queue,
                                 const struct request_sock *req)
{
        if (req->num_timeout == 0)
                atomic_inc(&queue->young);
        atomic_inc(&queue->qlen);
}

static void reqsk_migrate_reset(struct request_sock *req)
{
        req->saved_syn = NULL;
#if IS_ENABLED(CONFIG_IPV6)
        inet_rsk(req)->ipv6_opt = NULL;
        inet_rsk(req)->pktopts = NULL;
#else
        inet_rsk(req)->ireq_opt = NULL;
#endif
}

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{
        struct sock *sk = req_to_sk(req);
        bool found = false;

        if (sk_hashed(sk)) {
                struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
                spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

                spin_lock(lock);
                found = __sk_nulls_del_node_init_rcu(sk);
                spin_unlock(lock);
        }
        if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
                reqsk_put(req);
        return found;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
        bool unlinked = reqsk_queue_unlink(req);

        if (unlinked) {
                reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
                reqsk_put(req);
        }
        return unlinked;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
        inet_csk_reqsk_queue_drop(sk, req);
        reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(struct timer_list *t)
{
        struct request_sock *req = from_timer(req, t, rsk_timer);
        struct request_sock *nreq = NULL, *oreq = req;
        struct sock *sk_listener = req->rsk_listener;
        struct inet_connection_sock *icsk;
        struct request_sock_queue *queue;
        struct net *net;
        int max_syn_ack_retries, qlen, expire = 0, resend = 0;

        if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
                struct sock *nsk;

                nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
                if (!nsk)
                        goto drop;

                nreq = inet_reqsk_clone(req, nsk);
                if (!nreq)
                        goto drop;

                /* The new timer for the cloned req can decrease the 2
                 * by calling inet_csk_reqsk_queue_drop_and_put(), so
                 * hold another count to prevent use-after-free and
                 * call reqsk_put() just before return.
                 */
                refcount_set(&nreq->rsk_refcnt, 2 + 1);
                timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
                reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);

                req = nreq;
                sk_listener = nsk;
        }

        icsk = inet_csk(sk_listener);
        net = sock_net(sk_listener);
        max_syn_ack_retries = icsk->icsk_syn_retries ? :
                READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
        /* Normally all the openreqs are young and become mature
         * (i.e. converted to established socket) for first timeout.
         * If synack was not acknowledged for 1 second, it means
         * one of the following things: synack was lost, ack was lost,
         * rtt is high or nobody planned to ack (i.e. synflood).
         * When server is a bit loaded, queue is populated with old
         * open requests, reducing effective size of queue.
         * When server is well loaded, queue size reduces to zero
         * after several minutes of work. It is not synflood,
         * it is normal operation. The solution is pruning
         * too old entries overriding normal timeout, when
         * situation becomes dangerous.
         *
         * Essentially, we reserve half of room for young
         * embryos; and abort old ones without pity, if old
         * ones are about to clog our table.
         */
        queue = &icsk->icsk_accept_queue;
        qlen = reqsk_queue_len(queue);
        if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
                int young = reqsk_queue_len_young(queue) << 1;

                while (max_syn_ack_retries > 2) {
                        if (qlen < young)
                                break;
                        max_syn_ack_retries--;
                        young <<= 1;
                }
        }
        syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
                       &expire, &resend);
        req->rsk_ops->syn_ack_timeout(req);
        if (!expire &&
            (!resend ||
             !inet_rtx_syn_ack(sk_listener, req) ||
             inet_rsk(req)->acked)) {
                if (req->num_timeout++ == 0)
                        atomic_dec(&queue->young);
                mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));

                if (!nreq)
                        return;

                if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
                        /* delete timer */
                        inet_csk_reqsk_queue_drop(sk_listener, nreq);
                        goto no_ownership;
                }

                __NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
                reqsk_migrate_reset(oreq);
                reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
                reqsk_put(oreq);

                reqsk_put(nreq);
                return;
        }

        /* Even if we can clone the req, we may not need to retransmit any more
         * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
         * CPU may win the "own_req" race so that inet_ehash_insert() fails.
         */
        if (nreq) {
                __NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
no_ownership:
                reqsk_migrate_reset(nreq);
                reqsk_queue_removed(queue, nreq);
                __reqsk_free(nreq);
        }

drop:
        inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
}
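
/* Rough worked example of the pruning above (not part of the original
 * file): with sk_max_ack_backlog == 128, pruning starts once qlen
 * exceeds 64.  If qlen == 65 and only 10 requests are young (young == 20
 * after the shift), the loop compares 65 against 20, then 40, then 80,
 * shrinking max_syn_ack_retries from e.g. 5 to 3, so old embryos run out
 * of SYN+ACK retries sooner while fresh ones keep their full budget.
 */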

static void reqsk_queue_hash_req(struct request_sock *req,
                                 unsigned long timeout)
{
        timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
        mod_timer(&req->rsk_timer, jiffies + timeout);

        inet_ehash_insert(req_to_sk(req), NULL, NULL);
        /* before letting lookups find us, make sure all req fields
         * are committed to memory and refcnt initialized.
         */
        smp_wmb();
        refcount_set(&req->rsk_refcnt, 2 + 1);
}

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout)
{
        reqsk_queue_hash_req(req, timeout);
        inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
                           const gfp_t priority)
{
        struct inet_connection_sock *icsk = inet_csk(newsk);

        if (!icsk->icsk_ulp_ops)
                return;

        if (icsk->icsk_ulp_ops->clone)
                icsk->icsk_ulp_ops->clone(req, newsk, priority);
}

/**
 * inet_csk_clone_lock - clone an inet socket, and lock its clone
 * @sk: the socket to clone
 * @req: request_sock
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
                                 const struct request_sock *req,
                                 const gfp_t priority)
{
        struct sock *newsk = sk_clone_lock(sk, priority);

        if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);

                inet_sk_set_state(newsk, TCP_SYN_RECV);
                newicsk->icsk_bind_hash = NULL;
                newicsk->icsk_bind2_hash = NULL;

                inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
                inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
                inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);

                /* listeners have SOCK_RCU_FREE, not the children */
                sock_reset_flag(newsk, SOCK_RCU_FREE);

                inet_sk(newsk)->mc_list = NULL;

                newsk->sk_mark = inet_rsk(req)->ir_mark;
                atomic64_set(&newsk->sk_cookie,
                             atomic64_read(&inet_rsk(req)->ir_cookie));

                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff = 0;
                newicsk->icsk_probes_out = 0;
                newicsk->icsk_probes_tstamp = 0;

                /* Deinitialize accept_queue to trap illegal accesses. */
                memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

                inet_clone_ulp(req, newsk, priority);

                security_inet_csk_clone(newsk, req);
        }
        return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
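
/* Illustrative note (not part of the original file): the clone starts
 * life in TCP_SYN_RECV with both bind-hash pointers cleared; its ports
 * come from the request (ir_num, ir_rmt_port) rather than from the
 * listener, and the accept queue is deliberately zeroed because only
 * listeners own one.
 */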

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
        WARN_ON(sk->sk_state != TCP_CLOSE);
        WARN_ON(!sock_flag(sk, SOCK_DEAD));

        /* It cannot be in hash table! */
        WARN_ON(!sk_unhashed(sk));

        /* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
        WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

        sk->sk_prot->destroy(sk);

        sk_stream_kill_queues(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);

        this_cpu_dec(*sk->sk_prot->orphan_count);

        sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows forcing the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
        __releases(&sk->sk_lock.slock)
{
        /* sk_clone_lock locked the socket and set refcnt to 2 */
        bh_unlock_sock(sk);
        sock_put(sk);
        inet_csk_prepare_for_destroy_sock(sk);
        inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

static int inet_ulp_can_listen(const struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
                return -EINVAL;

        return 0;
}

int inet_csk_listen_start(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err;

        err = inet_ulp_can_listen(sk);
        if (unlikely(err))
                return err;

        reqsk_queue_alloc(&icsk->icsk_accept_queue);

        sk->sk_ack_backlog = 0;
        inet_csk_delack_init(sk);

        /* There is a race window here: we announce ourselves listening,
         * but this transition is still not validated by get_port().
         * It is OK, because this socket enters the hash table only
         * after validation is complete.
         */
        inet_sk_state_store(sk, TCP_LISTEN);
        err = sk->sk_prot->get_port(sk, inet->inet_num);
        if (!err) {
                inet->inet_sport = htons(inet->inet_num);

                sk_dst_reset(sk);
                err = sk->sk_prot->hash(sk);

                if (likely(!err))
                        return 0;
        }

        inet_sk_set_state(sk, TCP_CLOSE);
        return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
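
/* Call-path sketch (illustrative, not part of the original file): a
 * userspace listen(fd, backlog) reaches this function through the
 * protocol's listen handler, so the ordering is: allocate the accept
 * queue, publish TCP_LISTEN, revalidate the local port via get_port(),
 * and only then hash the socket so incoming SYNs can find it; any
 * failure rolls the state back to TCP_CLOSE.
 */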

static void inet_child_forget(struct sock *sk, struct request_sock *req,
                              struct sock *child)
{
        sk->sk_prot->disconnect(child, O_NONBLOCK);

        sock_orphan(child);

        this_cpu_inc(*sk->sk_prot->orphan_count);

        if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
                BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
                BUG_ON(sk != req->rsk_listener);

                /* Paranoid, to prevent race condition if
                 * an inbound pkt destined for child is
                 * blocked by sock lock in tcp_v4_rcv().
                 * Also to satisfy an assertion in
                 * tcp_v4_destroy_sock().
                 */
                RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
        }
        inet_csk_destroy_sock(child);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
                                      struct request_sock *req,
                                      struct sock *child)
{
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

        spin_lock(&queue->rskq_lock);
        if (unlikely(sk->sk_state != TCP_LISTEN)) {
                inet_child_forget(sk, req, child);
                child = NULL;
        } else {
                req->sk = child;
                req->dl_next = NULL;
                if (queue->rskq_accept_head == NULL)
                        WRITE_ONCE(queue->rskq_accept_head, req);
                else
                        queue->rskq_accept_tail->dl_next = req;
                queue->rskq_accept_tail = req;
                sk_acceptq_added(sk);
        }
        spin_unlock(&queue->rskq_lock);
        return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
                                         struct request_sock *req, bool own_req)
{
        if (own_req) {
                inet_csk_reqsk_queue_drop(req->rsk_listener, req);
                reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);

                if (sk != req->rsk_listener) {
                        /* another listening sk has been selected,
                         * migrate the req to it.
                         */
                        struct request_sock *nreq;

                        /* hold a refcnt for the nreq->rsk_listener
                         * which is assigned in inet_reqsk_clone()
                         */
                        sock_hold(sk);
                        nreq = inet_reqsk_clone(req, sk);
                        if (!nreq) {
                                inet_child_forget(sk, req, child);
                                goto child_put;
                        }

                        refcount_set(&nreq->rsk_refcnt, 1);
                        if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
                                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
                                reqsk_migrate_reset(req);
                                reqsk_put(req);
                                return child;
                        }

                        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
                        reqsk_migrate_reset(nreq);
                        __reqsk_free(nreq);
                } else if (inet_csk_reqsk_queue_add(sk, req, child)) {
                        return child;
                }
        }
        /* Too bad, another child took ownership of the request, undo. */
child_put:
        bh_unlock_sock(child);
        sock_put(child);
        return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);
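
/* Illustrative note (not part of the original file): own_req reports who
 * won the ehash insertion race for this request.  Only the winner may
 * queue the child on an accept queue; a loser (own_req == false, e.g.
 * when two ACKs race for the same request) must discard its half-created
 * child, which is what the unlock/sock_put tail above does.
 */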

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct request_sock *next, *req;

        /* Following specs, it would be better either to send FIN
         * (and enter FIN-WAIT-1, it is normal close)
         * or to send active reset (abort).
         * Certainly, it is pretty dangerous while synflood, but it is
         * a bad justification for our negligence 8)
         * To be honest, we are not able to make either
         * of the variants now. --ANK
         */
        while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
                struct sock *child = req->sk, *nsk;
                struct request_sock *nreq;

                local_bh_disable();
                bh_lock_sock(child);
                WARN_ON(sock_owned_by_user(child));
                sock_hold(child);

                nsk = reuseport_migrate_sock(sk, child, NULL);
                if (nsk) {
                        nreq = inet_reqsk_clone(req, nsk);
                        if (nreq) {
                                refcount_set(&nreq->rsk_refcnt, 1);

                                if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
                                        __NET_INC_STATS(sock_net(nsk),
                                                        LINUX_MIB_TCPMIGRATEREQSUCCESS);
                                        reqsk_migrate_reset(req);
                                } else {
                                        __NET_INC_STATS(sock_net(nsk),
                                                        LINUX_MIB_TCPMIGRATEREQFAILURE);
                                        reqsk_migrate_reset(nreq);
                                        __reqsk_free(nreq);
                                }

                                /* inet_csk_reqsk_queue_add() has already
                                 * called inet_child_forget() on failure case.
                                 */
                                goto skip_child_forget;
                        }
                }

                inet_child_forget(sk, req, child);
skip_child_forget:
                reqsk_put(req);
                bh_unlock_sock(child);
                local_bh_enable();
                sock_put(child);

                cond_resched();
        }
        if (queue->fastopenq.rskq_rst_head) {
                /* Free all the reqs queued in rskq_rst_head. */
                spin_lock_bh(&queue->fastopenq.lock);
                req = queue->fastopenq.rskq_rst_head;
                queue->fastopenq.rskq_rst_head = NULL;
                spin_unlock_bh(&queue->fastopenq.lock);
                while (req != NULL) {
                        next = req->dl_next;
                        reqsk_put(req);
                        req = next;
                }
        }
        WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
        const struct inet_sock *inet = inet_sk(sk);

        sin->sin_family = AF_INET;
        sin->sin_addr.s_addr = inet->inet_daddr;
        sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
        __be32 daddr = inet->inet_daddr;
        struct flowi4 *fl4;
        struct rtable *rt;

        rcu_read_lock();
        inet_opt = rcu_dereference(inet->inet_opt);
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
        fl4 = &fl->u.ip4;
        rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
                                   inet->inet_saddr, inet->inet_dport,
                                   inet->inet_sport, sk->sk_protocol,
                                   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
        if (IS_ERR(rt))
                rt = NULL;
        if (rt)
                sk_setup_caps(sk, &rt->dst);
        rcu_read_unlock();

        return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);
        struct inet_sock *inet = inet_sk(sk);

        if (!dst) {
                dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
                if (!dst)
                        goto out;
        }
        dst->ops->update_pmtu(dst, sk, NULL, mtu, true);

        dst = __sk_dst_check(sk, 0);
        if (!dst)
                dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
        return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
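
/* Illustrative flow (not part of the original file): when an ICMP
 * "fragmentation needed" message reporting, say, MTU 1400 reaches a TCP
 * socket, the protocol error handler ends up calling
 * inet_csk_update_pmtu(sk, 1400).  The cached route is revalidated (or
 * rebuilt from the socket's flow key), its per-destination PMTU is
 * lowered through dst->ops->update_pmtu(), and the route is checked once
 * more since the update may have invalidated it; the caller then re-reads
 * dst_mtu() to clamp the MSS.
 */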