// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_sk*_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses
 *				if IPv6 only, and any IPv4 addresses
 *				if not IPv6 only
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 *				and 0.0.0.0 equals to 0.0.0.0 only
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				 const struct in6_addr *sk2_rcv_saddr6,
				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk1_ipv6only, bool sk2_ipv6only,
				 bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
		if (!sk2_ipv6only) {
			if (sk1_rcv_saddr == sk2_rcv_saddr)
				return true;
			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
				(match_sk2_wildcard && !sk2_rcv_saddr);
		}
		return false;
	}

	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
		return true;

	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return true;

	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return true;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
		return true;

	return false;
}
#endif

/* match_sk*_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				0.0.0.0 only equals to 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk2_ipv6only, bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_rcv_saddr == sk2_rcv_saddr)
			return true;
		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
			(match_sk2_wildcard && !sk2_rcv_saddr);
	}
	return false;
}

bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			  bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
					    inet6_rcv_saddr(sk2),
					    sk->sk_rcv_saddr,
					    sk2->sk_rcv_saddr,
					    ipv6_only_sock(sk),
					    ipv6_only_sock(sk2),
					    match_wildcard,
					    match_wildcard);
#endif
	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
				    ipv6_only_sock(sk2), match_wildcard,
				    match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);

bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
	return !sk->sk_rcv_saddr;
}

/**
 * inet_sk_get_local_port_range - fetch ephemeral ports range
 * @sk: socket
 * @low: pointer to low port
 * @high: pointer to high port
 *
 * Fetch netns port range (/proc/sys/net/ipv4/ip_local_port_range)
 * Range can be overridden if socket got IP_LOCAL_PORT_RANGE option.
 * Returns true if IP_LOCAL_PORT_RANGE was set on this socket.
 */
bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
{
	int lo, hi, sk_lo, sk_hi;
	bool local_range = false;
	u32 sk_range;

	inet_get_local_port_range(sock_net(sk), &lo, &hi);

	sk_range = READ_ONCE(inet_sk(sk)->local_port_range);
	if (unlikely(sk_range)) {
		sk_lo = sk_range & 0xffff;
		sk_hi = sk_range >> 16;

		if (lo <= sk_lo && sk_lo <= hi)
			lo = sk_lo;
		if (lo <= sk_hi && sk_hi <= hi)
			hi = sk_hi;
		local_range = true;
	}

	*low = lo;
	*high = hi;
	return local_range;
}
EXPORT_SYMBOL(inet_sk_get_local_port_range);

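/* Usage sketch (editorial, not part of the original file): the per-socket
 * override read above comes from the IP_LOCAL_PORT_RANGE socket option, which
 * packs the low bound in the lower 16 bits and the high bound in the upper
 * 16 bits of a u32. A userspace caller could look roughly like this
 * (illustrative values only):
 *
 *	u32 range = 40000 | (40100 << 16);	// lo = 40000, hi = 40100
 *	setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
 *
 * inet_sk_get_local_port_range() then clamps that window to the netns-wide
 * ip_local_port_range sysctl before the port scan below uses it.
 */
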
static bool inet_use_bhash2_on_bind(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
			return false;

		if (!ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			return true;
	}
#endif
	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
}

static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
			       kuid_t sk_uid, bool relax,
			       bool reuseport_cb_ok, bool reuseport_ok)
{
	int bound_dev_if2;

	if (sk == sk2)
		return false;

	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);

	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
	    sk->sk_bound_dev_if == bound_dev_if2) {
		if (sk->sk_reuse && sk2->sk_reuse &&
		    sk2->sk_state != TCP_LISTEN) {
			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
				       sk2->sk_reuseport && reuseport_cb_ok &&
				       (sk2->sk_state == TCP_TIME_WAIT ||
					uid_eq(sk_uid, sock_i_uid(sk2)))))
				return true;
		} else if (!reuseport_ok || !sk->sk_reuseport ||
			   !sk2->sk_reuseport || !reuseport_cb_ok ||
			   (sk2->sk_state != TCP_TIME_WAIT &&
			    !uid_eq(sk_uid, sock_i_uid(sk2)))) {
			return true;
		}
	}
	return false;
}

static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
				   kuid_t sk_uid, bool relax,
				   bool reuseport_cb_ok, bool reuseport_ok)
{
	if (ipv6_only_sock(sk2)) {
		if (sk->sk_family == AF_INET)
			return false;

#if IS_ENABLED(CONFIG_IPV6)
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			return false;
#endif
	}

	return inet_bind_conflict(sk, sk2, sk_uid, relax,
				  reuseport_cb_ok, reuseport_ok);
}

static bool inet_bhash2_conflict(const struct sock *sk,
				 const struct inet_bind2_bucket *tb2,
				 kuid_t sk_uid,
				 bool relax, bool reuseport_cb_ok,
				 bool reuseport_ok)
{
	struct sock *sk2;

	sk_for_each_bound(sk2, &tb2->owners) {
		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
					   reuseport_cb_ok, reuseport_ok))
			return true;
	}

	return false;
}

#define sk_for_each_bound_bhash(__sk, __tb2, __tb) \
	hlist_for_each_entry(__tb2, &(__tb)->bhash2, bhash_node) \
		sk_for_each_bound((__sk), &(__tb2)->owners)

/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  const struct inet_bind2_bucket *tb2, /* may be null */
				  bool relax, bool reuseport_ok)
{
	kuid_t uid = sock_i_uid((struct sock *)sk);
	struct sock_reuseport *reuseport_cb;
	bool reuseport_cb_ok;
	struct sock *sk2;

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
	 * ipv4) should have been checked already. We need to do these two
	 * checks separately because their spinlocks have to be acquired/released
	 * independently of each other, to prevent possible deadlocks
	 */
	if (inet_use_bhash2_on_bind(sk))
		return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax,
						   reuseport_cb_ok, reuseport_ok);

	/* Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners and tb2->owners list belong
	 * to the same net - the one this bucket belongs to.
	 */
	sk_for_each_bound_bhash(sk2, tb2, tb) {
		if (!inet_bind_conflict(sk, sk2, uid, relax, reuseport_cb_ok, reuseport_ok))
			continue;

		if (inet_rcv_saddr_equal(sk, sk2, true))
			return true;
	}

	return false;
}

/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
 * INADDR_ANY (if ipv4) socket.
 *
 * Caller must hold bhash hashbucket lock with local bh disabled, to protect
 * against concurrent binds on the port for addr any
 */
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
					  bool relax, bool reuseport_ok)
{
	kuid_t uid = sock_i_uid((struct sock *)sk);
	const struct net *net = sock_net(sk);
	struct sock_reuseport *reuseport_cb;
	struct inet_bind_hashbucket *head2;
	struct inet_bind2_bucket *tb2;
	bool conflict = false;
	bool reuseport_cb_ok;

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);

	spin_lock(&head2->lock);

	inet_bind_bucket_for_each(tb2, &head2->chain) {
		if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
			continue;

		if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok, reuseport_ok))
			continue;

		conflict = true;
		break;
	}

	spin_unlock(&head2->lock);

	return conflict;
}

/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket locks held if successful.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
			struct inet_bind2_bucket **tb2_ret,
			struct inet_bind_hashbucket **head2_ret, int *port_ret)
{
	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
	int i, low, high, attempt_half, port, l3mdev;
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	bool relax = false;

	l3mdev = inet_sk_bound_l3mdev(sk);
ports_exhausted:
	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_sk_get_local_port_range(sk, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000[ */
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = get_random_u32_below(remaining);
	/* __inet_hash_connect() favors ports having @low parity
	 * We do the opposite to not pollute connect() users.
	 */
	offset |= 1U;

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		if (inet_use_bhash2_on_bind(sk)) {
			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
				goto next_port;
		}

		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
		spin_lock(&head2->lock);
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (!inet_csk_bind_conflict(sk, tb, tb2,
							    relax, false))
					goto success;
				spin_unlock(&head2->lock);
				goto next_port;
			}
		tb = NULL;
		goto success;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
		/* OK we now try the upper half of the range */
		attempt_half = 2;
		goto other_half_scan;
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
		/* We still have a chance to connect to different destinations */
		relax = true;
		goto ports_exhausted;
	}
	return NULL;
success:
	*port_ret = port;
	*tb_ret = tb;
	*tb2_ret = tb2;
	*head2_ret = head2;
	return head;
}

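/* Example of the parity split (editorial, not part of the original file):
 * with the default range [32768, 60999], the scan above starts at a random
 * odd offset and steps by two, so bind() with port 0 hands out odd ports
 * such as 32769, 32771, ... while __inet_hash_connect() prefers the even
 * ones. Only once the preferred parity is exhausted does the scan flip to
 * the other half, which keeps autobind users and connect() users from
 * fighting over the same part of the ephemeral space.
 */
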
static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
				     struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);

	if (tb->fastreuseport <= 0)
		return 0;
	if (!sk->sk_reuseport)
		return 0;
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		return 0;
	if (!uid_eq(tb->fastuid, uid))
		return 0;
	/* We only need to check the rcv_saddr if this tb was once marked
	 * without fastreuseport and then was reset, as we can only know that
	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
	 * owners list.
	 */
	if (tb->fastreuseport == FASTREUSEPORT_ANY)
		return 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tb->fast_sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
					    inet6_rcv_saddr(sk),
					    tb->fast_rcv_saddr,
					    sk->sk_rcv_saddr,
					    tb->fast_ipv6_only,
					    ipv6_only_sock(sk), true, false);
#endif
	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
				    ipv6_only_sock(sk), true, false);
}

void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
			       struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->bhash2)) {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = FASTREUSEPORT_ANY;
			tb->fastuid = uid;
			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
			tb->fast_ipv6_only = ipv6_only_sock(sk);
			tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
		} else {
			tb->fastreuseport = 0;
		}
	} else {
		if (!reuse)
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			/* We didn't match or we don't have fastreuseport set on
			 * the tb, but we have sk_reuseport set on this socket
			 * and we know that there are no bind conflicts with
			 * this socket in this tb, so reset our tb's reuseport
			 * settings so that any subsequent sockets that match
			 * our current socket will be put on the fast path.
			 *
			 * If we reset we need to set FASTREUSEPORT_STRICT so we
			 * do extra checking for all subsequent sk_reuseport
			 * socks.
			 */
			if (!sk_reuseport_match(tb, sk)) {
				tb->fastreuseport = FASTREUSEPORT_STRICT;
				tb->fastuid = uid;
				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
				tb->fast_ipv6_only = ipv6_only_sock(sk);
				tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
			}
		} else {
			tb->fastreuseport = 0;
		}
	}
}

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	bool found_port = false, check_bind_conflict = true;
	bool bhash_created = false, bhash2_created = false;
	struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk);
	int ret = -EADDRINUSE, port = snum, l3mdev;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2 = NULL;
	struct inet_bind_bucket *tb = NULL;
	bool head2_lock_acquired = false;
	struct net *net = sock_net(sk);

	l3mdev = inet_sk_bound_l3mdev(sk);

	if (!port) {
		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
		if (!head)
			return ret;

		head2_lock_acquired = true;

		if (tb && tb2)
			goto success;
		found_port = true;
	} else {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
	}

	if (!tb) {
		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
					     head, port, l3mdev);
		if (!tb)
			goto fail_unlock;
		bhash_created = true;
	}

	if (!found_port) {
		if (!hlist_empty(&tb->bhash2)) {
			if (sk->sk_reuse == SK_FORCE_REUSE ||
			    (tb->fastreuse > 0 && reuse) ||
			    sk_reuseport_match(tb, sk))
				check_bind_conflict = false;
		}

		if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
				goto fail_unlock;
		}

		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
		spin_lock(&head2->lock);
		head2_lock_acquired = true;
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	}

	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
					       net, head2, tb, sk);
		if (!tb2)
			goto fail_unlock;
		bhash2_created = true;
	}

	if (!found_port && check_bind_conflict) {
		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
			goto fail_unlock;
	}

success:
	inet_csk_update_fastreuse(tb, sk);

	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, tb2, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
	ret = 0;

fail_unlock:
	if (ret) {
		if (bhash2_created)
			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, tb2);
		if (bhash_created)
			inet_bind_bucket_destroy(tb);
	}
	if (head2_lock_acquired)
		spin_unlock(&head2->lock);
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

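/* Note (editorial sketch, not part of the original file): connection-oriented
 * protocols are expected to plug this in as their ->get_port hook, e.g. TCP's
 * struct proto setting .get_port = inet_csk_get_port, so both an explicit
 * bind(2) and the autobind path in inet_csk_listen_start() below end up in
 * the conflict checks above.
 */
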
/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);

		/* If this is a non-blocking socket, don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	arg->is_empty = reqsk_queue_empty(queue);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}

out:
	release_sock(sk);
	if (newsk && mem_cgroup_sockets_enabled) {
		gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
		int amt = 0;

		/* atomically get the memory usage, set and charge the
		 * newsk->sk_memcg.
		 */
		lock_sock(newsk);

		mem_cgroup_sk_alloc(newsk);
		if (newsk->sk_memcg) {
			/* The socket has not been accepted yet, no need
			 * to look at newsk->sk_wmem_queued.
			 */
			amt = sk_mem_pages(newsk->sk_forward_alloc +
					   atomic_read(&newsk->sk_rmem_alloc));
		}

		if (amt)
			mem_cgroup_charge_skmem(newsk->sk_memcg, amt, gfp);
		kmem_cache_charge(newsk, gfp);

		release_sock(newsk);
	}
	if (req)
		reqsk_put(req);

	if (newsk)
		inet_init_csk_locks(newsk);

	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	arg->err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *t),
			       void (*delack_handler)(struct timer_list *t),
			       void (*keepalive_handler)(struct timer_list *t))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
	timer_setup(&sk->sk_timer, keepalive_handler, 0);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}

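/* Usage sketch (editorial, not part of the original file): TCP wires its
 * handlers in through this helper, roughly:
 *
 *	inet_csk_init_xmit_timers(sk, &tcp_write_timer,
 *				  &tcp_delack_timer, &tcp_keepalive_timer);
 *
 * (handler names as used by tcp_init_xmit_timers(); other connection-oriented
 * protocols such as DCCP pass their own timer callbacks the same way.)
 */
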
void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	smp_store_release(&icsk->icsk_pending, 0);
	smp_store_release(&icsk->icsk_ack.pending, 0);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}

void inet_csk_clear_xmit_timers_sync(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* ongoing timer handlers need to acquire socket lock. */
	sock_not_owned_by_me(sk);

	smp_store_release(&icsk->icsk_pending, 0);
	smp_store_release(&icsk->icsk_ack.pending, 0);

	sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
	sk_stop_timer_sync(sk, &sk->sk_timer);
}

struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	opt = rcu_dereference(ireq->ireq_opt);
	fl4 = &newinet->cork.fl.u.ip4;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

/* Decide when to expire the request and when to resend SYN-ACK */
static void syn_ack_recalc(struct request_sock *req,
			   const int max_syn_ack_retries,
			   const u8 rskq_defer_accept,
			   int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= max_syn_ack_retries;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= max_syn_ack_retries &&
		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
	/* Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

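/* Example (editorial sketch, not part of the original file): rskq_defer_accept
 * is the TCP_DEFER_ACCEPT setting converted to a retransmission count. A
 * listener could opt in with something like:
 *
 *	int secs = 5;	// keep the request pending until data arrives
 *	setsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
 *
 * With it set, a bare ACK does not complete the handshake from accept()'s
 * point of view; syn_ack_recalc() keeps the request alive (without
 * retransmitting the SYN-ACK) until data shows up or the deferring period
 * runs out.
 */
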
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}

static struct request_sock *
reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
		   bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->syncookie = 0;
	req->timeout = 0;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}
#define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))

struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
				      struct sock *sk_listener,
				      bool attach_listener)
{
	struct request_sock *req = reqsk_alloc(ops, sk_listener,
					       attach_listener);

	if (req) {
		struct inet_request_sock *ireq = inet_rsk(req);

		ireq->ireq_opt = NULL;
#if IS_ENABLED(CONFIG_IPV6)
		ireq->pktopts = NULL;
#endif
		atomic64_set(&ireq->ir_cookie, 0);
		ireq->ireq_state = TCP_NEW_SYN_RECV;
		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
		ireq->ireq_family = sk_listener->sk_family;
		req->timeout = TCP_TIMEOUT_INIT;
	}

	return req;
}
EXPORT_SYMBOL(inet_reqsk_alloc);

static struct request_sock *inet_reqsk_clone(struct request_sock *req,
					     struct sock *sk)
{
	struct sock *req_sk, *nreq_sk;
	struct request_sock *nreq;

	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!nreq) {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
		sock_put(sk);
		return NULL;
	}

	req_sk = req_to_sk(req);
	nreq_sk = req_to_sk(nreq);

	memcpy(nreq_sk, req_sk,
	       offsetof(struct sock, sk_dontcopy_begin));
	unsafe_memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
		      req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end),
		      /* alloc is larger than struct, see above */);

	sk_node_init(&nreq_sk->sk_node);
	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;

	nreq->rsk_listener = sk;

	/* We need not acquire fastopenq->lock
	 * because the child socket is locked in inet_csk_listen_stop().
	 */
	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);

	return nreq;
}

static void reqsk_queue_migrated(struct request_sock_queue *queue,
				 const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static void reqsk_migrate_reset(struct request_sock *req)
{
	req->saved_syn = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	inet_rsk(req)->ipv6_opt = NULL;
	inet_rsk(req)->pktopts = NULL;
#else
	inet_rsk(req)->ireq_opt = NULL;
#endif
}

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{
	struct sock *sk = req_to_sk(req);
	bool found = false;

	if (sk_hashed(sk)) {
		struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk);
		spinlock_t *lock;

		lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(sk);
		spin_unlock(lock);
	}

	return found;
}

static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
					struct request_sock *req,
					bool from_timer)
{
	bool unlinked = reqsk_queue_unlink(req);

	if (!from_timer && timer_delete_sync(&req->rsk_timer))
		reqsk_put(req);

	if (unlinked) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}

	return unlinked;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	return __inet_csk_reqsk_queue_drop(sk, req, false);
}

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_IPV6_MOD(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(struct timer_list *t)
{
	struct request_sock *req = timer_container_of(req, t, rsk_timer);
	struct request_sock *nreq = NULL, *oreq = req;
	struct sock *sk_listener = req->rsk_listener;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	struct net *net;
	int max_syn_ack_retries, qlen, expire = 0, resend = 0;

	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
		struct sock *nsk;

		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
		if (!nsk)
			goto drop;

		nreq = inet_reqsk_clone(req, nsk);
		if (!nreq)
			goto drop;

		/* The new timer for the cloned req can decrease the 2
		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
		 * hold another count to prevent use-after-free and
		 * call reqsk_put() just before return.
		 */
		refcount_set(&nreq->rsk_refcnt, 2 + 1);
		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);

		req = nreq;
		sk_listener = nsk;
	}

	icsk = inet_csk(sk_listener);
	net = sock_net(sk_listener);
	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	queue = &icsk->icsk_accept_queue;
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (max_syn_ack_retries > 2) {
			if (qlen < young)
				break;
			max_syn_ack_retries--;
			young <<= 1;
		}
	}
	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));

		if (!nreq)
			return;

		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
			/* delete timer */
			__inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
			goto no_ownership;
		}

		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
		reqsk_migrate_reset(oreq);
		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
		reqsk_put(oreq);

		reqsk_put(nreq);
		return;
	}

	/* Even if we can clone the req, we may need not retransmit any more
	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
	 */
	if (nreq) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
no_ownership:
		reqsk_migrate_reset(nreq);
		reqsk_queue_removed(queue, nreq);
		__reqsk_free(nreq);
	}

drop:
	__inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
	reqsk_put(oreq);
}

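/* Editorial note (not part of the original file): a hashed request socket is
 * conventionally held with refcount 2 + 1, as above - one reference for the
 * ehash table, one for the pending rsk_timer, plus one temporary reference
 * for the code path that just created it, dropped with reqsk_put() when that
 * path is done. reqsk_queue_hash_req() below sets up the same 2 + 1 scheme
 * when a request is first inserted.
 */
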
static bool reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{
	bool found_dup_sk = false;

	if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
		return false;

	/* The timer needs to be setup after a successful insertion. */
	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
	mod_timer(&req->rsk_timer, jiffies + timeout);

	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	refcount_set(&req->rsk_refcnt, 2 + 1);
	return true;
}

bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	if (!reqsk_queue_hash_req(req, timeout))
		return false;

	inet_csk_reqsk_queue_added(sk);
	return true;
}

static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
			   const gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(newsk);

	if (!icsk->icsk_ulp_ops)
		return;

	icsk->icsk_ulp_ops->clone(req, newsk, priority);
}

/**
 * inet_csk_clone_lock - clone an inet socket, and lock its clone
 * @sk: the socket to clone
 * @req: request_sock
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);
	struct inet_connection_sock *newicsk;
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newinet = inet_sk(newsk);
	ireq = inet_rsk(req);

	newicsk->icsk_bind_hash = NULL;
	newicsk->icsk_bind2_hash = NULL;

	newinet->inet_dport = ireq->ir_rmt_port;
	newinet->inet_num = ireq->ir_num;
	newinet->inet_sport = htons(ireq->ir_num);

	newsk->sk_bound_dev_if = ireq->ir_iif;

	newsk->sk_daddr = ireq->ir_rmt_addr;
	newsk->sk_rcv_saddr = ireq->ir_loc_addr;
	newinet->inet_saddr = ireq->ir_loc_addr;

#if IS_ENABLED(CONFIG_IPV6)
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
#endif

	/* listeners have SOCK_RCU_FREE, not the children */
	sock_reset_flag(newsk, SOCK_RCU_FREE);

	inet_sk(newsk)->mc_list = NULL;

	newsk->sk_mark = inet_rsk(req)->ir_mark;
	atomic64_set(&newsk->sk_cookie,
		     atomic64_read(&inet_rsk(req)->ir_cookie));

	newicsk->icsk_retransmits = 0;
	newicsk->icsk_backoff = 0;
	newicsk->icsk_probes_out = 0;
	newicsk->icsk_probes_tstamp = 0;

	/* Deinitialize accept_queue to trap illegal accesses. */
	memset(&newicsk->icsk_accept_queue, 0,
	       sizeof(newicsk->icsk_accept_queue));

	inet_sk_set_state(newsk, TCP_SYN_RECV);

	inet_clone_ulp(req, newsk, priority);

	security_inet_csk_clone(newsk, req);

	return newsk;
}

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has not 0 inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	this_cpu_dec(*sk->sk_prot->orphan_count);

	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows to force a closure of a socket after the call to
 * tcp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);
	inet_csk_prepare_for_destroy_sock(sk);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

static int inet_ulp_can_listen(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
		return -EINVAL;

	return 0;
}

int inet_csk_listen_start(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err;

	err = inet_ulp_can_listen(sk);
	if (unlikely(err))
		return err;

	reqsk_queue_alloc(&icsk->icsk_accept_queue);

	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters to hash table only
	 * after validation is complete.
	 */
	inet_sk_state_store(sk, TCP_LISTEN);
	err = sk->sk_prot->get_port(sk, inet->inet_num);
	if (!err) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		err = sk->sk_prot->hash(sk);

		if (likely(!err))
			return 0;
	}

	inet_sk_set_state(sk, TCP_CLOSE);
	return err;
}

static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{
	sk->sk_prot->disconnect(child, O_NONBLOCK);

	sock_orphan(child);

	this_cpu_inc(*sk->sk_prot->orphan_count);

	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
		BUG_ON(sk != req->rsk_listener);

		/* Paranoid, to prevent race condition if
		 * an inbound pkt destined for child is
		 * blocked by sock lock in tcp_v4_rcv().
		 * Also to satisfy an assertion in
		 * tcp_v4_destroy_sock().
		 */
		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
	}
	inet_csk_destroy_sock(child);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

	spin_lock(&queue->rskq_lock);
	if (unlikely(sk->sk_state != TCP_LISTEN)) {
		inet_child_forget(sk, req, child);
		child = NULL;
	} else {
		req->sk = child;
		req->dl_next = NULL;
		if (queue->rskq_accept_head == NULL)
			WRITE_ONCE(queue->rskq_accept_head, req);
		else
			queue->rskq_accept_tail->dl_next = req;
		queue->rskq_accept_tail = req;
		sk_acceptq_added(sk);
	}
	spin_unlock(&queue->rskq_lock);
	return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{
	if (own_req) {
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);

		if (sk != req->rsk_listener) {
			/* another listening sk has been selected,
			 * migrate the req to it.
			 */
			struct request_sock *nreq;

			/* hold a refcnt for the nreq->rsk_listener
			 * which is assigned in inet_reqsk_clone()
			 */
			sock_hold(sk);
			nreq = inet_reqsk_clone(req, sk);
			if (!nreq) {
				inet_child_forget(sk, req, child);
				goto child_put;
			}

			refcount_set(&nreq->rsk_refcnt, 1);
			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
				reqsk_migrate_reset(req);
				reqsk_put(req);
				return child;
			}

			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
			reqsk_migrate_reset(nreq);
			__reqsk_free(nreq);
		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
			return child;
		}
	}
	/* Too bad, another child took ownership of the request, undo. */
child_put:
	bh_unlock_sock(child);
	sock_put(child);
	return NULL;
}

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *next, *req;

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk, *nsk;
		struct request_sock *nreq;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		nsk = reuseport_migrate_sock(sk, child, NULL);
		if (nsk) {
			nreq = inet_reqsk_clone(req, nsk);
			if (nreq) {
				refcount_set(&nreq->rsk_refcnt, 1);

				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
					__NET_INC_STATS(sock_net(nsk),
							LINUX_MIB_TCPMIGRATEREQSUCCESS);
					reqsk_migrate_reset(req);
				} else {
					__NET_INC_STATS(sock_net(nsk),
							LINUX_MIB_TCPMIGRATEREQFAILURE);
					reqsk_migrate_reset(nreq);
					__reqsk_free(nreq);
				}

				/* inet_csk_reqsk_queue_add() has already
				 * called inet_child_forget() on failure case.
				 */
				goto skip_child_forget;
			}
		}

		inet_child_forget(sk, req, child);
skip_child_forget:
		reqsk_put(req);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		cond_resched();
	}
	if (queue->fastopenq.rskq_rst_head) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq.lock);
		req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while (req != NULL) {
			next = req->dl_next;
			reqsk_put(req);
			req = next;
		}
	}
	WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	fl4 = &fl->u.ip4;
	inet_sk_init_flowi4(inet, fl4);
	rt = ip_route_output_flow(sock_net(sk), fl4, sk);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}