/* net/ipv4/inet_connection_sock.c */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

void inet_get_local_port_range(struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = net->ipv4.ip_local_ports.range[0];
		*high = net->ipv4.ip_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
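
/*
 * The do/while above is the standard seqlock read pattern: a writer
 * updating the range bumps the sequence counter around its stores, so
 * a reader that raced with an update simply retries until it observes
 * a consistent {low, high} pair, without ever blocking the writer.
 * This is the range exposed through the net.ipv4.ip_local_port_range
 * sysctl.
 */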

int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			     sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			     (sk2->sk_state != TCP_TIME_WAIT &&
			      !uid_eq(uid, sock_i_uid(sk2))))) {

				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {

				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
		}
	}
	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
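
/*
 * In short, two sockets may share {address, port} only if either
 * (a) both set SO_REUSEADDR and the already-bound one is not
 * listening, or (b) both set SO_REUSEPORT and belong to the same
 * user (or the old one is in TIME_WAIT).  A purely illustrative
 * user-space sketch of case (b), not part of this file:
 *
 *	int on = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	// succeeds alongside another SO_REUSEPORT socket of the
 *	// same uid already bound to the same address and port.
 */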

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;
	kuid_t uid = sock_i_uid(sk);

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = prandom_u32() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_local_reserved_port(net, rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (((tb->fastreuse > 0 &&
					      sk->sk_reuse &&
					      sk->sk_state != TCP_LISTEN) ||
					     (tb->fastreuseport > 0 &&
					      sk->sk_reuseport &&
					      uid_eq(tb->fastuid, uid))) &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
							snum = smallest_rover;
							goto tb_found;
						}
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (((tb->fastreuse > 0 &&
		      sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
		     (tb->fastreuseport > 0 &&
		      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
				if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
				     (tb->fastreuseport > 0 &&
				      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}

				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			tb->fastreuseport = 1;
			tb->fastuid = uid;
		} else
			tb->fastreuseport = 0;
	} else {
		if (tb->fastreuse &&
		    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
			tb->fastreuse = 0;
		if (tb->fastreuseport &&
		    (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
			tb->fastreuseport = 0;
	}
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
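
/*
 * Ephemeral port selection above, in outline: start from a random
 * offset in [low, high] and probe linearly, skipping ports reserved
 * via ip_local_reserved_ports.  A port whose bind bucket shows no
 * conflict is taken on the spot; otherwise the least-crowded bucket
 * seen so far is remembered in smallest_rover and retried (with
 * relaxed conflict rules) once the whole range has been scanned, so
 * bind() degrades gracefully under port exhaustion instead of failing
 * outright.
 */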

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue);
	newsk = req->sk;

	sk_acceptq_removed(sk);
	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener &&
	    queue->fastopenq) {
		spin_lock_bh(&queue->fastopenq->lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq->lock);
	}
out:
	release_sock(sk);
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
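
/*
 * This is what backs accept(2) for TCP sockets.  A minimal user-space
 * sketch of the path above (illustrative only, error handling elided):
 *
 *	int lfd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(lfd, 128);
 *	int cfd = accept(lfd, NULL, NULL);	// may block in
 *						// inet_csk_wait_for_connect()
 */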

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt = ireq->opt;
	struct rtable *rt;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num));
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
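
/*
 * Used when the listener answers a SYN: the flow is keyed entirely off
 * the request_sock (peer address/port, local address, mark, incoming
 * interface), and a source-route option recorded from the SYN
 * overrides the destination.  A strict source route must additionally
 * refuse any result that would detour via a gateway, hence the
 * rt_uses_gateway test above.
 */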

struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num));
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
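
/*
 * synq_hsize is always a power of two (reqsk_queue_alloc() rounds
 * nr_table_entries up), so masking with (synq_hsize - 1) reduces the
 * jhash value to a valid table index.  hash_rnd is a per-listener
 * random seed that keeps the chain layout unpredictable to remote
 * SYN flooders.
 */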

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) true
#endif

/* Note: this is temporary:
 * req sock will no longer be in listener hash table
 */
struct request_sock *inet_csk_search_req(struct sock *sk,
					 const __be16 rport,
					 const __be32 raddr,
					 const __be32 laddr)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req;
	u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
				  lopt->nr_table_entries);

	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->ir_rmt_port == rport &&
		    ireq->ir_rmt_addr == raddr &&
		    ireq->ir_loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			atomic_inc(&req->rsk_refcnt);
			WARN_ON(req->sk);
			break;
		}
	}
	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
				     inet_rsk(req)->ir_rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}
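
/*
 * With TCP_DEFER_ACCEPT, the final ACK of the handshake alone does not
 * make the connection acceptable: the request is kept until data
 * arrives or the deferring period lapses, and only then is the SYN-ACK
 * resent as a last chance.  Illustrative user-space usage, not part of
 * this file:
 *
 *	int secs = 5;	// wait up to ~5s for the first data
 *	setsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
 */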

int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

/* return true if req was found in the syn_table[] */
static bool reqsk_queue_unlink(struct request_sock_queue *queue,
			       struct request_sock *req)
{
	struct listen_sock *lopt = queue->listen_opt;
	struct request_sock **prev;
	bool found = false;

	spin_lock(&queue->syn_wait_lock);

	for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
	     prev = &(*prev)->dl_next) {
		if (*prev == req) {
			*prev = req->dl_next;
			found = true;
			break;
		}
	}

	spin_unlock(&queue->syn_wait_lock);
	if (del_timer(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

static void reqsk_timer_handler(unsigned long data)
{
	struct request_sock *req = (struct request_sock *)data;
	struct sock *sk_listener = req->rsk_listener;
	struct inet_connection_sock *icsk = inet_csk(sk_listener);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int qlen, expire = 0, resend = 0;
	int max_retries, thresh;
	u8 defer_accept;

	if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
		reqsk_put(req);
		return;
	}

	max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	thresh = max_retries;
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	qlen = listen_sock_qlen(lopt);
	if (qlen >> (lopt->max_qlen_log - 1)) {
		int young = listen_sock_young(lopt) << 1;

		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	defer_accept = READ_ONCE(queue->rskq_defer_accept);
	if (defer_accept)
		max_retries = defer_accept;
	syn_ack_recalc(req, thresh, max_retries, defer_accept,
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		unsigned long timeo;

		if (req->num_timeout++ == 0)
			atomic_inc(&lopt->young_dec);
		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
		return;
	}
	inet_csk_reqsk_queue_drop(sk_listener, req);
	reqsk_put(req);
}
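
/*
 * The rearm above implements the classic exponential SYN-ACK backoff:
 * with TCP_TIMEOUT_INIT at one second, retransmissions fire roughly
 * 1s, 2s, 4s, 8s, ... after the SYN, capped at TCP_RTO_MAX (120s).
 * Under queue pressure the while loop above shrinks 'thresh', so old
 * embryonic connections get expired earlier to preserve room for
 * young ones.
 */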

void reqsk_queue_hash_req(struct request_sock_queue *queue,
			  u32 hash, struct request_sock *req,
			  unsigned long timeout)
{
	struct listen_sock *lopt = queue->listen_opt;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	atomic_set(&req->rsk_refcnt, 2);
	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
	req->rsk_hash = hash;

	spin_lock(&queue->syn_wait_lock);
	req->dl_next = lopt->syn_table[hash];
	lopt->syn_table[hash] = req;
	spin_unlock(&queue->syn_wait_lock);

	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
}
EXPORT_SYMBOL(reqsk_queue_hash_req);
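
/*
 * The initial refcount of 2 covers the two long-lived references
 * created here: one owned by the syn_table hash chain and one by the
 * pinned rsk_timer.  reqsk_queue_unlink() and reqsk_timer_handler()
 * each drop their own reference, so the request_sock goes away only
 * once it is both unhashed and its timer has fired or been cancelled.
 */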

/**
 * inet_csk_clone_lock - clone an inet socket, and lock its clone
 * @sk: the socket to clone
 * @req: request_sock
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
		newsk->sk_write_space = sk_stream_write_space;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff = 0;
		newicsk->icsk_probes_out = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows forcing closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
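
/*
 * This is the core of listen(2) for connection-oriented INET sockets:
 * allocate the SYN backlog table, flip the socket to TCP_LISTEN, and
 * revalidate the local port, which may have become contended since
 * bind().  nr_table_entries derives from the backlog argument passed
 * by user space, clamped along the way by the net.core.somaxconn and
 * net.ipv4.tcp_max_syn_backlog sysctls.
 */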

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *acc_req;
	struct request_sock *req;

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now. --ANK
	 */
	reqsk_queue_destroy(queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
			BUG_ON(sk != req->rsk_listener);

			/* Paranoid, to prevent race condition if
			 * an inbound pkt destined for child is
			 * blocked by sock lock in tcp_v4_rcv().
			 * Also to satisfy an assertion in
			 * tcp_v4_destroy_sock().
			 */
			tcp_sk(child)->fastopen_rsk = NULL;
		}
		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		reqsk_put(req);
	}
	if (queue->fastopenq) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq->lock);
		acc_req = queue->fastopenq->rskq_rst_head;
		queue->fastopenq->rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq->lock);
		while ((req = acc_req) != NULL) {
			acc_req = req->dl_next;
			reqsk_put(req);
		}
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->inet_daddr;
	sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

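	/* struct dst_entry is the first member of struct rtable, so when
	 * the lookup failed and rt was reset to NULL above, &rt->dst
	 * below evaluates to NULL as well rather than to a stray pointer.
	 */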
	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
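
/*
 * Typically reached from the ICMP error handlers (e.g. a fragmentation
 * needed message landing in tcp_v4_err()): the cached route, rebuilt
 * first if the socket lost it, is told about the new path MTU, and if
 * that update invalidated the entry it is rebuilt once more so the
 * caller gets back a usable dst.
 */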