/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = SEQLOCK_UNLOCKED,
	.range = { 32768, 61000 },
};

void inet_get_local_port_range(int *low, int *high)
{
	unsigned seq;
	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = sysctl_local_ports.range[0];
		*high = sysctl_local_ports.range[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

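/*
 * Usage sketch (illustrative, not from the original sources): callers
 * read the pair and size their ephemeral-port search from it, exactly
 * as inet_csk_get_port() below does. The seqlock loop above guarantees
 * a consistent {low, high} snapshot even if the sysctl is rewritten
 * concurrently.
 *
 *	int low, high, remaining;
 *
 *	inet_get_local_port_range(&low, &high);
 *	remaining = (high - low) + 1;	// 61000 - 32768 + 1 = 28233 by default
 */
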
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb)
{
	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
				    sk2_rcv_saddr == sk_rcv_saddr)
					break;
			}
		}
	}
	return node != NULL;
}

EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

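/*
 * Seen from user space, the rule enforced above is roughly: two sockets
 * may share {address, port} only if all of them set SO_REUSEADDR and
 * none is listening, or if their bound addresses/devices do not overlap
 * (a wildcard address overlaps everything). A minimal standalone sketch
 * with a hypothetical helper, not kernel code:
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int try_bind(in_addr_t addr, unsigned short port, int reuse)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(addr);
	sin.sin_port = htons(port);
	/* fails with EADDRINUSE when the kernel sees a bind conflict */
	return bind(fd, (struct sockaddr *)&sin, sizeof(sin));
}
#endif
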
/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
		do {
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (tb->fastreuse > 0 &&
					    sk->sk_reuse &&
					    sk->sk_state != TCP_LISTEN &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
							spin_unlock(&head->lock);
							snum = smallest_rover;
							goto have_snum;
						}
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its lock.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
				if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}
				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}

EXPORT_SYMBOL_GPL(inet_csk_get_port);

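/*
 * The port selection above, reduced to its core (a simplified sketch
 * with the locking, bind buckets and SO_REUSEADDR shortcuts stripped
 * out): start at a random point in [low, high], probe each port once,
 * and wrap at the top of the range.
 */
#if 0
static int pick_port(int low, int high, bool (*in_use)(int))
{
	int remaining = (high - low) + 1;
	int rover = net_random() % remaining + low;

	do {
		if (!in_use(rover))
			return rover;	/* free port found */
		if (++rover > high)
			rover = low;	/* wrap around at the top */
	} while (--remaining > 0);
	return -1;			/* local port range exhausted */
}
#endif
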
/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}

EXPORT_SYMBOL(inet_csk_accept);

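/*
 * The -EAGAIN path above is what a non-blocking accept() surfaces in
 * user space (illustrative fragment, not kernel code):
 *
 *	int c = accept(listen_fd, NULL, NULL);
 *	if (c < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
 *		;	// queue empty: poll for readability, then retry
 */
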
/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}

EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}

EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(struct sock *sk,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .mark = sk->sk_mark,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr :
						  ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,
			    .flags = inet_sk_flowi_flags(sk),
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->inet_sport,
					 .dport = ireq->rmt_port } } };
	struct net *net = sock_net(sk);

	security_req_classify_flow(req, &fl);
	if (ip_route_output_flow(net, &rt, &fl, sk, 0))
		goto no_route;
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto route_err;
	return &rt->u.dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}

EXPORT_SYMBOL_GPL(inet_csk_route_req);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

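/*
 * Note on inet_synq_hash() above: the mask works because the SYN table
 * size is a power of two, so "& (synq_hsize - 1)" is equivalent to
 * "% synq_hsize". Worked example (values illustrative): with
 * synq_hsize = 512, any 32-bit jhash result is reduced to a bucket
 * index in [0, 511] by masking with 0x1ff.
 */
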
struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}

EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->retrans >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->retrans >= thresh &&
		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->retrans >= rskq_defer_accept - 1;
}

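/*
 * Worked decision table for syn_ack_recalc(), derived from the code
 * above (thresh = 5, max_retries = 3, rskq_defer_accept = 3 are
 * illustrative values):
 *
 *	retrans	acked	expire	resend
 *	   1	  1	  0	  0	deferring: hold off the resend
 *	   2	  1	  0	  1	defer period ending, last chance
 *	   2	  0	  0	  1	below thresh, keep resending
 *	   5	  0	  1	  1	thresh hit and never ACKed
 *	   5	  1	  1	  1	ACKed, but max_retries also hit
 */
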
void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) within the first timeout.
	 * If synack was not acknowledged for 3 seconds, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				if (!expire &&
				    (!resend ||
				     !req->rsk_ops->rtx_syn_ack(parent, req) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((timeout << req->retrans), max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

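/*
 * Backoff arithmetic in the prune loop above, worked through with
 * illustrative values (timeout = 3 * HZ, max_rto = 120 * HZ):
 *
 *	retrans (after ++):	1    2    3    4    5    6
 *	timeout << retrans:	6s   12s  24s  48s  96s  192s
 *	min(..., max_rto):	6s   12s  24s  48s  96s  120s
 *
 * i.e. the SYN-ACK retransmit interval doubles each round and
 * saturates at max_rto.
 */
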
struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
			    const gfp_t priority)
{
	struct sock *newsk = sk_clone(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(inet_csk_clone);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If inet_sk(sk)->inet_num is non-zero, the socket must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}

EXPORT_SYMBOL(inet_csk_destroy_sock);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}

EXPORT_SYMBOL_GPL(inet_csk_listen_start);

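/*
 * Path by which this is reached (illustrative, plain sockets API): a
 * listen() on a bound stream socket lands here through sk->sk_prot,
 * with the backlog sizing the SYN table.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	// bind(fd, ...) reserves inet->inet_num via get_port()
 *	listen(fd, 128);	// -> inet_csk_listen_start(sk, 128)
 */
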
/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now. --ANK
	 */
	reqsk_queue_destroy(&icsk->icsk_accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	WARN_ON(sk->sk_ack_backlog);
}

EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

717 | |
718 | void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) | |
719 | { | |
720 | struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; | |
721 | const struct inet_sock *inet = inet_sk(sk); | |
722 | ||
723 | sin->sin_family = AF_INET; | |
c720c7e8 ED |
724 | sin->sin_addr.s_addr = inet->inet_daddr; |
725 | sin->sin_port = inet->inet_dport; | |
af05dc93 ACM |
726 | } |
727 | ||
728 | EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); | |
c4d93909 | 729 | |
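/*
 * The daddr/dport pair copied out here is the same peer identity that
 * user space reads back with getpeername() on a connected socket
 * (illustrative fragment, not kernel code):
 *
 *	struct sockaddr_in peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getpeername(fd, (struct sockaddr *)&peer, &len) == 0)
 *		printf("%s:%u\n", inet_ntoa(peer.sin_addr),
 *		       ntohs(peer.sin_port));
 */
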
#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif