Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * Implementation of the Transmission Control Protocol(TCP). | |
7 | * | |
8 | * Version: $Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $ | |
9 | * | |
10 | * IPv4 specific functions | |
11 | * | |
12 | * | |
13 | * code split from: | |
14 | * linux/ipv4/tcp.c | |
15 | * linux/ipv4/tcp_input.c | |
16 | * linux/ipv4/tcp_output.c | |
17 | * | |
18 | * See tcp.c for author information | |
19 | * | |
20 | * This program is free software; you can redistribute it and/or | |
21 | * modify it under the terms of the GNU General Public License | |
22 | * as published by the Free Software Foundation; either version | |
23 | * 2 of the License, or (at your option) any later version. | |
24 | */ | |
25 | ||
26 | /* | |
27 | * Changes: | |
28 | * David S. Miller : New socket lookup architecture. | |
29 | * This code is dedicated to John Dyson. | |
30 | * David S. Miller : Change semantics of established hash, | |
31 | * half is devoted to TIME_WAIT sockets | |
32 | * and the rest go in the other half. | |
33 | * Andi Kleen : Add support for syncookies and fixed | |
34 | * some bugs: ip options weren't passed to | |
35 | * the TCP layer, missed a check for an | |
36 | * ACK bit. | |
37 | * Andi Kleen : Implemented fast path mtu discovery. | |
38 | * Fixed many serious bugs in the | |
60236fdd | 39 | * request_sock handling and moved |
1da177e4 LT |
40 | * most of it into the af independent code. |
41 | * Added tail drop and some other bugfixes. | |
caa20d9a | 42 | * Added new listen semantics. |
1da177e4 LT |
43 | * Mike McLagan : Routing by source |
44 | * Juan Jose Ciarlante: ip_dynaddr bits | |
45 | * Andi Kleen: various fixes. | |
46 | * Vitaly E. Lavrov : Transparent proxy revived after a year | |
47 | * in a coma. | |
48 | * Andi Kleen : Fix new listen. | |
49 | * Andi Kleen : Fix accept error reporting. | |
50 | * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which | |
51 | * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind | |
52 | * a single port at the same time. | |
53 | */ | |
54 | ||
55 | #include <linux/config.h> | |
56 | ||
57 | #include <linux/types.h> | |
58 | #include <linux/fcntl.h> | |
59 | #include <linux/module.h> | |
60 | #include <linux/random.h> | |
61 | #include <linux/cache.h> | |
62 | #include <linux/jhash.h> | |
63 | #include <linux/init.h> | |
64 | #include <linux/times.h> | |
65 | ||
66 | #include <net/icmp.h> | |
304a1618 | 67 | #include <net/inet_hashtables.h> |
1da177e4 | 68 | #include <net/tcp.h> |
20380731 | 69 | #include <net/transp_v6.h> |
1da177e4 LT |
70 | #include <net/ipv6.h> |
71 | #include <net/inet_common.h> | |
6d6ee43e | 72 | #include <net/timewait_sock.h> |
1da177e4 LT |
73 | #include <net/xfrm.h> |
74 | ||
75 | #include <linux/inet.h> | |
76 | #include <linux/ipv6.h> | |
77 | #include <linux/stddef.h> | |
78 | #include <linux/proc_fs.h> | |
79 | #include <linux/seq_file.h> | |
80 | ||
1da177e4 LT |
81 | int sysctl_tcp_tw_reuse; |
82 | int sysctl_tcp_low_latency; | |
83 | ||
84 | /* Check TCP sequence numbers in ICMP packets. */ | |
85 | #define ICMP_MIN_LENGTH 8 | |
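/* 8 bytes is the minimum slice of the offending TCP header that an ICMP
 * error must echo back (RFC 792): just enough for the ports and the
 * sequence number we need below.
 */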
86 | ||
87 | /* Socket used for sending RSTs */ | |
88 | static struct socket *tcp_socket; | |
89 | ||
8292a17a | 90 | void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); |
1da177e4 | 91 | |
0f7ff927 ACM |
92 | struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { |
93 | .lhash_lock = RW_LOCK_UNLOCKED, | |
94 | .lhash_users = ATOMIC_INIT(0), | |
95 | .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), | |
1da177e4 LT |
96 | }; |
97 | ||
463c84b9 ACM |
98 | static int tcp_v4_get_port(struct sock *sk, unsigned short snum) |
99 | { | |
971af18b ACM |
100 | return inet_csk_get_port(&tcp_hashinfo, sk, snum, |
101 | inet_csk_bind_conflict); | |
463c84b9 ACM |
102 | } |
103 | ||
1da177e4 LT |
104 | static void tcp_v4_hash(struct sock *sk) |
105 | { | |
81849d10 | 106 | inet_hash(&tcp_hashinfo, sk); |
1da177e4 LT |
107 | } |
108 | ||
109 | void tcp_unhash(struct sock *sk) | |
110 | { | |
81849d10 | 111 | inet_unhash(&tcp_hashinfo, sk); |
1da177e4 LT |
112 | } |
113 | ||
1da177e4 LT |
114 | static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb) |
115 | { | |
116 | return secure_tcp_sequence_number(skb->nh.iph->daddr, | |
117 | skb->nh.iph->saddr, | |
118 | skb->h.th->dest, | |
119 | skb->h.th->source); | |
120 | } | |
121 | ||
6d6ee43e ACM |
122 | int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) |
123 | { | |
124 | const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw); | |
125 | struct tcp_sock *tp = tcp_sk(sk); | |
126 | ||
127 | /* With PAWS, it is safe from the viewpoint | |
128 | of data integrity. Even without PAWS it is safe, provided sequence | |
129 | spaces do not overlap, i.e. at data rates <= 80Mbit/sec. | |
130 | ||
131 | Actually, the idea is close to VJ's: the timestamp cache is | |
132 | held not per host but per port pair, and the TW bucket is used as the | |
133 | state holder. | |
134 | ||
135 | If the TW bucket has already been destroyed we fall back to VJ's scheme | |
136 | and use the initial timestamp retrieved from the peer table. | |
137 | */ | |
138 | if (tcptw->tw_ts_recent_stamp && | |
139 | (twp == NULL || (sysctl_tcp_tw_reuse && | |
140 | xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) { | |
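/* The 65535 + 2 offset below presumably skips past the largest window
 * the old connection could have advertised, plus its SYN and FIN, so
 * the new ISN cannot collide with data from the TIME-WAIT connection.
 */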
141 | tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; | |
142 | if (tp->write_seq == 0) | |
143 | tp->write_seq = 1; | |
144 | tp->rx_opt.ts_recent = tcptw->tw_ts_recent; | |
145 | tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; | |
146 | sock_hold(sktw); | |
147 | return 1; | |
148 | } | |
149 | ||
150 | return 0; | |
151 | } | |
152 | ||
153 | EXPORT_SYMBOL_GPL(tcp_twsk_unique); | |
154 | ||
1da177e4 LT |
155 | /* This will initiate an outgoing connection. */ |
156 | int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |
157 | { | |
158 | struct inet_sock *inet = inet_sk(sk); | |
159 | struct tcp_sock *tp = tcp_sk(sk); | |
160 | struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; | |
161 | struct rtable *rt; | |
162 | u32 daddr, nexthop; | |
163 | int tmp; | |
164 | int err; | |
165 | ||
166 | if (addr_len < sizeof(struct sockaddr_in)) | |
167 | return -EINVAL; | |
168 | ||
169 | if (usin->sin_family != AF_INET) | |
170 | return -EAFNOSUPPORT; | |
171 | ||
172 | nexthop = daddr = usin->sin_addr.s_addr; | |
173 | if (inet->opt && inet->opt->srr) { | |
174 | if (!daddr) | |
175 | return -EINVAL; | |
176 | nexthop = inet->opt->faddr; | |
177 | } | |
178 | ||
179 | tmp = ip_route_connect(&rt, nexthop, inet->saddr, | |
180 | RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, | |
181 | IPPROTO_TCP, | |
182 | inet->sport, usin->sin_port, sk); | |
183 | if (tmp < 0) | |
184 | return tmp; | |
185 | ||
186 | if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { | |
187 | ip_rt_put(rt); | |
188 | return -ENETUNREACH; | |
189 | } | |
190 | ||
191 | if (!inet->opt || !inet->opt->srr) | |
192 | daddr = rt->rt_dst; | |
193 | ||
194 | if (!inet->saddr) | |
195 | inet->saddr = rt->rt_src; | |
196 | inet->rcv_saddr = inet->saddr; | |
197 | ||
198 | if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) { | |
199 | /* Reset inherited state */ | |
200 | tp->rx_opt.ts_recent = 0; | |
201 | tp->rx_opt.ts_recent_stamp = 0; | |
202 | tp->write_seq = 0; | |
203 | } | |
204 | ||
295ff7ed | 205 | if (tcp_death_row.sysctl_tw_recycle && |
1da177e4 LT |
206 | !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { |
207 | struct inet_peer *peer = rt_get_peer(rt); | |
208 | ||
209 | /* VJ's idea. We save the last timestamp seen from | |
210 | * the destination in the peer table when entering TIME-WAIT state, | |
211 | * and initialize rx_opt.ts_recent from it when trying a new connection. | |
212 | */ | |
213 | ||
214 | if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) { | |
215 | tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; | |
216 | tp->rx_opt.ts_recent = peer->tcp_ts; | |
217 | } | |
218 | } | |
219 | ||
220 | inet->dport = usin->sin_port; | |
221 | inet->daddr = daddr; | |
222 | ||
d83d8461 | 223 | inet_csk(sk)->icsk_ext_hdr_len = 0; |
1da177e4 | 224 | if (inet->opt) |
d83d8461 | 225 | inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; |
1da177e4 LT |
226 | |
227 | tp->rx_opt.mss_clamp = 536; | |
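/* 536 = 576 (minimum IPv4 reassembly size, RFC 1122) minus 40 bytes of
 * IP + TCP headers: the safe default MSS when nothing better is known.
 */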
228 | ||
229 | /* Socket identity is still unknown (sport may be zero). | |
230 | * However we set state to SYN-SENT and, without releasing the socket | |
231 | * lock, select a source port, enter ourselves into the hash tables and | |
232 | * complete initialization after this. | |
233 | */ | |
234 | tcp_set_state(sk, TCP_SYN_SENT); | |
a7f5e7f1 | 235 | err = inet_hash_connect(&tcp_death_row, sk); |
1da177e4 LT |
236 | if (err) |
237 | goto failure; | |
238 | ||
239 | err = ip_route_newports(&rt, inet->sport, inet->dport, sk); | |
240 | if (err) | |
241 | goto failure; | |
242 | ||
243 | /* OK, now commit destination to socket. */ | |
6cbb0df7 | 244 | sk_setup_caps(sk, &rt->u.dst); |
1da177e4 LT |
245 | |
246 | if (!tp->write_seq) | |
247 | tp->write_seq = secure_tcp_sequence_number(inet->saddr, | |
248 | inet->daddr, | |
249 | inet->sport, | |
250 | usin->sin_port); | |
251 | ||
252 | inet->id = tp->write_seq ^ jiffies; | |
253 | ||
254 | err = tcp_connect(sk); | |
255 | rt = NULL; | |
256 | if (err) | |
257 | goto failure; | |
258 | ||
259 | return 0; | |
260 | ||
261 | failure: | |
262 | /* This unhashes the socket and releases the local port, if necessary. */ | |
263 | tcp_set_state(sk, TCP_CLOSE); | |
264 | ip_rt_put(rt); | |
265 | sk->sk_route_caps = 0; | |
266 | inet->dport = 0; | |
267 | return err; | |
268 | } | |
269 | ||
1da177e4 LT |
270 | /* |
271 | * This routine does path mtu discovery as defined in RFC1191. | |
272 | */ | |
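/* In outline: clamp the cached route MTU to the ICMP-reported value,
 * remember a soft EMSGSIZE error, and if our current MSS is now too
 * large, resync it and retransmit immediately instead of waiting for
 * the retransmit timer.
 */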
40efc6fa | 273 | static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu) |
1da177e4 LT |
274 | { |
275 | struct dst_entry *dst; | |
276 | struct inet_sock *inet = inet_sk(sk); | |
1da177e4 LT |
277 | |
278 | /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs | |
279 | * sent out by Linux are always < 576 bytes, so they should go through | |
280 | * unfragmented). | |
281 | */ | |
282 | if (sk->sk_state == TCP_LISTEN) | |
283 | return; | |
284 | ||
285 | /* We don't check in the dst entry whether pmtu discovery is forbidden | |
286 | * on this route. We just assume that no packet-too-big packets | |
287 | * are sent back when pmtu discovery is not active. | |
288 | * There is a small race when the user changes this flag in the | |
289 | * route, but I think that's acceptable. | |
290 | */ | |
291 | if ((dst = __sk_dst_check(sk, 0)) == NULL) | |
292 | return; | |
293 | ||
294 | dst->ops->update_pmtu(dst, mtu); | |
295 | ||
296 | /* Something is about to go wrong... Remember the soft error | |
297 | * for the case that this connection will not be able to recover. | |
298 | */ | |
299 | if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) | |
300 | sk->sk_err_soft = EMSGSIZE; | |
301 | ||
302 | mtu = dst_mtu(dst); | |
303 | ||
304 | if (inet->pmtudisc != IP_PMTUDISC_DONT && | |
d83d8461 | 305 | inet_csk(sk)->icsk_pmtu_cookie > mtu) { |
1da177e4 LT |
306 | tcp_sync_mss(sk, mtu); |
307 | ||
308 | /* Resend the TCP packet because it's | |
309 | * clear that the old packet has been | |
310 | * dropped. This is the new "fast" path mtu | |
311 | * discovery. | |
312 | */ | |
313 | tcp_simple_retransmit(sk); | |
314 | } /* else let the usual retransmit timer handle it */ | |
315 | } | |
316 | ||
317 | /* | |
318 | * This routine is called by the ICMP module when it gets some | |
319 | * sort of error condition. If err < 0 then the socket should | |
320 | * be closed and the error returned to the user. If err > 0 | |
321 | * it's just the icmp type << 8 | icmp code. After adjustment, the | |
322 | * header points to the first 8 bytes of the TCP header. We need | |
323 | * to find the appropriate port. | |
324 | * | |
325 | * The locking strategy used here is very "optimistic". When | |
326 | * someone else accesses the socket the ICMP is just dropped | |
327 | * and for some paths there is no check at all. | |
328 | * A more general error queue to queue errors for later handling | |
329 | * is probably better. | |
330 | * | |
331 | */ | |
332 | ||
333 | void tcp_v4_err(struct sk_buff *skb, u32 info) | |
334 | { | |
335 | struct iphdr *iph = (struct iphdr *)skb->data; | |
336 | struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); | |
337 | struct tcp_sock *tp; | |
338 | struct inet_sock *inet; | |
339 | int type = skb->h.icmph->type; | |
340 | int code = skb->h.icmph->code; | |
341 | struct sock *sk; | |
342 | __u32 seq; | |
343 | int err; | |
344 | ||
345 | if (skb->len < (iph->ihl << 2) + 8) { | |
346 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | |
347 | return; | |
348 | } | |
349 | ||
e48c414e | 350 | sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr, |
463c84b9 | 351 | th->source, inet_iif(skb)); |
1da177e4 LT |
352 | if (!sk) { |
353 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | |
354 | return; | |
355 | } | |
356 | if (sk->sk_state == TCP_TIME_WAIT) { | |
8feaf0c0 | 357 | inet_twsk_put((struct inet_timewait_sock *)sk); |
1da177e4 LT |
358 | return; |
359 | } | |
360 | ||
361 | bh_lock_sock(sk); | |
362 | /* If too many ICMPs get dropped on busy | |
363 | * servers this needs to be solved differently. | |
364 | */ | |
365 | if (sock_owned_by_user(sk)) | |
366 | NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); | |
367 | ||
368 | if (sk->sk_state == TCP_CLOSE) | |
369 | goto out; | |
370 | ||
371 | tp = tcp_sk(sk); | |
372 | seq = ntohl(th->seq); | |
373 | if (sk->sk_state != TCP_LISTEN && | |
374 | !between(seq, tp->snd_una, tp->snd_nxt)) { | |
375 | NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS); | |
376 | goto out; | |
377 | } | |
378 | ||
379 | switch (type) { | |
380 | case ICMP_SOURCE_QUENCH: | |
381 | /* Just silently ignore these. */ | |
382 | goto out; | |
383 | case ICMP_PARAMETERPROB: | |
384 | err = EPROTO; | |
385 | break; | |
386 | case ICMP_DEST_UNREACH: | |
387 | if (code > NR_ICMP_UNREACH) | |
388 | goto out; | |
389 | ||
390 | if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ | |
391 | if (!sock_owned_by_user(sk)) | |
392 | do_pmtu_discovery(sk, iph, info); | |
393 | goto out; | |
394 | } | |
395 | ||
396 | err = icmp_err_convert[code].errno; | |
397 | break; | |
398 | case ICMP_TIME_EXCEEDED: | |
399 | err = EHOSTUNREACH; | |
400 | break; | |
401 | default: | |
402 | goto out; | |
403 | } | |
404 | ||
405 | switch (sk->sk_state) { | |
60236fdd | 406 | struct request_sock *req, **prev; |
1da177e4 LT |
407 | case TCP_LISTEN: |
408 | if (sock_owned_by_user(sk)) | |
409 | goto out; | |
410 | ||
463c84b9 ACM |
411 | req = inet_csk_search_req(sk, &prev, th->dest, |
412 | iph->daddr, iph->saddr); | |
1da177e4 LT |
413 | if (!req) |
414 | goto out; | |
415 | ||
416 | /* ICMPs are not backlogged, hence we cannot get | |
417 | an established socket here. | |
418 | */ | |
419 | BUG_TRAP(!req->sk); | |
420 | ||
2e6599cb | 421 | if (seq != tcp_rsk(req)->snt_isn) { |
1da177e4 LT |
422 | NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS); |
423 | goto out; | |
424 | } | |
425 | ||
426 | /* | |
427 | * Still in SYN_RECV, just remove it silently. | |
428 | * There is no good way to pass the error to the newly | |
429 | * created socket, and POSIX does not want network | |
430 | * errors returned from accept(). | |
431 | */ | |
463c84b9 | 432 | inet_csk_reqsk_queue_drop(sk, req, prev); |
1da177e4 LT |
433 | goto out; |
434 | ||
435 | case TCP_SYN_SENT: | |
436 | case TCP_SYN_RECV: /* Cannot happen. | |
437 | It can, for example, if SYNs crossed. | |
438 | */ | |
439 | if (!sock_owned_by_user(sk)) { | |
440 | TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); | |
441 | sk->sk_err = err; | |
442 | ||
443 | sk->sk_error_report(sk); | |
444 | ||
445 | tcp_done(sk); | |
446 | } else { | |
447 | sk->sk_err_soft = err; | |
448 | } | |
449 | goto out; | |
450 | } | |
451 | ||
452 | /* If we've already connected we will keep trying | |
453 | * until we time out, or the user gives up. | |
454 | * | |
455 | * rfc1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH | |
456 | * to be treated as hard errors (well, FRAG_FAILED too, | |
457 | * but it is obsoleted by pmtu discovery). | |
458 | * | |
459 | * Note that in the modern internet, where routing is unreliable | |
460 | * and broken firewalls sit in every dark corner sending random | |
461 | * errors ordered by their masters, even these two messages have finally | |
462 | * lost their original sense (even Linux sends invalid PORT_UNREACHs). | |
463 | * | |
464 | * Now we are in compliance with RFCs. | |
465 | * --ANK (980905) | |
466 | */ | |
467 | ||
468 | inet = inet_sk(sk); | |
469 | if (!sock_owned_by_user(sk) && inet->recverr) { | |
470 | sk->sk_err = err; | |
471 | sk->sk_error_report(sk); | |
472 | } else { /* Only an error on timeout */ | |
473 | sk->sk_err_soft = err; | |
474 | } | |
475 | ||
476 | out: | |
477 | bh_unlock_sock(sk); | |
478 | sock_put(sk); | |
479 | } | |
480 | ||
481 | /* This routine computes an IPv4 TCP checksum. */ | |
8292a17a | 482 | void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) |
1da177e4 LT |
483 | { |
484 | struct inet_sock *inet = inet_sk(sk); | |
8292a17a | 485 | struct tcphdr *th = skb->h.th; |
1da177e4 LT |
486 | |
487 | if (skb->ip_summed == CHECKSUM_HW) { | |
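/* Hardware checksum path: store only the complemented pseudo-header
 * sum in the checksum field and record its offset, so the NIC can
 * fold in the rest of the segment.
 */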
488 | th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0); | |
489 | skb->csum = offsetof(struct tcphdr, check); | |
490 | } else { | |
491 | th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr, | |
492 | csum_partial((char *)th, | |
493 | th->doff << 2, | |
494 | skb->csum)); | |
495 | } | |
496 | } | |
497 | ||
498 | /* | |
499 | * This routine will send an RST to the other tcp. | |
500 | * | |
501 | * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.) | |
502 | * for the reset. | |
503 | * Answer: if a packet caused an RST, it is not for a socket | |
504 | * existing in our system; if it is matched to a socket, | |
505 | * it is just a duplicate segment or a bug in the other side's TCP. | |
506 | * So we build the reply based only on the parameters | |
507 | * that arrived with the segment. | |
508 | * Exception: precedence violation. We do not implement it in any case. | |
509 | */ | |
510 | ||
511 | static void tcp_v4_send_reset(struct sk_buff *skb) | |
512 | { | |
513 | struct tcphdr *th = skb->h.th; | |
514 | struct tcphdr rth; | |
515 | struct ip_reply_arg arg; | |
516 | ||
517 | /* Never send a reset in response to a reset. */ | |
518 | if (th->rst) | |
519 | return; | |
520 | ||
521 | if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL) | |
522 | return; | |
523 | ||
524 | /* Swap the send and the receive. */ | |
525 | memset(&rth, 0, sizeof(struct tcphdr)); | |
526 | rth.dest = th->source; | |
527 | rth.source = th->dest; | |
528 | rth.doff = sizeof(struct tcphdr) / 4; | |
529 | rth.rst = 1; | |
530 | ||
531 | if (th->ack) { | |
532 | rth.seq = th->ack_seq; | |
533 | } else { | |
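/* Acknowledge everything the peer sent: SYN and FIN each consume one
 * sequence number, on top of the payload carried in this segment.
 */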
534 | rth.ack = 1; | |
535 | rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + | |
536 | skb->len - (th->doff << 2)); | |
537 | } | |
538 | ||
539 | memset(&arg, 0, sizeof arg); | |
540 | arg.iov[0].iov_base = (unsigned char *)&rth; | |
541 | arg.iov[0].iov_len = sizeof rth; | |
542 | arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, | |
543 | skb->nh.iph->saddr, /*XXX*/ | |
544 | sizeof(struct tcphdr), IPPROTO_TCP, 0); | |
545 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | |
546 | ||
547 | ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth); | |
548 | ||
549 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); | |
550 | TCP_INC_STATS_BH(TCP_MIB_OUTRSTS); | |
551 | } | |
552 | ||
553 | /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states | |
554 | outside of socket context, is certainly ugly. What can I do? | |
555 | */ | |
556 | ||
557 | static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | |
558 | u32 win, u32 ts) | |
559 | { | |
560 | struct tcphdr *th = skb->h.th; | |
561 | struct { | |
562 | struct tcphdr th; | |
563 | u32 tsopt[3]; | |
564 | } rep; | |
565 | struct ip_reply_arg arg; | |
566 | ||
567 | memset(&rep.th, 0, sizeof(struct tcphdr)); | |
568 | memset(&arg, 0, sizeof arg); | |
569 | ||
570 | arg.iov[0].iov_base = (unsigned char *)&rep; | |
571 | arg.iov[0].iov_len = sizeof(rep.th); | |
572 | if (ts) { | |
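/* The first option word packs NOP, NOP, TIMESTAMP, length 10 - the
 * usual word-aligned encoding of the timestamp option.
 */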
573 | rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | | |
574 | (TCPOPT_TIMESTAMP << 8) | | |
575 | TCPOLEN_TIMESTAMP); | |
576 | rep.tsopt[1] = htonl(tcp_time_stamp); | |
577 | rep.tsopt[2] = htonl(ts); | |
578 | arg.iov[0].iov_len = sizeof(rep); | |
579 | } | |
580 | ||
581 | /* Swap the send and the receive. */ | |
582 | rep.th.dest = th->source; | |
583 | rep.th.source = th->dest; | |
584 | rep.th.doff = arg.iov[0].iov_len / 4; | |
585 | rep.th.seq = htonl(seq); | |
586 | rep.th.ack_seq = htonl(ack); | |
587 | rep.th.ack = 1; | |
588 | rep.th.window = htons(win); | |
589 | ||
590 | arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, | |
591 | skb->nh.iph->saddr, /*XXX*/ | |
592 | arg.iov[0].iov_len, IPPROTO_TCP, 0); | |
593 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | |
594 | ||
595 | ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len); | |
596 | ||
597 | TCP_INC_STATS_BH(TCP_MIB_OUTSEGS); | |
598 | } | |
599 | ||
600 | static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) | |
601 | { | |
8feaf0c0 ACM |
602 | struct inet_timewait_sock *tw = inet_twsk(sk); |
603 | const struct tcp_timewait_sock *tcptw = tcp_twsk(sk); | |
1da177e4 | 604 | |
8feaf0c0 ACM |
605 | tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, |
606 | tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent); | |
1da177e4 | 607 | |
8feaf0c0 | 608 | inet_twsk_put(tw); |
1da177e4 LT |
609 | } |
610 | ||
60236fdd | 611 | static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) |
1da177e4 | 612 | { |
2e6599cb | 613 | tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, |
1da177e4 LT |
614 | req->ts_recent); |
615 | } | |
616 | ||
1da177e4 LT |
617 | /* |
618 | * Send a SYN-ACK after having received an ACK. | |
60236fdd | 619 | * This still operates on a request_sock only, not on a big |
1da177e4 LT |
620 | * socket. |
621 | */ | |
60236fdd | 622 | static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req, |
1da177e4 LT |
623 | struct dst_entry *dst) |
624 | { | |
2e6599cb | 625 | const struct inet_request_sock *ireq = inet_rsk(req); |
1da177e4 LT |
626 | int err = -1; |
627 | struct sk_buff * skb; | |
628 | ||
629 | /* First, grab a route. */ | |
463c84b9 | 630 | if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) |
1da177e4 LT |
631 | goto out; |
632 | ||
633 | skb = tcp_make_synack(sk, dst, req); | |
634 | ||
635 | if (skb) { | |
636 | struct tcphdr *th = skb->h.th; | |
637 | ||
638 | th->check = tcp_v4_check(th, skb->len, | |
2e6599cb ACM |
639 | ireq->loc_addr, |
640 | ireq->rmt_addr, | |
1da177e4 LT |
641 | csum_partial((char *)th, skb->len, |
642 | skb->csum)); | |
643 | ||
2e6599cb ACM |
644 | err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, |
645 | ireq->rmt_addr, | |
646 | ireq->opt); | |
1da177e4 LT |
647 | if (err == NET_XMIT_CN) |
648 | err = 0; | |
649 | } | |
650 | ||
651 | out: | |
652 | dst_release(dst); | |
653 | return err; | |
654 | } | |
655 | ||
656 | /* | |
60236fdd | 657 | * IPv4 request_sock destructor. |
1da177e4 | 658 | */ |
60236fdd | 659 | static void tcp_v4_reqsk_destructor(struct request_sock *req) |
1da177e4 | 660 | { |
a51482bd | 661 | kfree(inet_rsk(req)->opt); |
1da177e4 LT |
662 | } |
663 | ||
40efc6fa | 664 | static void syn_flood_warning(struct sk_buff *skb) |
1da177e4 LT |
665 | { |
666 | static unsigned long warntime; | |
667 | ||
668 | if (time_after(jiffies, (warntime + HZ * 60))) { | |
669 | warntime = jiffies; | |
670 | printk(KERN_INFO | |
671 | "possible SYN flooding on port %d. Sending cookies.\n", | |
672 | ntohs(skb->h.th->dest)); | |
673 | } | |
674 | } | |
675 | ||
676 | /* | |
60236fdd | 677 | * Save and compile IPv4 options into the request_sock if needed. |
1da177e4 | 678 | */ |
40efc6fa SH |
679 | static struct ip_options *tcp_v4_save_options(struct sock *sk, |
680 | struct sk_buff *skb) | |
1da177e4 LT |
681 | { |
682 | struct ip_options *opt = &(IPCB(skb)->opt); | |
683 | struct ip_options *dopt = NULL; | |
684 | ||
685 | if (opt && opt->optlen) { | |
686 | int opt_size = optlength(opt); | |
687 | dopt = kmalloc(opt_size, GFP_ATOMIC); | |
688 | if (dopt) { | |
689 | if (ip_options_echo(dopt, skb)) { | |
690 | kfree(dopt); | |
691 | dopt = NULL; | |
692 | } | |
693 | } | |
694 | } | |
695 | return dopt; | |
696 | } | |
697 | ||
60236fdd | 698 | struct request_sock_ops tcp_request_sock_ops = { |
1da177e4 | 699 | .family = PF_INET, |
2e6599cb | 700 | .obj_size = sizeof(struct tcp_request_sock), |
1da177e4 | 701 | .rtx_syn_ack = tcp_v4_send_synack, |
60236fdd ACM |
702 | .send_ack = tcp_v4_reqsk_send_ack, |
703 | .destructor = tcp_v4_reqsk_destructor, | |
1da177e4 LT |
704 | .send_reset = tcp_v4_send_reset, |
705 | }; | |
706 | ||
6d6ee43e ACM |
707 | static struct timewait_sock_ops tcp_timewait_sock_ops = { |
708 | .twsk_obj_size = sizeof(struct tcp_timewait_sock), | |
709 | .twsk_unique = tcp_twsk_unique, | |
710 | }; | |
711 | ||
1da177e4 LT |
712 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) |
713 | { | |
2e6599cb | 714 | struct inet_request_sock *ireq; |
1da177e4 | 715 | struct tcp_options_received tmp_opt; |
60236fdd | 716 | struct request_sock *req; |
1da177e4 LT |
717 | __u32 saddr = skb->nh.iph->saddr; |
718 | __u32 daddr = skb->nh.iph->daddr; | |
719 | __u32 isn = TCP_SKB_CB(skb)->when; | |
720 | struct dst_entry *dst = NULL; | |
721 | #ifdef CONFIG_SYN_COOKIES | |
722 | int want_cookie = 0; | |
723 | #else | |
724 | #define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */ | |
725 | #endif | |
726 | ||
727 | /* Never answer SYNs sent to broadcast or multicast. */ | |
728 | if (((struct rtable *)skb->dst)->rt_flags & | |
729 | (RTCF_BROADCAST | RTCF_MULTICAST)) | |
730 | goto drop; | |
731 | ||
732 | /* TW buckets are converted to open requests without | |
733 | * limitations; they conserve resources and the peer is | |
734 | * evidently a real one. | |
735 | */ | |
463c84b9 | 736 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { |
1da177e4 LT |
737 | #ifdef CONFIG_SYN_COOKIES |
738 | if (sysctl_tcp_syncookies) { | |
739 | want_cookie = 1; | |
740 | } else | |
741 | #endif | |
742 | goto drop; | |
743 | } | |
744 | ||
745 | /* Accept backlog is full. If we have already queued enough | |
746 | * warm entries in the syn queue, drop the request. It is better than | |
747 | * clogging the syn queue with openreqs with exponentially increasing | |
748 | * timeout. | |
749 | */ | |
463c84b9 | 750 | if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) |
1da177e4 LT |
751 | goto drop; |
752 | ||
60236fdd | 753 | req = reqsk_alloc(&tcp_request_sock_ops); |
1da177e4 LT |
754 | if (!req) |
755 | goto drop; | |
756 | ||
757 | tcp_clear_options(&tmp_opt); | |
758 | tmp_opt.mss_clamp = 536; | |
759 | tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss; | |
760 | ||
761 | tcp_parse_options(skb, &tmp_opt, 0); | |
762 | ||
763 | if (want_cookie) { | |
764 | tcp_clear_options(&tmp_opt); | |
765 | tmp_opt.saw_tstamp = 0; | |
766 | } | |
767 | ||
768 | if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) { | |
769 | /* Some OSes (unknown ones, but I see them on a web server which | |
770 | * contains information interesting only for Windows | |
771 | * users) do not send their stamp in the SYN. It is an easy case: | |
772 | * we simply do not advertise TS support. | |
773 | */ | |
774 | tmp_opt.saw_tstamp = 0; | |
775 | tmp_opt.tstamp_ok = 0; | |
776 | } | |
777 | tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; | |
778 | ||
779 | tcp_openreq_init(req, &tmp_opt, skb); | |
780 | ||
2e6599cb ACM |
781 | ireq = inet_rsk(req); |
782 | ireq->loc_addr = daddr; | |
783 | ireq->rmt_addr = saddr; | |
784 | ireq->opt = tcp_v4_save_options(sk, skb); | |
1da177e4 LT |
785 | if (!want_cookie) |
786 | TCP_ECN_create_request(req, skb->h.th); | |
787 | ||
788 | if (want_cookie) { | |
789 | #ifdef CONFIG_SYN_COOKIES | |
790 | syn_flood_warning(skb); | |
791 | #endif | |
792 | isn = cookie_v4_init_sequence(sk, skb, &req->mss); | |
793 | } else if (!isn) { | |
794 | struct inet_peer *peer = NULL; | |
795 | ||
796 | /* VJ's idea. We save last timestamp seen | |
797 | * from the destination in peer table, when entering | |
798 | * state TIME-WAIT, and check against it before | |
799 | * accepting new connection request. | |
800 | * | |
801 | * If "isn" is not zero, this request hit alive | |
802 | * timewait bucket, so that all the necessary checks | |
803 | * are made in the function processing timewait state. | |
804 | */ | |
805 | if (tmp_opt.saw_tstamp && | |
295ff7ed | 806 | tcp_death_row.sysctl_tw_recycle && |
463c84b9 | 807 | (dst = inet_csk_route_req(sk, req)) != NULL && |
1da177e4 LT |
808 | (peer = rt_get_peer((struct rtable *)dst)) != NULL && |
809 | peer->v4daddr == saddr) { | |
810 | if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL && | |
811 | (s32)(peer->tcp_ts - req->ts_recent) > | |
812 | TCP_PAWS_WINDOW) { | |
813 | NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED); | |
814 | dst_release(dst); | |
815 | goto drop_and_free; | |
816 | } | |
817 | } | |
818 | /* Kill the following clause, if you dislike this way. */ | |
819 | else if (!sysctl_tcp_syncookies && | |
463c84b9 | 820 | (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < |
1da177e4 LT |
821 | (sysctl_max_syn_backlog >> 2)) && |
822 | (!peer || !peer->tcp_ts_stamp) && | |
823 | (!dst || !dst_metric(dst, RTAX_RTT))) { | |
824 | /* Without syncookies the last quarter of the | |
825 | * backlog is filled with destinations | |
826 | * proven to be alive. | |
827 | * It means that we continue to communicate | |
828 | * with destinations already remembered | |
829 | * at the moment of the synflood. | |
830 | */ | |
64ce2073 PM |
831 | LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open " |
832 | "request from %u.%u.%u.%u/%u\n", | |
833 | NIPQUAD(saddr), | |
834 | ntohs(skb->h.th->source)); | |
1da177e4 LT |
835 | dst_release(dst); |
836 | goto drop_and_free; | |
837 | } | |
838 | ||
839 | isn = tcp_v4_init_sequence(sk, skb); | |
840 | } | |
2e6599cb | 841 | tcp_rsk(req)->snt_isn = isn; |
1da177e4 LT |
842 | |
843 | if (tcp_v4_send_synack(sk, req, dst)) | |
844 | goto drop_and_free; | |
845 | ||
846 | if (want_cookie) { | |
60236fdd | 847 | reqsk_free(req); |
1da177e4 | 848 | } else { |
3f421baa | 849 | inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); |
1da177e4 LT |
850 | } |
851 | return 0; | |
852 | ||
853 | drop_and_free: | |
60236fdd | 854 | reqsk_free(req); |
1da177e4 LT |
855 | drop: |
856 | TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); | |
857 | return 0; | |
858 | } | |
859 | ||
860 | ||
861 | /* | |
862 | * The three way handshake has completed - we got a valid synack - | |
863 | * now create the new socket. | |
864 | */ | |
865 | struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |
60236fdd | 866 | struct request_sock *req, |
1da177e4 LT |
867 | struct dst_entry *dst) |
868 | { | |
2e6599cb | 869 | struct inet_request_sock *ireq; |
1da177e4 LT |
870 | struct inet_sock *newinet; |
871 | struct tcp_sock *newtp; | |
872 | struct sock *newsk; | |
873 | ||
874 | if (sk_acceptq_is_full(sk)) | |
875 | goto exit_overflow; | |
876 | ||
463c84b9 | 877 | if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) |
1da177e4 LT |
878 | goto exit; |
879 | ||
880 | newsk = tcp_create_openreq_child(sk, req, skb); | |
881 | if (!newsk) | |
882 | goto exit; | |
883 | ||
6cbb0df7 | 884 | sk_setup_caps(newsk, dst); |
1da177e4 LT |
885 | |
886 | newtp = tcp_sk(newsk); | |
887 | newinet = inet_sk(newsk); | |
2e6599cb ACM |
888 | ireq = inet_rsk(req); |
889 | newinet->daddr = ireq->rmt_addr; | |
890 | newinet->rcv_saddr = ireq->loc_addr; | |
891 | newinet->saddr = ireq->loc_addr; | |
892 | newinet->opt = ireq->opt; | |
893 | ireq->opt = NULL; | |
463c84b9 | 894 | newinet->mc_index = inet_iif(skb); |
1da177e4 | 895 | newinet->mc_ttl = skb->nh.iph->ttl; |
d83d8461 | 896 | inet_csk(newsk)->icsk_ext_hdr_len = 0; |
1da177e4 | 897 | if (newinet->opt) |
d83d8461 | 898 | inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen; |
1da177e4 LT |
899 | newinet->id = newtp->write_seq ^ jiffies; |
900 | ||
901 | tcp_sync_mss(newsk, dst_mtu(dst)); | |
902 | newtp->advmss = dst_metric(dst, RTAX_ADVMSS); | |
903 | tcp_initialize_rcv_mss(newsk); | |
904 | ||
f3f05f70 | 905 | __inet_hash(&tcp_hashinfo, newsk, 0); |
2d8c4ce5 | 906 | __inet_inherit_port(&tcp_hashinfo, sk, newsk); |
1da177e4 LT |
907 | |
908 | return newsk; | |
909 | ||
910 | exit_overflow: | |
911 | NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS); | |
912 | exit: | |
913 | NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS); | |
914 | dst_release(dst); | |
915 | return NULL; | |
916 | } | |
917 | ||
918 | static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) | |
919 | { | |
920 | struct tcphdr *th = skb->h.th; | |
921 | struct iphdr *iph = skb->nh.iph; | |
1da177e4 | 922 | struct sock *nsk; |
60236fdd | 923 | struct request_sock **prev; |
1da177e4 | 924 | /* Find possible connection requests. */ |
463c84b9 ACM |
925 | struct request_sock *req = inet_csk_search_req(sk, &prev, th->source, |
926 | iph->saddr, iph->daddr); | |
1da177e4 LT |
927 | if (req) |
928 | return tcp_check_req(sk, skb, req, prev); | |
929 | ||
e48c414e ACM |
930 | nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr, |
931 | th->source, skb->nh.iph->daddr, | |
463c84b9 | 932 | ntohs(th->dest), inet_iif(skb)); |
1da177e4 LT |
933 | |
934 | if (nsk) { | |
935 | if (nsk->sk_state != TCP_TIME_WAIT) { | |
936 | bh_lock_sock(nsk); | |
937 | return nsk; | |
938 | } | |
8feaf0c0 | 939 | inet_twsk_put((struct inet_timewait_sock *)nsk); |
1da177e4 LT |
940 | return NULL; |
941 | } | |
942 | ||
943 | #ifdef CONFIG_SYN_COOKIES | |
944 | if (!th->rst && !th->syn && th->ack) | |
945 | sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt)); | |
946 | #endif | |
947 | return sk; | |
948 | } | |
949 | ||
950 | static int tcp_v4_checksum_init(struct sk_buff *skb) | |
951 | { | |
952 | if (skb->ip_summed == CHECKSUM_HW) { | |
1da177e4 | 953 | if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr, |
fb286bb2 HX |
954 | skb->nh.iph->daddr, skb->csum)) { |
955 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1da177e4 | 956 | return 0; |
fb286bb2 | 957 | } |
1da177e4 | 958 | } |
fb286bb2 HX |
959 | |
960 | skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr, | |
961 | skb->len, IPPROTO_TCP, 0); | |
962 | ||
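/* Short segments (<= 76 bytes) are presumably cheap enough to verify
 * right away; longer ones keep the pseudo-header sum in skb->csum and
 * are verified later, e.g. while copying to the receiver.
 */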
1da177e4 | 963 | if (skb->len <= 76) { |
fb286bb2 | 964 | return __skb_checksum_complete(skb); |
1da177e4 LT |
965 | } |
966 | return 0; | |
967 | } | |
968 | ||
969 | ||
970 | /* The socket must have its spinlock held when we get | |
971 | * here. | |
972 | * | |
973 | * We have a potential double-lock case here, so even when | |
974 | * doing backlog processing we use the BH locking scheme. | |
975 | * This is because we cannot sleep with the original spinlock | |
976 | * held. | |
977 | */ | |
978 | int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |
979 | { | |
980 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ | |
981 | TCP_CHECK_TIMER(sk); | |
982 | if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) | |
983 | goto reset; | |
984 | TCP_CHECK_TIMER(sk); | |
985 | return 0; | |
986 | } | |
987 | ||
988 | if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb)) | |
989 | goto csum_err; | |
990 | ||
991 | if (sk->sk_state == TCP_LISTEN) { | |
992 | struct sock *nsk = tcp_v4_hnd_req(sk, skb); | |
993 | if (!nsk) | |
994 | goto discard; | |
995 | ||
996 | if (nsk != sk) { | |
997 | if (tcp_child_process(sk, nsk, skb)) | |
998 | goto reset; | |
999 | return 0; | |
1000 | } | |
1001 | } | |
1002 | ||
1003 | TCP_CHECK_TIMER(sk); | |
1004 | if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) | |
1005 | goto reset; | |
1006 | TCP_CHECK_TIMER(sk); | |
1007 | return 0; | |
1008 | ||
1009 | reset: | |
1010 | tcp_v4_send_reset(skb); | |
1011 | discard: | |
1012 | kfree_skb(skb); | |
1013 | /* Be careful here. If this function gets more complicated and | |
1014 | * gcc suffers from register pressure on the x86, sk (in %ebx) | |
1015 | * might be destroyed here. This current version compiles correctly, | |
1016 | * but you have been warned. | |
1017 | */ | |
1018 | return 0; | |
1019 | ||
1020 | csum_err: | |
1021 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | |
1022 | goto discard; | |
1023 | } | |
1024 | ||
1025 | /* | |
1026 | * From tcp_input.c | |
1027 | */ | |
1028 | ||
1029 | int tcp_v4_rcv(struct sk_buff *skb) | |
1030 | { | |
1031 | struct tcphdr *th; | |
1032 | struct sock *sk; | |
1033 | int ret; | |
1034 | ||
1035 | if (skb->pkt_type != PACKET_HOST) | |
1036 | goto discard_it; | |
1037 | ||
1038 | /* Count it even if it's bad */ | |
1039 | TCP_INC_STATS_BH(TCP_MIB_INSEGS); | |
1040 | ||
1041 | if (!pskb_may_pull(skb, sizeof(struct tcphdr))) | |
1042 | goto discard_it; | |
1043 | ||
1044 | th = skb->h.th; | |
1045 | ||
1046 | if (th->doff < sizeof(struct tcphdr) / 4) | |
1047 | goto bad_packet; | |
1048 | if (!pskb_may_pull(skb, th->doff * 4)) | |
1049 | goto discard_it; | |
1050 | ||
1051 | /* An explanation is required here, I think. | |
1052 | * Packet length and doff are validated by header prediction, | |
caa20d9a | 1053 | * provided the case of th->doff == 0 is eliminated.
1da177e4 LT |
1054 | * So, we defer the checks. */ |
1055 | if ((skb->ip_summed != CHECKSUM_UNNECESSARY && | |
fb286bb2 | 1056 | tcp_v4_checksum_init(skb))) |
1da177e4 LT |
1057 | goto bad_packet; |
1058 | ||
1059 | th = skb->h.th; | |
1060 | TCP_SKB_CB(skb)->seq = ntohl(th->seq); | |
1061 | TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + | |
1062 | skb->len - th->doff * 4); | |
1063 | TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); | |
1064 | TCP_SKB_CB(skb)->when = 0; | |
1065 | TCP_SKB_CB(skb)->flags = skb->nh.iph->tos; | |
1066 | TCP_SKB_CB(skb)->sacked = 0; | |
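/* Note that end_seq above counts the SYN and FIN flags as one sequence
 * number each on top of the payload length, matching how the sender
 * consumes sequence space.
 */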
1067 | ||
e48c414e ACM |
1068 | sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source, |
1069 | skb->nh.iph->daddr, ntohs(th->dest), | |
463c84b9 | 1070 | inet_iif(skb)); |
1da177e4 LT |
1071 | |
1072 | if (!sk) | |
1073 | goto no_tcp_socket; | |
1074 | ||
1075 | process: | |
1076 | if (sk->sk_state == TCP_TIME_WAIT) | |
1077 | goto do_time_wait; | |
1078 | ||
1079 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) | |
1080 | goto discard_and_relse; | |
1081 | ||
1082 | if (sk_filter(sk, skb, 0)) | |
1083 | goto discard_and_relse; | |
1084 | ||
1085 | skb->dev = NULL; | |
1086 | ||
1087 | bh_lock_sock(sk); | |
1088 | ret = 0; | |
1089 | if (!sock_owned_by_user(sk)) { | |
1090 | if (!tcp_prequeue(sk, skb)) | |
1091 | ret = tcp_v4_do_rcv(sk, skb); | |
1092 | } else | |
1093 | sk_add_backlog(sk, skb); | |
1094 | bh_unlock_sock(sk); | |
1095 | ||
1096 | sock_put(sk); | |
1097 | ||
1098 | return ret; | |
1099 | ||
1100 | no_tcp_socket: | |
1101 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | |
1102 | goto discard_it; | |
1103 | ||
1104 | if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { | |
1105 | bad_packet: | |
1106 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | |
1107 | } else { | |
1108 | tcp_v4_send_reset(skb); | |
1109 | } | |
1110 | ||
1111 | discard_it: | |
1112 | /* Discard frame. */ | |
1113 | kfree_skb(skb); | |
1114 | return 0; | |
1115 | ||
1116 | discard_and_relse: | |
1117 | sock_put(sk); | |
1118 | goto discard_it; | |
1119 | ||
1120 | do_time_wait: | |
1121 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { | |
8feaf0c0 | 1122 | inet_twsk_put((struct inet_timewait_sock *) sk); |
1da177e4 LT |
1123 | goto discard_it; |
1124 | } | |
1125 | ||
1126 | if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { | |
1127 | TCP_INC_STATS_BH(TCP_MIB_INERRS); | |
8feaf0c0 | 1128 | inet_twsk_put((struct inet_timewait_sock *) sk); |
1da177e4 LT |
1129 | goto discard_it; |
1130 | } | |
8feaf0c0 ACM |
1131 | switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk, |
1132 | skb, th)) { | |
1da177e4 | 1133 | case TCP_TW_SYN: { |
33b62231 ACM |
1134 | struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, |
1135 | skb->nh.iph->daddr, | |
1136 | ntohs(th->dest), | |
463c84b9 | 1137 | inet_iif(skb)); |
1da177e4 | 1138 | if (sk2) { |
295ff7ed ACM |
1139 | inet_twsk_deschedule((struct inet_timewait_sock *)sk, |
1140 | &tcp_death_row); | |
8feaf0c0 | 1141 | inet_twsk_put((struct inet_timewait_sock *)sk); |
1da177e4 LT |
1142 | sk = sk2; |
1143 | goto process; | |
1144 | } | |
1145 | /* Fall through to ACK */ | |
1146 | } | |
1147 | case TCP_TW_ACK: | |
1148 | tcp_v4_timewait_ack(sk, skb); | |
1149 | break; | |
1150 | case TCP_TW_RST: | |
1151 | goto no_tcp_socket; | |
1152 | case TCP_TW_SUCCESS:; | |
1153 | } | |
1154 | goto discard_it; | |
1155 | } | |
1156 | ||
1da177e4 LT |
1157 | /* VJ's idea. Save the last timestamp seen from this destination | |
1158 | * and hold it for at least the normal timewait interval, to use for duplicate | |
1159 | * segment detection in subsequent connections before they enter the synchronized | |
1160 | * state. | |
1161 | */ | |
1162 | ||
1163 | int tcp_v4_remember_stamp(struct sock *sk) | |
1164 | { | |
1165 | struct inet_sock *inet = inet_sk(sk); | |
1166 | struct tcp_sock *tp = tcp_sk(sk); | |
1167 | struct rtable *rt = (struct rtable *)__sk_dst_get(sk); | |
1168 | struct inet_peer *peer = NULL; | |
1169 | int release_it = 0; | |
1170 | ||
1171 | if (!rt || rt->rt_dst != inet->daddr) { | |
1172 | peer = inet_getpeer(inet->daddr, 1); | |
1173 | release_it = 1; | |
1174 | } else { | |
1175 | if (!rt->peer) | |
1176 | rt_bind_peer(rt, 1); | |
1177 | peer = rt->peer; | |
1178 | } | |
1179 | ||
1180 | if (peer) { | |
1181 | if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 || | |
1182 | (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec && | |
1183 | peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) { | |
1184 | peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp; | |
1185 | peer->tcp_ts = tp->rx_opt.ts_recent; | |
1186 | } | |
1187 | if (release_it) | |
1188 | inet_putpeer(peer); | |
1189 | return 1; | |
1190 | } | |
1191 | ||
1192 | return 0; | |
1193 | } | |
1194 | ||
8feaf0c0 | 1195 | int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw) |
1da177e4 | 1196 | { |
8feaf0c0 | 1197 | struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1); |
1da177e4 LT |
1198 | |
1199 | if (peer) { | |
8feaf0c0 ACM |
1200 | const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); |
1201 | ||
1202 | if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 || | |
1da177e4 | 1203 | (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec && |
8feaf0c0 ACM |
1204 | peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) { |
1205 | peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp; | |
1206 | peer->tcp_ts = tcptw->tw_ts_recent; | |
1da177e4 LT |
1207 | } |
1208 | inet_putpeer(peer); | |
1209 | return 1; | |
1210 | } | |
1211 | ||
1212 | return 0; | |
1213 | } | |
1214 | ||
8292a17a | 1215 | struct inet_connection_sock_af_ops ipv4_specific = { |
1da177e4 LT |
1216 | .queue_xmit = ip_queue_xmit, |
1217 | .send_check = tcp_v4_send_check, | |
32519f11 | 1218 | .rebuild_header = inet_sk_rebuild_header, |
1da177e4 LT |
1219 | .conn_request = tcp_v4_conn_request, |
1220 | .syn_recv_sock = tcp_v4_syn_recv_sock, | |
1221 | .remember_stamp = tcp_v4_remember_stamp, | |
1222 | .net_header_len = sizeof(struct iphdr), | |
1223 | .setsockopt = ip_setsockopt, | |
1224 | .getsockopt = ip_getsockopt, | |
af05dc93 | 1225 | .addr2sockaddr = inet_csk_addr2sockaddr, |
1da177e4 LT |
1226 | .sockaddr_len = sizeof(struct sockaddr_in), |
1227 | }; | |
1228 | ||
1229 | /* NOTE: A lot of things are set to zero explicitly by the call to | |
1230 | * sk_alloc(), so they need not be done here. | |
1231 | */ | |
1232 | static int tcp_v4_init_sock(struct sock *sk) | |
1233 | { | |
6687e988 | 1234 | struct inet_connection_sock *icsk = inet_csk(sk); |
1da177e4 LT |
1235 | struct tcp_sock *tp = tcp_sk(sk); |
1236 | ||
1237 | skb_queue_head_init(&tp->out_of_order_queue); | |
1238 | tcp_init_xmit_timers(sk); | |
1239 | tcp_prequeue_init(tp); | |
1240 | ||
6687e988 | 1241 | icsk->icsk_rto = TCP_TIMEOUT_INIT; |
1da177e4 LT |
1242 | tp->mdev = TCP_TIMEOUT_INIT; |
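/* Both the RTO and its mean deviation start at the conservative
 * TCP_TIMEOUT_INIT and are replaced once real RTT samples arrive.
 */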
1243 | ||
1244 | /* So many TCP implementations out there (incorrectly) count the | |
1245 | * initial SYN frame in their delayed-ACK and congestion control | |
1246 | * algorithms that we must have the following bandaid to talk | |
1247 | * efficiently to them. -DaveM | |
1248 | */ | |
1249 | tp->snd_cwnd = 2; | |
1250 | ||
1251 | /* See draft-stevens-tcpca-spec-01 for discussion of the | |
1252 | * initialization of these values. | |
1253 | */ | |
1254 | tp->snd_ssthresh = 0x7fffffff; /* Infinity */ | |
1255 | tp->snd_cwnd_clamp = ~0; | |
c1b4a7e6 | 1256 | tp->mss_cache = 536; |
1da177e4 LT |
1257 | |
1258 | tp->reordering = sysctl_tcp_reordering; | |
6687e988 | 1259 | icsk->icsk_ca_ops = &tcp_init_congestion_ops; |
1da177e4 LT |
1260 | |
1261 | sk->sk_state = TCP_CLOSE; | |
1262 | ||
1263 | sk->sk_write_space = sk_stream_write_space; | |
1264 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | |
1265 | ||
8292a17a | 1266 | icsk->icsk_af_ops = &ipv4_specific; |
d83d8461 | 1267 | icsk->icsk_sync_mss = tcp_sync_mss; |
1da177e4 LT |
1268 | |
1269 | sk->sk_sndbuf = sysctl_tcp_wmem[1]; | |
1270 | sk->sk_rcvbuf = sysctl_tcp_rmem[1]; | |
1271 | ||
1272 | atomic_inc(&tcp_sockets_allocated); | |
1273 | ||
1274 | return 0; | |
1275 | } | |
1276 | ||
1277 | int tcp_v4_destroy_sock(struct sock *sk) | |
1278 | { | |
1279 | struct tcp_sock *tp = tcp_sk(sk); | |
1280 | ||
1281 | tcp_clear_xmit_timers(sk); | |
1282 | ||
6687e988 | 1283 | tcp_cleanup_congestion_control(sk); |
317a76f9 | 1284 | |
1da177e4 LT |
1285 | /* Clean up the write buffer. */ | |
1286 | sk_stream_writequeue_purge(sk); | |
1287 | ||
1288 | /* Cleans up our, hopefully empty, out_of_order_queue. */ | |
1289 | __skb_queue_purge(&tp->out_of_order_queue); | |
1290 | ||
1291 | /* Clean the prequeue; it really must be empty. */ | |
1292 | __skb_queue_purge(&tp->ucopy.prequeue); | |
1293 | ||
1294 | /* Clean up a referenced TCP bind bucket. */ | |
463c84b9 | 1295 | if (inet_csk(sk)->icsk_bind_hash) |
2d8c4ce5 | 1296 | inet_put_port(&tcp_hashinfo, sk); |
1da177e4 LT |
1297 | |
1298 | /* | |
1299 | * If sendmsg cached page exists, toss it. | |
1300 | */ | |
1301 | if (sk->sk_sndmsg_page) { | |
1302 | __free_page(sk->sk_sndmsg_page); | |
1303 | sk->sk_sndmsg_page = NULL; | |
1304 | } | |
1305 | ||
1306 | atomic_dec(&tcp_sockets_allocated); | |
1307 | ||
1308 | return 0; | |
1309 | } | |
1310 | ||
1311 | EXPORT_SYMBOL(tcp_v4_destroy_sock); | |
1312 | ||
1313 | #ifdef CONFIG_PROC_FS | |
1314 | /* Proc filesystem TCP sock list dumping. */ | |
1315 | ||
8feaf0c0 | 1316 | static inline struct inet_timewait_sock *tw_head(struct hlist_head *head) |
1da177e4 LT |
1317 | { |
1318 | return hlist_empty(head) ? NULL : | |
8feaf0c0 | 1319 | list_entry(head->first, struct inet_timewait_sock, tw_node); |
1da177e4 LT |
1320 | } |
1321 | ||
8feaf0c0 | 1322 | static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw) |
1da177e4 LT |
1323 | { |
1324 | return tw->tw_node.next ? | |
1325 | hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL; | |
1326 | } | |
1327 | ||
1328 | static void *listening_get_next(struct seq_file *seq, void *cur) | |
1329 | { | |
463c84b9 | 1330 | struct inet_connection_sock *icsk; |
1da177e4 LT |
1331 | struct hlist_node *node; |
1332 | struct sock *sk = cur; | |
1333 | struct tcp_iter_state* st = seq->private; | |
1334 | ||
1335 | if (!sk) { | |
1336 | st->bucket = 0; | |
6e04e021 | 1337 | sk = sk_head(&tcp_hashinfo.listening_hash[0]); |
1da177e4 LT |
1338 | goto get_sk; |
1339 | } | |
1340 | ||
1341 | ++st->num; | |
1342 | ||
1343 | if (st->state == TCP_SEQ_STATE_OPENREQ) { | |
60236fdd | 1344 | struct request_sock *req = cur; |
1da177e4 | 1345 | |
463c84b9 | 1346 | icsk = inet_csk(st->syn_wait_sk); |
1da177e4 LT |
1347 | req = req->dl_next; |
1348 | while (1) { | |
1349 | while (req) { | |
60236fdd | 1350 | if (req->rsk_ops->family == st->family) { |
1da177e4 LT |
1351 | cur = req; |
1352 | goto out; | |
1353 | } | |
1354 | req = req->dl_next; | |
1355 | } | |
1356 | if (++st->sbucket >= TCP_SYNQ_HSIZE) | |
1357 | break; | |
1358 | get_req: | |
463c84b9 | 1359 | req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket]; |
1da177e4 LT |
1360 | } |
1361 | sk = sk_next(st->syn_wait_sk); | |
1362 | st->state = TCP_SEQ_STATE_LISTENING; | |
463c84b9 | 1363 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1da177e4 | 1364 | } else { |
463c84b9 ACM |
1365 | icsk = inet_csk(sk); |
1366 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); | |
1367 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) | |
1da177e4 | 1368 | goto start_req; |
463c84b9 | 1369 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1da177e4 LT |
1370 | sk = sk_next(sk); |
1371 | } | |
1372 | get_sk: | |
1373 | sk_for_each_from(sk, node) { | |
1374 | if (sk->sk_family == st->family) { | |
1375 | cur = sk; | |
1376 | goto out; | |
1377 | } | |
463c84b9 ACM |
1378 | icsk = inet_csk(sk); |
1379 | read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); | |
1380 | if (reqsk_queue_len(&icsk->icsk_accept_queue)) { | |
1da177e4 LT |
1381 | start_req: |
1382 | st->uid = sock_i_uid(sk); | |
1383 | st->syn_wait_sk = sk; | |
1384 | st->state = TCP_SEQ_STATE_OPENREQ; | |
1385 | st->sbucket = 0; | |
1386 | goto get_req; | |
1387 | } | |
463c84b9 | 1388 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
1da177e4 | 1389 | } |
0f7ff927 | 1390 | if (++st->bucket < INET_LHTABLE_SIZE) { |
6e04e021 | 1391 | sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]); |
1da177e4 LT |
1392 | goto get_sk; |
1393 | } | |
1394 | cur = NULL; | |
1395 | out: | |
1396 | return cur; | |
1397 | } | |
1398 | ||
1399 | static void *listening_get_idx(struct seq_file *seq, loff_t *pos) | |
1400 | { | |
1401 | void *rc = listening_get_next(seq, NULL); | |
1402 | ||
1403 | while (rc && *pos) { | |
1404 | rc = listening_get_next(seq, rc); | |
1405 | --*pos; | |
1406 | } | |
1407 | return rc; | |
1408 | } | |
1409 | ||
1410 | static void *established_get_first(struct seq_file *seq) | |
1411 | { | |
1412 | struct tcp_iter_state* st = seq->private; | |
1413 | void *rc = NULL; | |
1414 | ||
6e04e021 | 1415 | for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) { |
1da177e4 LT |
1416 | struct sock *sk; |
1417 | struct hlist_node *node; | |
8feaf0c0 | 1418 | struct inet_timewait_sock *tw; |
1da177e4 LT |
1419 | |
1420 | /* We can reschedule _before_ having picked the target: */ | |
1421 | cond_resched_softirq(); | |
1422 | ||
6e04e021 ACM |
1423 | read_lock(&tcp_hashinfo.ehash[st->bucket].lock); |
1424 | sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { | |
1da177e4 LT |
1425 | if (sk->sk_family != st->family) { |
1426 | continue; | |
1427 | } | |
1428 | rc = sk; | |
1429 | goto out; | |
1430 | } | |
1431 | st->state = TCP_SEQ_STATE_TIME_WAIT; | |
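/* TIME-WAIT sockets live in the upper half of the established hash
 * (bucket + ehash_size), per the split noted in the changelog at the
 * top of this file.
 */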
8feaf0c0 ACM |
1432 | inet_twsk_for_each(tw, node, |
1433 | &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) { | |
1da177e4 LT |
1434 | if (tw->tw_family != st->family) { |
1435 | continue; | |
1436 | } | |
1437 | rc = tw; | |
1438 | goto out; | |
1439 | } | |
6e04e021 | 1440 | read_unlock(&tcp_hashinfo.ehash[st->bucket].lock); |
1da177e4 LT |
1441 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
1442 | } | |
1443 | out: | |
1444 | return rc; | |
1445 | } | |
1446 | ||
1447 | static void *established_get_next(struct seq_file *seq, void *cur) | |
1448 | { | |
1449 | struct sock *sk = cur; | |
8feaf0c0 | 1450 | struct inet_timewait_sock *tw; |
1da177e4 LT |
1451 | struct hlist_node *node; |
1452 | struct tcp_iter_state* st = seq->private; | |
1453 | ||
1454 | ++st->num; | |
1455 | ||
1456 | if (st->state == TCP_SEQ_STATE_TIME_WAIT) { | |
1457 | tw = cur; | |
1458 | tw = tw_next(tw); | |
1459 | get_tw: | |
1460 | while (tw && tw->tw_family != st->family) { | |
1461 | tw = tw_next(tw); | |
1462 | } | |
1463 | if (tw) { | |
1464 | cur = tw; | |
1465 | goto out; | |
1466 | } | |
6e04e021 | 1467 | read_unlock(&tcp_hashinfo.ehash[st->bucket].lock); |
1da177e4 LT |
1468 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
1469 | ||
1470 | /* We can reschedule between buckets: */ | |
1471 | cond_resched_softirq(); | |
1472 | ||
6e04e021 ACM |
1473 | if (++st->bucket < tcp_hashinfo.ehash_size) { |
1474 | read_lock(&tcp_hashinfo.ehash[st->bucket].lock); | |
1475 | sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); | |
1da177e4 LT |
1476 | } else { |
1477 | cur = NULL; | |
1478 | goto out; | |
1479 | } | |
1480 | } else | |
1481 | sk = sk_next(sk); | |
1482 | ||
1483 | sk_for_each_from(sk, node) { | |
1484 | if (sk->sk_family == st->family) | |
1485 | goto found; | |
1486 | } | |
1487 | ||
1488 | st->state = TCP_SEQ_STATE_TIME_WAIT; | |
6e04e021 | 1489 | tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain); |
1da177e4 LT |
1490 | goto get_tw; |
1491 | found: | |
1492 | cur = sk; | |
1493 | out: | |
1494 | return cur; | |
1495 | } | |
1496 | ||
1497 | static void *established_get_idx(struct seq_file *seq, loff_t pos) | |
1498 | { | |
1499 | void *rc = established_get_first(seq); | |
1500 | ||
1501 | while (rc && pos) { | |
1502 | rc = established_get_next(seq, rc); | |
1503 | --pos; | |
1504 | } | |
1505 | return rc; | |
1506 | } | |
1507 | ||
1508 | static void *tcp_get_idx(struct seq_file *seq, loff_t pos) | |
1509 | { | |
1510 | void *rc; | |
1511 | struct tcp_iter_state* st = seq->private; | |
1512 | ||
f3f05f70 | 1513 | inet_listen_lock(&tcp_hashinfo); |
1da177e4 LT |
1514 | st->state = TCP_SEQ_STATE_LISTENING; |
1515 | rc = listening_get_idx(seq, &pos); | |
1516 | ||
1517 | if (!rc) { | |
f3f05f70 | 1518 | inet_listen_unlock(&tcp_hashinfo); |
1da177e4 LT |
1519 | local_bh_disable(); |
1520 | st->state = TCP_SEQ_STATE_ESTABLISHED; | |
1521 | rc = established_get_idx(seq, pos); | |
1522 | } | |
1523 | ||
1524 | return rc; | |
1525 | } | |
1526 | ||
1527 | static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) | |
1528 | { | |
1529 | struct tcp_iter_state* st = seq->private; | |
1530 | st->state = TCP_SEQ_STATE_LISTENING; | |
1531 | st->num = 0; | |
1532 | return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; | |
1533 | } | |
1534 | ||
1535 | static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
1536 | { | |
1537 | void *rc = NULL; | |
1538 | struct tcp_iter_state* st; | |
1539 | ||
1540 | if (v == SEQ_START_TOKEN) { | |
1541 | rc = tcp_get_idx(seq, 0); | |
1542 | goto out; | |
1543 | } | |
1544 | st = seq->private; | |
1545 | ||
1546 | switch (st->state) { | |
1547 | case TCP_SEQ_STATE_OPENREQ: | |
1548 | case TCP_SEQ_STATE_LISTENING: | |
1549 | rc = listening_get_next(seq, v); | |
1550 | if (!rc) { | |
f3f05f70 | 1551 | inet_listen_unlock(&tcp_hashinfo); |
1da177e4 LT |
1552 | local_bh_disable(); |
1553 | st->state = TCP_SEQ_STATE_ESTABLISHED; | |
1554 | rc = established_get_first(seq); | |
1555 | } | |
1556 | break; | |
1557 | case TCP_SEQ_STATE_ESTABLISHED: | |
1558 | case TCP_SEQ_STATE_TIME_WAIT: | |
1559 | rc = established_get_next(seq, v); | |
1560 | break; | |
1561 | } | |
1562 | out: | |
1563 | ++*pos; | |
1564 | return rc; | |
1565 | } | |
1566 | ||
1567 | static void tcp_seq_stop(struct seq_file *seq, void *v) | |
1568 | { | |
1569 | struct tcp_iter_state* st = seq->private; | |
1570 | ||
1571 | switch (st->state) { | |
1572 | case TCP_SEQ_STATE_OPENREQ: | |
1573 | if (v) { | |
463c84b9 ACM |
1574 | struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk); |
1575 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); | |
1da177e4 LT |
1576 | } |
1577 | case TCP_SEQ_STATE_LISTENING: | |
1578 | if (v != SEQ_START_TOKEN) | |
f3f05f70 | 1579 | inet_listen_unlock(&tcp_hashinfo); |
1da177e4 LT |
1580 | break; |
1581 | case TCP_SEQ_STATE_TIME_WAIT: | |
1582 | case TCP_SEQ_STATE_ESTABLISHED: | |
1583 | if (v) | |
6e04e021 | 1584 | read_unlock(&tcp_hashinfo.ehash[st->bucket].lock); |
1da177e4 LT |
1585 | local_bh_enable(); |
1586 | break; | |
1587 | } | |
1588 | } | |
1589 | ||
1590 | static int tcp_seq_open(struct inode *inode, struct file *file) | |
1591 | { | |
1592 | struct tcp_seq_afinfo *afinfo = PDE(inode)->data; | |
1593 | struct seq_file *seq; | |
1594 | struct tcp_iter_state *s; | |
1595 | int rc; | |
1596 | ||
1597 | if (unlikely(afinfo == NULL)) | |
1598 | return -EINVAL; | |
1599 | ||
1600 | s = kmalloc(sizeof(*s), GFP_KERNEL); | |
1601 | if (!s) | |
1602 | return -ENOMEM; | |
1603 | memset(s, 0, sizeof(*s)); | |
1604 | s->family = afinfo->family; | |
1605 | s->seq_ops.start = tcp_seq_start; | |
1606 | s->seq_ops.next = tcp_seq_next; | |
1607 | s->seq_ops.show = afinfo->seq_show; | |
1608 | s->seq_ops.stop = tcp_seq_stop; | |
1609 | ||
1610 | rc = seq_open(file, &s->seq_ops); | |
1611 | if (rc) | |
1612 | goto out_kfree; | |
1613 | seq = file->private_data; | |
1614 | seq->private = s; | |
1615 | out: | |
1616 | return rc; | |
1617 | out_kfree: | |
1618 | kfree(s); | |
1619 | goto out; | |
1620 | } | |
1621 | ||
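/*
 * Each open of a /proc/net/tcp-style file gets its own iterator state:
 * tcp_seq_open() allocates a zeroed tcp_iter_state, copies the address
 * family and show routine from the registered afinfo, and hangs the
 * state off seq->private.  seq_release_private(), installed by
 * tcp_proc_register() below, frees it on close.
 */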
1622 | int tcp_proc_register(struct tcp_seq_afinfo *afinfo) | |
1623 | { | |
1624 | int rc = 0; | |
1625 | struct proc_dir_entry *p; | |
1626 | ||
1627 | if (!afinfo) | |
1628 | return -EINVAL; | |
1629 | afinfo->seq_fops->owner = afinfo->owner; | |
1630 | afinfo->seq_fops->open = tcp_seq_open; | |
1631 | afinfo->seq_fops->read = seq_read; | |
1632 | afinfo->seq_fops->llseek = seq_lseek; | |
1633 | afinfo->seq_fops->release = seq_release_private; | |
1634 | ||
1635 | p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops); | |
1636 | if (p) | |
1637 | p->data = afinfo; | |
1638 | else | |
1639 | rc = -ENOMEM; | |
1640 | return rc; | |
1641 | } | |
1642 | ||
1643 | void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo) | |
1644 | { | |
1645 | if (!afinfo) | |
1646 | return; | |
1647 | proc_net_remove(afinfo->name); | |
1648 | memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); | |
1649 | } | |
1650 | ||
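/*
 * tcp_proc_register()/tcp_proc_unregister() let an address family publish
 * its own TCP socket listing under /proc/net.  A minimal usage sketch
 * (names hypothetical; the real IPv4 instance is tcp4_seq_afinfo further
 * down):
 *
 *	static struct file_operations tcpx_seq_fops;
 *	static struct tcp_seq_afinfo tcpx_seq_afinfo = {
 *		.owner		= THIS_MODULE,
 *		.name		= "tcpx",
 *		.family		= AF_INET,
 *		.seq_show	= tcpx_seq_show,
 *		.seq_fops	= &tcpx_seq_fops,
 *	};
 *
 *	static int __init tcpx_proc_init(void)
 *	{
 *		return tcp_proc_register(&tcpx_seq_afinfo);
 *	}
 *
 * tcp_proc_register() fills in the file_operations itself (and
 * tcp_proc_unregister() clears it again), so each afinfo needs its own
 * seq_fops structure.
 */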
60236fdd | 1651 | static void get_openreq4(struct sock *sk, struct request_sock *req, |
1da177e4 LT |
1652 | char *tmpbuf, int i, int uid) |
1653 | { | |
2e6599cb | 1654 | const struct inet_request_sock *ireq = inet_rsk(req); |
1da177e4 LT |
1655 | int ttd = req->expires - jiffies; |
1656 | ||
1657 | sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" | |
1658 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p", | |
1659 | i, | |
2e6599cb | 1660 | ireq->loc_addr, |
1da177e4 | 1661 | ntohs(inet_sk(sk)->sport), |
2e6599cb ACM |
1662 | ireq->rmt_addr, |
1663 | ntohs(ireq->rmt_port), | |
1da177e4 LT |
1664 | TCP_SYN_RECV, |
1665 | 0, 0, /* could print option size, but that is af dependent. */ | |
1666 | 1, /* timers active (only the expire timer) */ | |
1667 | jiffies_to_clock_t(ttd), | |
1668 | req->retrans, | |
1669 | uid, | |
1670 | 0, /* non-standard timer */ | |
1671 | 0, /* open_requests have no inode */ | |
1672 | atomic_read(&sk->sk_refcnt), | |
1673 | req); | |
1674 | } | |
1675 | ||
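/*
 * get_openreq4() formats one SYN_RECV entry (an open request queued on a
 * listener).  The columns line up with the header printed by
 * tcp4_seq_show(): local/remote address and port in hex, state, queue
 * sizes (always 0:0 here), timer kind 1 with the time until the request
 * expires, the retransmit count, and the listener's uid.  Open requests
 * have no inode, so that column is 0.
 */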
1676 | static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) | |
1677 | { | |
1678 | int timer_active; | |
1679 | unsigned long timer_expires; | |
1680 | struct tcp_sock *tp = tcp_sk(sp); | |
463c84b9 | 1681 | const struct inet_connection_sock *icsk = inet_csk(sp); |
1da177e4 LT |
1682 | struct inet_sock *inet = inet_sk(sp); |
1683 | unsigned int dest = inet->daddr; | |
1684 | unsigned int src = inet->rcv_saddr; | |
1685 | __u16 destp = ntohs(inet->dport); | |
1686 | __u16 srcp = ntohs(inet->sport); | |
1687 | ||
463c84b9 | 1688 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { |
1da177e4 | 1689 | timer_active = 1; |
463c84b9 ACM |
1690 | timer_expires = icsk->icsk_timeout; |
1691 | } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { | |
1da177e4 | 1692 | timer_active = 4; |
463c84b9 | 1693 | timer_expires = icsk->icsk_timeout; |
1da177e4 LT |
1694 | } else if (timer_pending(&sp->sk_timer)) { |
1695 | timer_active = 2; | |
1696 | timer_expires = sp->sk_timer.expires; | |
1697 | } else { | |
1698 | timer_active = 0; | |
1699 | timer_expires = jiffies; | |
1700 | } | |
1701 | ||
1702 | sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " | |
1703 | "%08X %5d %8d %lu %d %p %u %u %u %u %d", | |
1704 | i, src, srcp, dest, destp, sp->sk_state, | |
1705 | tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq, | |
1706 | timer_active, | |
1707 | jiffies_to_clock_t(timer_expires - jiffies), | |
463c84b9 | 1708 | icsk->icsk_retransmits, |
1da177e4 | 1709 | sock_i_uid(sp), |
6687e988 | 1710 | icsk->icsk_probes_out, |
1da177e4 LT |
1711 | sock_i_ino(sp), |
1712 | atomic_read(&sp->sk_refcnt), sp, | |
463c84b9 ACM |
1713 | icsk->icsk_rto, |
1714 | icsk->icsk_ack.ato, | |
1715 | (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, | |
1da177e4 LT |
1716 | tp->snd_cwnd, |
1717 | tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); | |
1718 | } | |
1719 | ||
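/*
 * get_tcp4_sock() reports full sockets.  tx_queue is write_seq - snd_una,
 * rx_queue is rcv_nxt - copied_seq.  The timer column encodes which timer
 * is pending:
 *	1 - retransmit timer (ICSK_TIME_RETRANS)
 *	4 - zero window probe timer (ICSK_TIME_PROBE0)
 *	2 - sk_timer (e.g. the keepalive timer)
 *	0 - no timer pending
 * followed by the remaining time in clock ticks.
 */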
8feaf0c0 | 1720 | static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i) |
1da177e4 LT |
1721 | { |
1722 | unsigned int dest, src; | |
1723 | __u16 destp, srcp; | |
1724 | int ttd = tw->tw_ttd - jiffies; | |
1725 | ||
1726 | if (ttd < 0) | |
1727 | ttd = 0; | |
1728 | ||
1729 | dest = tw->tw_daddr; | |
1730 | src = tw->tw_rcv_saddr; | |
1731 | destp = ntohs(tw->tw_dport); | |
1732 | srcp = ntohs(tw->tw_sport); | |
1733 | ||
1734 | sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" | |
1735 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p", | |
1736 | i, src, srcp, dest, destp, tw->tw_substate, 0, 0, | |
1737 | 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, | |
1738 | atomic_read(&tw->tw_refcnt), tw); | |
1739 | } | |
1740 | ||
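/*
 * TIME_WAIT entries are backed by inet_timewait_sock, so most columns are
 * fixed: the queues are 0, the timer kind is always 3 with the time until
 * the bucket expires (clamped at 0), and the uid and inode columns are 0.
 * The state shown is tw_substate, not a hard-wired TCP_TIME_WAIT.
 */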
1741 | #define TMPSZ 150 | |
1742 | ||
1743 | static int tcp4_seq_show(struct seq_file *seq, void *v) | |
1744 | { | |
1745 | struct tcp_iter_state* st; | |
1746 | char tmpbuf[TMPSZ + 1]; | |
1747 | ||
1748 | if (v == SEQ_START_TOKEN) { | |
1749 | seq_printf(seq, "%-*s\n", TMPSZ - 1, | |
1750 | " sl local_address rem_address st tx_queue " | |
1751 | "rx_queue tr tm->when retrnsmt uid timeout " | |
1752 | "inode"); | |
1753 | goto out; | |
1754 | } | |
1755 | st = seq->private; | |
1756 | ||
1757 | switch (st->state) { | |
1758 | case TCP_SEQ_STATE_LISTENING: | |
1759 | case TCP_SEQ_STATE_ESTABLISHED: | |
1760 | get_tcp4_sock(v, tmpbuf, st->num); | |
1761 | break; | |
1762 | case TCP_SEQ_STATE_OPENREQ: | |
1763 | get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid); | |
1764 | break; | |
1765 | case TCP_SEQ_STATE_TIME_WAIT: | |
1766 | get_timewait4_sock(v, tmpbuf, st->num); | |
1767 | break; | |
1768 | } | |
1769 | seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf); | |
1770 | out: | |
1771 | return 0; | |
1772 | } | |
1773 | ||
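/*
 * Every row, including the header, is padded to TMPSZ - 1 characters, so
 * each record in /proc/net/tcp has a fixed length of TMPSZ bytes
 * (149 characters plus the newline).
 */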
1774 | static struct file_operations tcp4_seq_fops; | |
1775 | static struct tcp_seq_afinfo tcp4_seq_afinfo = { | |
1776 | .owner = THIS_MODULE, | |
1777 | .name = "tcp", | |
1778 | .family = AF_INET, | |
1779 | .seq_show = tcp4_seq_show, | |
1780 | .seq_fops = &tcp4_seq_fops, | |
1781 | }; | |
1782 | ||
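/*
 * tcp4_seq_afinfo describes the IPv4 instance, /proc/net/tcp.  Addresses
 * are printed with %08X straight from the 32-bit network-byte-order value
 * and ports with %04X after ntohs(), so a reader has to account for host
 * endianness when turning the address column back into a dotted quad (on
 * little-endian hosts the bytes appear reversed).
 */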
1783 | int __init tcp4_proc_init(void) | |
1784 | { | |
1785 | return tcp_proc_register(&tcp4_seq_afinfo); | |
1786 | } | |
1787 | ||
1788 | void tcp4_proc_exit(void) | |
1789 | { | |
1790 | tcp_proc_unregister(&tcp4_seq_afinfo); | |
1791 | } | |
1792 | #endif /* CONFIG_PROC_FS */ | |
1793 | ||
1794 | struct proto tcp_prot = { | |
1795 | .name = "TCP", | |
1796 | .owner = THIS_MODULE, | |
1797 | .close = tcp_close, | |
1798 | .connect = tcp_v4_connect, | |
1799 | .disconnect = tcp_disconnect, | |
463c84b9 | 1800 | .accept = inet_csk_accept, |
1da177e4 LT |
1801 | .ioctl = tcp_ioctl, |
1802 | .init = tcp_v4_init_sock, | |
1803 | .destroy = tcp_v4_destroy_sock, | |
1804 | .shutdown = tcp_shutdown, | |
1805 | .setsockopt = tcp_setsockopt, | |
1806 | .getsockopt = tcp_getsockopt, | |
1807 | .sendmsg = tcp_sendmsg, | |
1808 | .recvmsg = tcp_recvmsg, | |
1809 | .backlog_rcv = tcp_v4_do_rcv, | |
1810 | .hash = tcp_v4_hash, | |
1811 | .unhash = tcp_unhash, | |
1812 | .get_port = tcp_v4_get_port, | |
1813 | .enter_memory_pressure = tcp_enter_memory_pressure, | |
1814 | .sockets_allocated = &tcp_sockets_allocated, | |
0a5578cf | 1815 | .orphan_count = &tcp_orphan_count, |
1da177e4 LT |
1816 | .memory_allocated = &tcp_memory_allocated, |
1817 | .memory_pressure = &tcp_memory_pressure, | |
1818 | .sysctl_mem = sysctl_tcp_mem, | |
1819 | .sysctl_wmem = sysctl_tcp_wmem, | |
1820 | .sysctl_rmem = sysctl_tcp_rmem, | |
1821 | .max_header = MAX_TCP_HEADER, | |
1822 | .obj_size = sizeof(struct tcp_sock), | |
6d6ee43e | 1823 | .twsk_prot = &tcp_timewait_sock_ops, |
60236fdd | 1824 | .rsk_prot = &tcp_request_sock_ops, |
1da177e4 LT |
1825 | }; |
1826 | ||
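/*
 * tcp_prot collects every operation the generic inet layer needs to drive
 * TCP over IPv4.  obj_size sizes the socket slab for struct tcp_sock,
 * while twsk_prot and rsk_prot supply the equivalent information for the
 * TIME_WAIT and request_sock mini sockets.
 */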
1827 | ||
1828 | ||
1829 | void __init tcp_v4_init(struct net_proto_family *ops) | |
1830 | { | |
1831 | int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket); | |
1832 | if (err < 0) | |
1833 | panic("Failed to create the TCP control socket.\n"); | |
1834 | tcp_socket->sk->sk_allocation = GFP_ATOMIC; | |
1835 | inet_sk(tcp_socket->sk)->uc_ttl = -1; | |
1836 | ||
1837 | /* Unhash it so that IP input processing does not even | |
1838 | * see it; we do not wish this socket to see incoming | |
1839 | * packets. | |
1840 | */ | |
1841 | tcp_socket->sk->sk_prot->unhash(tcp_socket->sk); | |
1842 | } | |
1843 | ||
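/*
 * The control socket created in tcp_v4_init() is a kernel-internal raw
 * socket; sk_allocation is set to GFP_ATOMIC so packets can be built on
 * it from contexts that must not sleep, and it is unhashed immediately so
 * that no incoming segment can ever be matched against it.
 */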
1844 | EXPORT_SYMBOL(ipv4_specific); | |
0f7ff927 | 1845 | EXPORT_SYMBOL(inet_bind_bucket_create); |
1da177e4 | 1846 | EXPORT_SYMBOL(tcp_hashinfo); |
1da177e4 | 1847 | EXPORT_SYMBOL(tcp_prot); |
1da177e4 LT |
1848 | EXPORT_SYMBOL(tcp_unhash); |
1849 | EXPORT_SYMBOL(tcp_v4_conn_request); | |
1850 | EXPORT_SYMBOL(tcp_v4_connect); | |
1851 | EXPORT_SYMBOL(tcp_v4_do_rcv); | |
1da177e4 LT |
1852 | EXPORT_SYMBOL(tcp_v4_remember_stamp); |
1853 | EXPORT_SYMBOL(tcp_v4_send_check); | |
1854 | EXPORT_SYMBOL(tcp_v4_syn_recv_sock); | |
1855 | ||
1856 | #ifdef CONFIG_PROC_FS | |
1857 | EXPORT_SYMBOL(tcp_proc_register); | |
1858 | EXPORT_SYMBOL(tcp_proc_unregister); | |
1859 | #endif | |
1860 | EXPORT_SYMBOL(sysctl_local_port_range); | |
1da177e4 LT |
1861 | EXPORT_SYMBOL(sysctl_tcp_low_latency); |
1862 | EXPORT_SYMBOL(sysctl_tcp_tw_reuse); | |
1863 |