net/ipv4/tcp_ipv4.c  (linux-2.6-block.git, blame taken at commit "netns: Fix icmp shutdown.")
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */


#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;

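/* Derive an initial sequence number from the addresses and ports carried in
 * the segment's IP and TCP headers.
 */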
static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

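/* Decide whether a new connect() may take over a four-tuple that is still
 * held by a TIME-WAIT socket; returns 1 when the old timewait state can be
 * reused safely, 0 otherwise.
 */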
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer != NULL &&
		    peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576bytes so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	__u32 seq;
	int err;
	struct net *net = dev_net(skb->dev);

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, f.e., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(len, inet->saddr,
					  inet->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

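/* For a GSO skb, seed th->check with the pseudo-header checksum and mark the
 * skb CHECKSUM_PARTIAL so the per-segment checksums are completed later.
 */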
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters
 *		arriving with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb->rtable->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb->dst->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb->dst->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

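/* Send an ACK on behalf of a TIME-WAIT socket, using the sequence numbers,
 * window and timestamp it recorded before entering TIME-WAIT.
 */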
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

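/* ACK a segment that matched an embryonic connection still in SYN_RECV. */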
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
				struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v4_check(skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial(th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
{
	return __tcp_v4_send_synack(sk, req, NULL);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4  = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}

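/* Mix the IPv4 pseudo-header into an MD5 hash that is being computed. */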
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->saddr;
		daddr = inet_sk(sk)->daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

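/* Process an incoming SYN on a listening socket: allocate a request_sock,
 * save the peer's options, pick an ISN and answer with a SYN-ACK (or a
 * syncookie when the SYN queue is full).
 */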
1da177e4
LT
1172int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1173{
2e6599cb 1174 struct inet_request_sock *ireq;
1da177e4 1175 struct tcp_options_received tmp_opt;
60236fdd 1176 struct request_sock *req;
eddc9ec5
ACM
1177 __be32 saddr = ip_hdr(skb)->saddr;
1178 __be32 daddr = ip_hdr(skb)->daddr;
1da177e4
LT
1179 __u32 isn = TCP_SKB_CB(skb)->when;
1180 struct dst_entry *dst = NULL;
1181#ifdef CONFIG_SYN_COOKIES
1182 int want_cookie = 0;
1183#else
1184#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
1185#endif
1186
1187 /* Never answer to SYNs send to broadcast or multicast */
ee6b9673 1188 if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1da177e4
LT
1189 goto drop;
1190
1191 /* TW buckets are converted to open requests without
1192 * limitations, they conserve resources and peer is
1193 * evidently real one.
1194 */
463c84b9 1195 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1da177e4
LT
1196#ifdef CONFIG_SYN_COOKIES
1197 if (sysctl_tcp_syncookies) {
1198 want_cookie = 1;
1199 } else
1200#endif
1201 goto drop;
1202 }
1203
1204 /* Accept backlog is full. If we have already queued enough
1205 * of warm entries in syn queue, drop request. It is better than
1206 * clogging syn queue with openreqs with exponentially increasing
1207 * timeout.
1208 */
463c84b9 1209 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1da177e4
LT
1210 goto drop;
1211
ce4a7d0d 1212 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1da177e4
LT
1213 if (!req)
1214 goto drop;
1215
cfb6eeb4
YH
1216#ifdef CONFIG_TCP_MD5SIG
1217 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1218#endif
1219
1da177e4
LT
1220 tcp_clear_options(&tmp_opt);
1221 tmp_opt.mss_clamp = 536;
1222 tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
1223
1224 tcp_parse_options(skb, &tmp_opt, 0);
1225
4dfc2817 1226 if (want_cookie && !tmp_opt.saw_tstamp)
1da177e4 1227 tcp_clear_options(&tmp_opt);
1da177e4
LT
1228
1229 if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
1230 /* Some OSes (unknown ones, but I see them on web server, which
1231 * contains information interesting only for windows'
1232 * users) do not send their stamp in SYN. It is easy case.
1233 * We simply do not advertise TS support.
1234 */
1235 tmp_opt.saw_tstamp = 0;
1236 tmp_opt.tstamp_ok = 0;
1237 }
1238 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1239
1240 tcp_openreq_init(req, &tmp_opt, skb);
1241
4237c75c
VY
1242 if (security_inet_conn_request(sk, skb, req))
1243 goto drop_and_free;
1244
2e6599cb
ACM
1245 ireq = inet_rsk(req);
1246 ireq->loc_addr = daddr;
1247 ireq->rmt_addr = saddr;
88ef4a5a 1248 ireq->no_srccheck = inet_sk(sk)->transparent;
2e6599cb 1249 ireq->opt = tcp_v4_save_options(sk, skb);
1da177e4 1250 if (!want_cookie)
aa8223c7 1251 TCP_ECN_create_request(req, tcp_hdr(skb));
1da177e4
LT
1252
1253 if (want_cookie) {
1254#ifdef CONFIG_SYN_COOKIES
1255 syn_flood_warning(skb);
4dfc2817 1256 req->cookie_ts = tmp_opt.tstamp_ok;
1da177e4
LT
1257#endif
1258 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1259 } else if (!isn) {
1260 struct inet_peer *peer = NULL;
1261
1262 /* VJ's idea. We save last timestamp seen
1263 * from the destination in peer table, when entering
1264 * state TIME-WAIT, and check against it before
1265 * accepting new connection request.
1266 *
1267 * If "isn" is not zero, this request hit alive
1268 * timewait bucket, so that all the necessary checks
1269 * are made in the function processing timewait state.
1270 */
1271 if (tmp_opt.saw_tstamp &&
295ff7ed 1272 tcp_death_row.sysctl_tw_recycle &&
463c84b9 1273 (dst = inet_csk_route_req(sk, req)) != NULL &&
1da177e4
LT
1274 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1275 peer->v4daddr == saddr) {
9d729f72 1276 if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
1da177e4
LT
1277 (s32)(peer->tcp_ts - req->ts_recent) >
1278 TCP_PAWS_WINDOW) {
de0744af 1279 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
7cd04fa7 1280 goto drop_and_release;
1da177e4
LT
1281 }
1282 }
1283 /* Kill the following clause, if you dislike this way. */
1284 else if (!sysctl_tcp_syncookies &&
463c84b9 1285 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1da177e4
LT
1286 (sysctl_max_syn_backlog >> 2)) &&
1287 (!peer || !peer->tcp_ts_stamp) &&
1288 (!dst || !dst_metric(dst, RTAX_RTT))) {
1289 /* Without syncookies last quarter of
1290 * backlog is filled with destinations,
1291 * proven to be alive.
1292 * It means that we continue to communicate
1293 * to destinations, already remembered
1294 * to the moment of synflood.
1295 */
673d57e7
HH
1296 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
1297 &saddr, ntohs(tcp_hdr(skb)->source));
7cd04fa7 1298 goto drop_and_release;
1da177e4
LT
1299 }
1300
a94f723d 1301 isn = tcp_v4_init_sequence(skb);
1da177e4 1302 }
2e6599cb 1303 tcp_rsk(req)->snt_isn = isn;
1da177e4 1304
7cd04fa7 1305 if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
1da177e4
LT
1306 goto drop_and_free;
1307
7cd04fa7 1308 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1da177e4
LT
1309 return 0;
1310
7cd04fa7
DL
1311drop_and_release:
1312 dst_release(dst);
1da177e4 1313drop_and_free:
60236fdd 1314 reqsk_free(req);
1da177e4 1315drop:
1da177e4
LT
1316 return 0;
1317}
1318
1319
1320/*
1321 * The three way handshake has completed - we got a valid synack -
1322 * now create the new socket.
1323 */
1324struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
60236fdd 1325 struct request_sock *req,
1da177e4
LT
1326 struct dst_entry *dst)
1327{
2e6599cb 1328 struct inet_request_sock *ireq;
1da177e4
LT
1329 struct inet_sock *newinet;
1330 struct tcp_sock *newtp;
1331 struct sock *newsk;
cfb6eeb4
YH
1332#ifdef CONFIG_TCP_MD5SIG
1333 struct tcp_md5sig_key *key;
1334#endif
1da177e4
LT
1335
1336 if (sk_acceptq_is_full(sk))
1337 goto exit_overflow;
1338
463c84b9 1339 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1da177e4
LT
1340 goto exit;
1341
1342 newsk = tcp_create_openreq_child(sk, req, skb);
1343 if (!newsk)
1344 goto exit;
1345
bcd76111 1346 newsk->sk_gso_type = SKB_GSO_TCPV4;
6cbb0df7 1347 sk_setup_caps(newsk, dst);
1da177e4
LT
1348
1349 newtp = tcp_sk(newsk);
1350 newinet = inet_sk(newsk);
2e6599cb
ACM
1351 ireq = inet_rsk(req);
1352 newinet->daddr = ireq->rmt_addr;
1353 newinet->rcv_saddr = ireq->loc_addr;
1354 newinet->saddr = ireq->loc_addr;
1355 newinet->opt = ireq->opt;
1356 ireq->opt = NULL;
463c84b9 1357 newinet->mc_index = inet_iif(skb);
eddc9ec5 1358 newinet->mc_ttl = ip_hdr(skb)->ttl;
d83d8461 1359 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1da177e4 1360 if (newinet->opt)
d83d8461 1361 inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
1da177e4
LT
1362 newinet->id = newtp->write_seq ^ jiffies;
1363
5d424d5a 1364 tcp_mtup_init(newsk);
1da177e4
LT
1365 tcp_sync_mss(newsk, dst_mtu(dst));
1366 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
f5fff5dc
TQ
1367 if (tcp_sk(sk)->rx_opt.user_mss &&
1368 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1369 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1370
1da177e4
LT
1371 tcp_initialize_rcv_mss(newsk);
1372
cfb6eeb4
YH
1373#ifdef CONFIG_TCP_MD5SIG
1374 /* Copy over the MD5 key from the original socket */
1375 if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
1376 /*
1377 * We're using one, so create a matching key
1378 * on the newsk structure. If we fail to get
1379 * memory, then we end up not copying the key
1380 * across. Shucks.
1381 */
f6685938
ACM
1382 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1383 if (newkey != NULL)
cfb6eeb4
YH
1384 tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
1385 newkey, key->keylen);
49a72dfb 1386 newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4
YH
1387 }
1388#endif
1389
ab1e0a13
ACM
1390 __inet_hash_nolisten(newsk);
1391 __inet_inherit_port(sk, newsk);
1da177e4
LT
1392
1393 return newsk;
1394
1395exit_overflow:
de0744af 1396 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1da177e4 1397exit:
de0744af 1398 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1da177e4
LT
1399 dst_release(dst);
1400 return NULL;
1401}
1402
1403static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1404{
aa8223c7 1405 struct tcphdr *th = tcp_hdr(skb);
eddc9ec5 1406 const struct iphdr *iph = ip_hdr(skb);
1da177e4 1407 struct sock *nsk;
60236fdd 1408 struct request_sock **prev;
1da177e4 1409 /* Find possible connection requests. */
463c84b9
ACM
1410 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1411 iph->saddr, iph->daddr);
1da177e4
LT
1412 if (req)
1413 return tcp_check_req(sk, skb, req, prev);
1414
3b1e0a65 1415 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
c67499c0 1416 th->source, iph->daddr, th->dest, inet_iif(skb));
1da177e4
LT
1417
1418 if (nsk) {
1419 if (nsk->sk_state != TCP_TIME_WAIT) {
1420 bh_lock_sock(nsk);
1421 return nsk;
1422 }
9469c7b4 1423 inet_twsk_put(inet_twsk(nsk));
1da177e4
LT
1424 return NULL;
1425 }
1426
1427#ifdef CONFIG_SYN_COOKIES
1428 if (!th->rst && !th->syn && th->ack)
1429 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1430#endif
1431 return sk;
1432}
1433
b51655b9 1434static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1da177e4 1435{
eddc9ec5
ACM
1436 const struct iphdr *iph = ip_hdr(skb);
1437
84fa7933 1438 if (skb->ip_summed == CHECKSUM_COMPLETE) {
eddc9ec5
ACM
1439 if (!tcp_v4_check(skb->len, iph->saddr,
1440 iph->daddr, skb->csum)) {
fb286bb2 1441 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 1442 return 0;
fb286bb2 1443 }
1da177e4 1444 }
fb286bb2 1445
eddc9ec5 1446 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
fb286bb2
HX
1447 skb->len, IPPROTO_TCP, 0);
1448
1da177e4 1449 if (skb->len <= 76) {
fb286bb2 1450 return __skb_checksum_complete(skb);
1da177e4
LT
1451 }
1452 return 0;
1453}
1454
1455
1456/* The socket must have it's spinlock held when we get
1457 * here.
1458 *
1459 * We have a potential double-lock case here, so even when
1460 * doing backlog processing we use the BH locking scheme.
1461 * This is because we cannot sleep with the original spinlock
1462 * held.
1463 */
1464int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1465{
cfb6eeb4
YH
1466 struct sock *rsk;
1467#ifdef CONFIG_TCP_MD5SIG
1468 /*
1469 * We really want to reject the packet as early as possible
1470 * if:
1471 * o We're expecting an MD5'd packet and this is no MD5 tcp option
1472 * o There is an MD5 option and we're not expecting one
1473 */
7174259e 1474 if (tcp_v4_inbound_md5_hash(sk, skb))
cfb6eeb4
YH
1475 goto discard;
1476#endif
1477
1da177e4
LT
1478 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1479 TCP_CHECK_TIMER(sk);
aa8223c7 1480 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1481 rsk = sk;
1da177e4 1482 goto reset;
cfb6eeb4 1483 }
1da177e4
LT
1484 TCP_CHECK_TIMER(sk);
1485 return 0;
1486 }
1487
ab6a5bb6 1488 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1da177e4
LT
1489 goto csum_err;
1490
1491 if (sk->sk_state == TCP_LISTEN) {
1492 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1493 if (!nsk)
1494 goto discard;
1495
1496 if (nsk != sk) {
cfb6eeb4
YH
1497 if (tcp_child_process(sk, nsk, skb)) {
1498 rsk = nsk;
1da177e4 1499 goto reset;
cfb6eeb4 1500 }
1da177e4
LT
1501 return 0;
1502 }
1503 }
1504
1505 TCP_CHECK_TIMER(sk);
aa8223c7 1506 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1507 rsk = sk;
1da177e4 1508 goto reset;
cfb6eeb4 1509 }
1da177e4
LT
1510 TCP_CHECK_TIMER(sk);
1511 return 0;
1512
1513reset:
cfb6eeb4 1514 tcp_v4_send_reset(rsk, skb);
1da177e4
LT
1515discard:
1516 kfree_skb(skb);
1517 /* Be careful here. If this function gets more complicated and
1518 * gcc suffers from register pressure on the x86, sk (in %ebx)
1519 * might be destroyed here. This current version compiles correctly,
1520 * but you have been warned.
1521 */
1522 return 0;
1523
1524csum_err:
63231bdd 1525 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1526 goto discard;
1527}
1528
1529/*
1530 * From tcp_input.c
1531 */
1532
1533int tcp_v4_rcv(struct sk_buff *skb)
1534{
eddc9ec5 1535 const struct iphdr *iph;
1da177e4
LT
1536 struct tcphdr *th;
1537 struct sock *sk;
1538 int ret;
a86b1e30 1539 struct net *net = dev_net(skb->dev);
1da177e4
LT
1540
1541 if (skb->pkt_type != PACKET_HOST)
1542 goto discard_it;
1543
1544 /* Count it even if it's bad */
63231bdd 1545 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1546
1547 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1548 goto discard_it;
1549
aa8223c7 1550 th = tcp_hdr(skb);
1da177e4
LT
1551
1552 if (th->doff < sizeof(struct tcphdr) / 4)
1553 goto bad_packet;
1554 if (!pskb_may_pull(skb, th->doff * 4))
1555 goto discard_it;
1556
1557 /* An explanation is required here, I think.
1558 * Packet length and doff are validated by header prediction,
caa20d9a 1559 * provided case of th->doff==0 is eliminated.
1da177e4 1560 * So, we defer the checks. */
60476372 1561 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1da177e4
LT
1562 goto bad_packet;
1563
aa8223c7 1564 th = tcp_hdr(skb);
eddc9ec5 1565 iph = ip_hdr(skb);
1da177e4
LT
1566 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1567 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1568 skb->len - th->doff * 4);
1569 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1570 TCP_SKB_CB(skb)->when = 0;
eddc9ec5 1571 TCP_SKB_CB(skb)->flags = iph->tos;
1da177e4
LT
1572 TCP_SKB_CB(skb)->sacked = 0;
1573
9a1f27c4 1574 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1575 if (!sk)
1576 goto no_tcp_socket;
1577
1578process:
1579 if (sk->sk_state == TCP_TIME_WAIT)
1580 goto do_time_wait;
1581
1582 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1583 goto discard_and_relse;
b59c2701 1584 nf_reset(skb);
1da177e4 1585
fda9ef5d 1586 if (sk_filter(sk, skb))
1da177e4
LT
1587 goto discard_and_relse;
1588
1589 skb->dev = NULL;
1590
c6366184 1591 bh_lock_sock_nested(sk);
1da177e4
LT
1592 ret = 0;
1593 if (!sock_owned_by_user(sk)) {
1a2449a8
CL
1594#ifdef CONFIG_NET_DMA
1595 struct tcp_sock *tp = tcp_sk(sk);
1596 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1597 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1a2449a8 1598 if (tp->ucopy.dma_chan)
1da177e4 1599 ret = tcp_v4_do_rcv(sk, skb);
1a2449a8
CL
1600 else
1601#endif
1602 {
1603 if (!tcp_prequeue(sk, skb))
1604 ret = tcp_v4_do_rcv(sk, skb);
1605 }
1da177e4
LT
1606 } else
1607 sk_add_backlog(sk, skb);
1608 bh_unlock_sock(sk);
1609
1610 sock_put(sk);
1611
1612 return ret;
1613
1614no_tcp_socket:
1615 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1616 goto discard_it;
1617
1618 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1619bad_packet:
63231bdd 1620 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1621 } else {
cfb6eeb4 1622 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
1623 }
1624
1625discard_it:
1626 /* Discard frame. */
1627 kfree_skb(skb);
e905a9ed 1628 return 0;
1da177e4
LT
1629
1630discard_and_relse:
1631 sock_put(sk);
1632 goto discard_it;
1633
1634do_time_wait:
1635 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1636 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1637 goto discard_it;
1638 }
1639
1640 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
63231bdd 1641 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1642 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1643 goto discard_it;
1644 }
9469c7b4 1645 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 1646 case TCP_TW_SYN: {
c346dca1 1647 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
c67499c0 1648 &tcp_hashinfo,
eddc9ec5 1649 iph->daddr, th->dest,
463c84b9 1650 inet_iif(skb));
1da177e4 1651 if (sk2) {
9469c7b4
YH
1652 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1653 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1654 sk = sk2;
1655 goto process;
1656 }
1657 /* Fall through to ACK */
1658 }
1659 case TCP_TW_ACK:
1660 tcp_v4_timewait_ack(sk, skb);
1661 break;
1662 case TCP_TW_RST:
1663 goto no_tcp_socket;
1664 case TCP_TW_SUCCESS:;
1665 }
1666 goto discard_it;
1667}
1668
1da177e4
LT
1669/* VJ's idea. Save last timestamp seen from this destination
1670 * and hold it at least for normal timewait interval to use for duplicate
1671 * segment detection in subsequent connections, before they enter synchronized
1672 * state.
1673 */
1674
1675int tcp_v4_remember_stamp(struct sock *sk)
1676{
1677 struct inet_sock *inet = inet_sk(sk);
1678 struct tcp_sock *tp = tcp_sk(sk);
1679 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1680 struct inet_peer *peer = NULL;
1681 int release_it = 0;
1682
1683 if (!rt || rt->rt_dst != inet->daddr) {
1684 peer = inet_getpeer(inet->daddr, 1);
1685 release_it = 1;
1686 } else {
1687 if (!rt->peer)
1688 rt_bind_peer(rt, 1);
1689 peer = rt->peer;
1690 }
1691
1692 if (peer) {
1693 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
9d729f72 1694 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
1da177e4
LT
1695 peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
1696 peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
1697 peer->tcp_ts = tp->rx_opt.ts_recent;
1698 }
1699 if (release_it)
1700 inet_putpeer(peer);
1701 return 1;
1702 }
1703
1704 return 0;
1705}
1706
8feaf0c0 1707int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1da177e4 1708{
8feaf0c0 1709 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1da177e4
LT
1710
1711 if (peer) {
8feaf0c0
ACM
1712 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1713
1714 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
9d729f72 1715 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
8feaf0c0
ACM
1716 peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
1717 peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
1718 peer->tcp_ts = tcptw->tw_ts_recent;
1da177e4
LT
1719 }
1720 inet_putpeer(peer);
1721 return 1;
1722 }
1723
1724 return 0;
1725}
1726
8292a17a 1727struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
1728 .queue_xmit = ip_queue_xmit,
1729 .send_check = tcp_v4_send_check,
1730 .rebuild_header = inet_sk_rebuild_header,
1731 .conn_request = tcp_v4_conn_request,
1732 .syn_recv_sock = tcp_v4_syn_recv_sock,
1733 .remember_stamp = tcp_v4_remember_stamp,
1734 .net_header_len = sizeof(struct iphdr),
1735 .setsockopt = ip_setsockopt,
1736 .getsockopt = ip_getsockopt,
1737 .addr2sockaddr = inet_csk_addr2sockaddr,
1738 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 1739 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 1740#ifdef CONFIG_COMPAT
543d9cfe
ACM
1741 .compat_setsockopt = compat_ip_setsockopt,
1742 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 1743#endif
1da177e4
LT
1744};
1745
cfb6eeb4 1746#ifdef CONFIG_TCP_MD5SIG
b6332e6c 1747static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 1748 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1749 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1750 .md5_add = tcp_v4_md5_add_func,
1751 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 1752};
b6332e6c 1753#endif
cfb6eeb4 1754
1da177e4
LT
1755/* NOTE: A lot of things set to zero explicitly by call to
1756 * sk_alloc() so need not be done here.
1757 */
1758static int tcp_v4_init_sock(struct sock *sk)
1759{
6687e988 1760 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1761 struct tcp_sock *tp = tcp_sk(sk);
1762
1763 skb_queue_head_init(&tp->out_of_order_queue);
1764 tcp_init_xmit_timers(sk);
1765 tcp_prequeue_init(tp);
1766
6687e988 1767 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1768 tp->mdev = TCP_TIMEOUT_INIT;
1769
1770 /* So many TCP implementations out there (incorrectly) count the
1771 * initial SYN frame in their delayed-ACK and congestion control
1772 * algorithms that we must have the following bandaid to talk
1773 * efficiently to them. -DaveM
1774 */
1775 tp->snd_cwnd = 2;
1776
1777 /* See draft-stevens-tcpca-spec-01 for discussion of the
1778 * initialization of these values.
1779 */
1780 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
1781 tp->snd_cwnd_clamp = ~0;
c1b4a7e6 1782 tp->mss_cache = 536;
1da177e4
LT
1783
1784 tp->reordering = sysctl_tcp_reordering;
6687e988 1785 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1da177e4
LT
1786
1787 sk->sk_state = TCP_CLOSE;
1788
1789 sk->sk_write_space = sk_stream_write_space;
1790 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1791
8292a17a 1792 icsk->icsk_af_ops = &ipv4_specific;
d83d8461 1793 icsk->icsk_sync_mss = tcp_sync_mss;
1794#ifdef CONFIG_TCP_MD5SIG
1795 tp->af_specific = &tcp_sock_ipv4_specific;
1796#endif
1797
1798 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1799 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1800
eb4dea58 1801 local_bh_disable();
1748376b 1802 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 1803 local_bh_enable();
1804
1805 return 0;
1806}
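/*
 * Summary of the starting point chosen above: RTO and mdev begin at
 * TCP_TIMEOUT_INIT, snd_cwnd at 2 segments (the "bandaid" noted in the
 * comment), ssthresh is effectively infinite, and mss_cache starts at the
 * conservative default of 536 bytes - so roughly 2 * 536 bytes can be in
 * flight before the first ACK - until tcp_sync_mss() recomputes it from
 * the route MTU.
 */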
1807
7d06b2e0 1808void tcp_v4_destroy_sock(struct sock *sk)
1809{
1810 struct tcp_sock *tp = tcp_sk(sk);
1811
1812 tcp_clear_xmit_timers(sk);
1813
6687e988 1814 tcp_cleanup_congestion_control(sk);
317a76f9 1815
1da177e4 1816	/* Clean up the write buffer. */
fe067e8a 1817 tcp_write_queue_purge(sk);
1818
1819 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1820 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1821
1822#ifdef CONFIG_TCP_MD5SIG
1823 /* Clean up the MD5 key list, if any */
1824 if (tp->md5sig_info) {
1825 tcp_v4_clear_md5_list(sk);
1826 kfree(tp->md5sig_info);
1827 tp->md5sig_info = NULL;
1828 }
1829#endif
1830
1831#ifdef CONFIG_NET_DMA
1832 /* Cleans up our sk_async_wait_queue */
e905a9ed 1833 __skb_queue_purge(&sk->sk_async_wait_queue);
1834#endif
1835
 1836	/* Clean up the prequeue; it really should be empty by now */
1837 __skb_queue_purge(&tp->ucopy.prequeue);
1838
1839 /* Clean up a referenced TCP bind bucket. */
463c84b9 1840 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1841 inet_put_port(sk);
1842
1843 /*
1844 * If sendmsg cached page exists, toss it.
1845 */
1846 if (sk->sk_sndmsg_page) {
1847 __free_page(sk->sk_sndmsg_page);
1848 sk->sk_sndmsg_page = NULL;
1849 }
1850
1748376b 1851 percpu_counter_dec(&tcp_sockets_allocated);
1852}
1853
1854EXPORT_SYMBOL(tcp_v4_destroy_sock);
1855
1856#ifdef CONFIG_PROC_FS
1857/* Proc filesystem TCP sock list dumping. */
1858
3ab5aee7 1859static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1da177e4 1860{
3ab5aee7 1861 return hlist_nulls_empty(head) ? NULL :
8feaf0c0 1862 list_entry(head->first, struct inet_timewait_sock, tw_node);
1863}
1864
8feaf0c0 1865static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1da177e4 1866{
1867 return !is_a_nulls(tw->tw_node.next) ?
1868 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1869}
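/*
 * tw_head()/tw_next() walk the time-wait chain of an ehash bucket.  These
 * chains are "nulls" lists: the terminator is an encoded marker rather
 * than NULL, which is why the helpers use hlist_nulls_empty() and
 * is_a_nulls() instead of plain NULL checks.  The marker lets lockless
 * (RCU) readers notice when an entry was moved to a different chain
 * mid-walk.
 */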
1870
1871static void *listening_get_next(struct seq_file *seq, void *cur)
1872{
463c84b9 1873 struct inet_connection_sock *icsk;
c25eb3bf 1874 struct hlist_nulls_node *node;
1da177e4 1875 struct sock *sk = cur;
5caea4ea 1876 struct inet_listen_hashbucket *ilb;
5799de0b 1877 struct tcp_iter_state *st = seq->private;
a4146b1b 1878 struct net *net = seq_file_net(seq);
1879
1880 if (!sk) {
1881 st->bucket = 0;
1882 ilb = &tcp_hashinfo.listening_hash[0];
1883 spin_lock_bh(&ilb->lock);
c25eb3bf 1884 sk = sk_nulls_head(&ilb->head);
1885 goto get_sk;
1886 }
5caea4ea 1887 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1888 ++st->num;
1889
1890 if (st->state == TCP_SEQ_STATE_OPENREQ) {
60236fdd 1891 struct request_sock *req = cur;
1da177e4 1892
72a3effa 1893 icsk = inet_csk(st->syn_wait_sk);
1894 req = req->dl_next;
1895 while (1) {
1896 while (req) {
bdccc4ca 1897 if (req->rsk_ops->family == st->family) {
1898 cur = req;
1899 goto out;
1900 }
1901 req = req->dl_next;
1902 }
72a3effa 1903 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1904 break;
1905get_req:
463c84b9 1906 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1907 }
1908 sk = sk_next(st->syn_wait_sk);
1909 st->state = TCP_SEQ_STATE_LISTENING;
463c84b9 1910 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 1911 } else {
e905a9ed 1912 icsk = inet_csk(sk);
1913 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1914 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1da177e4 1915 goto start_req;
463c84b9 1916 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1917 sk = sk_next(sk);
1918 }
1919get_sk:
c25eb3bf 1920 sk_nulls_for_each_from(sk, node) {
878628fb 1921 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
1922 cur = sk;
1923 goto out;
1924 }
e905a9ed 1925 icsk = inet_csk(sk);
1926 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1927 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1928start_req:
1929 st->uid = sock_i_uid(sk);
1930 st->syn_wait_sk = sk;
1931 st->state = TCP_SEQ_STATE_OPENREQ;
1932 st->sbucket = 0;
1933 goto get_req;
1934 }
463c84b9 1935 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 1936 }
5caea4ea 1937 spin_unlock_bh(&ilb->lock);
0f7ff927 1938 if (++st->bucket < INET_LHTABLE_SIZE) {
1939 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1940 spin_lock_bh(&ilb->lock);
c25eb3bf 1941 sk = sk_nulls_head(&ilb->head);
1942 goto get_sk;
1943 }
1944 cur = NULL;
1945out:
1946 return cur;
1947}
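/*
 * listening_get_next() drives the LISTENING/OPENREQ half of the iterator.
 * Each listening hash bucket is walked under its ilb->lock; for every
 * listening socket the SYN queue is walked as well under that socket's
 * syn_wait_lock, with st->state switched to TCP_SEQ_STATE_OPENREQ while
 * request_socks are being returned.  st->bucket, st->sbucket,
 * st->syn_wait_sk and st->uid carry the position between calls, and the
 * matching unlocks happen either here or in tcp_seq_stop().
 */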
1948
1949static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1950{
1951 void *rc = listening_get_next(seq, NULL);
1952
1953 while (rc && *pos) {
1954 rc = listening_get_next(seq, rc);
1955 --*pos;
1956 }
1957 return rc;
1958}
1959
1960static inline int empty_bucket(struct tcp_iter_state *st)
1961{
1962 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
1963 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
1964}
1965
1966static void *established_get_first(struct seq_file *seq)
1967{
5799de0b 1968 struct tcp_iter_state *st = seq->private;
a4146b1b 1969 struct net *net = seq_file_net(seq);
1970 void *rc = NULL;
1971
6e04e021 1972 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
1da177e4 1973 struct sock *sk;
3ab5aee7 1974 struct hlist_nulls_node *node;
8feaf0c0 1975 struct inet_timewait_sock *tw;
9db66bdc 1976 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 1977
1978 /* Lockless fast path for the common case of empty buckets */
1979 if (empty_bucket(st))
1980 continue;
1981
9db66bdc 1982 spin_lock_bh(lock);
3ab5aee7 1983 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 1984 if (sk->sk_family != st->family ||
878628fb 1985 !net_eq(sock_net(sk), net)) {
1986 continue;
1987 }
1988 rc = sk;
1989 goto out;
1990 }
1991 st->state = TCP_SEQ_STATE_TIME_WAIT;
8feaf0c0 1992 inet_twsk_for_each(tw, node,
dbca9b27 1993 &tcp_hashinfo.ehash[st->bucket].twchain) {
28518fc1 1994 if (tw->tw_family != st->family ||
878628fb 1995 !net_eq(twsk_net(tw), net)) {
1996 continue;
1997 }
1998 rc = tw;
1999 goto out;
2000 }
9db66bdc 2001 spin_unlock_bh(lock);
2002 st->state = TCP_SEQ_STATE_ESTABLISHED;
2003 }
2004out:
2005 return rc;
2006}
2007
2008static void *established_get_next(struct seq_file *seq, void *cur)
2009{
2010 struct sock *sk = cur;
8feaf0c0 2011 struct inet_timewait_sock *tw;
3ab5aee7 2012 struct hlist_nulls_node *node;
5799de0b 2013 struct tcp_iter_state *st = seq->private;
a4146b1b 2014 struct net *net = seq_file_net(seq);
2015
2016 ++st->num;
2017
2018 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2019 tw = cur;
2020 tw = tw_next(tw);
2021get_tw:
878628fb 2022 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2023 tw = tw_next(tw);
2024 }
2025 if (tw) {
2026 cur = tw;
2027 goto out;
2028 }
9db66bdc 2029 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2030 st->state = TCP_SEQ_STATE_ESTABLISHED;
2031
 2032		/* Look for the next non-empty bucket */
2033 while (++st->bucket < tcp_hashinfo.ehash_size &&
2034 empty_bucket(st))
2035 ;
2036 if (st->bucket >= tcp_hashinfo.ehash_size)
2037 return NULL;
2038
9db66bdc 2039 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
3ab5aee7 2040 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
1da177e4 2041 } else
3ab5aee7 2042 sk = sk_nulls_next(sk);
1da177e4 2043
3ab5aee7 2044 sk_nulls_for_each_from(sk, node) {
878628fb 2045 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2046 goto found;
2047 }
2048
2049 st->state = TCP_SEQ_STATE_TIME_WAIT;
dbca9b27 2050 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2051 goto get_tw;
2052found:
2053 cur = sk;
2054out:
2055 return cur;
2056}
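/*
 * The established iterator visits each ehash bucket twice: first the
 * regular chain (TCP_SEQ_STATE_ESTABLISHED), then the bucket's twchain of
 * time-wait sockets (TCP_SEQ_STATE_TIME_WAIT).  The per-bucket lock from
 * inet_ehash_lockp() is held while entries from that bucket are reported
 * and is dropped before skipping ahead over empty buckets.
 */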
2057
2058static void *established_get_idx(struct seq_file *seq, loff_t pos)
2059{
2060 void *rc = established_get_first(seq);
2061
2062 while (rc && pos) {
2063 rc = established_get_next(seq, rc);
2064 --pos;
7174259e 2065 }
2066 return rc;
2067}
2068
2069static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2070{
2071 void *rc;
5799de0b 2072 struct tcp_iter_state *st = seq->private;
1da177e4 2073
2074 st->state = TCP_SEQ_STATE_LISTENING;
2075 rc = listening_get_idx(seq, &pos);
2076
2077 if (!rc) {
2078 st->state = TCP_SEQ_STATE_ESTABLISHED;
2079 rc = established_get_idx(seq, pos);
2080 }
2081
2082 return rc;
2083}
2084
2085static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2086{
5799de0b 2087 struct tcp_iter_state *st = seq->private;
2088 st->state = TCP_SEQ_STATE_LISTENING;
2089 st->num = 0;
2090 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2091}
2092
2093static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2094{
2095 void *rc = NULL;
5799de0b 2096 struct tcp_iter_state *st;
2097
2098 if (v == SEQ_START_TOKEN) {
2099 rc = tcp_get_idx(seq, 0);
2100 goto out;
2101 }
2102 st = seq->private;
2103
2104 switch (st->state) {
2105 case TCP_SEQ_STATE_OPENREQ:
2106 case TCP_SEQ_STATE_LISTENING:
2107 rc = listening_get_next(seq, v);
2108 if (!rc) {
2109 st->state = TCP_SEQ_STATE_ESTABLISHED;
2110 rc = established_get_first(seq);
2111 }
2112 break;
2113 case TCP_SEQ_STATE_ESTABLISHED:
2114 case TCP_SEQ_STATE_TIME_WAIT:
2115 rc = established_get_next(seq, v);
2116 break;
2117 }
2118out:
2119 ++*pos;
2120 return rc;
2121}
2122
2123static void tcp_seq_stop(struct seq_file *seq, void *v)
2124{
5799de0b 2125 struct tcp_iter_state *st = seq->private;
2126
2127 switch (st->state) {
2128 case TCP_SEQ_STATE_OPENREQ:
2129 if (v) {
2130 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2131 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2132 }
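		/* Fall through: the listening bucket lock is held as well. */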
2133 case TCP_SEQ_STATE_LISTENING:
2134 if (v != SEQ_START_TOKEN)
5caea4ea 2135 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2136 break;
2137 case TCP_SEQ_STATE_TIME_WAIT:
2138 case TCP_SEQ_STATE_ESTABLISHED:
2139 if (v)
9db66bdc 2140 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2141 break;
2142 }
2143}
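/*
 * tcp_seq_stop() releases whatever lock the _get_ helpers left held for
 * the entry being shown: syn_wait_lock (and, via the fallthrough, the
 * listening bucket lock) in OPENREQ state, the listening bucket lock in
 * LISTENING state, and the ehash bucket lock in the ESTABLISHED and
 * TIME_WAIT states, so a partial read never leaks a bucket lock.
 */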
2144
2145static int tcp_seq_open(struct inode *inode, struct file *file)
2146{
2147 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
1da177e4 2148 struct tcp_iter_state *s;
52d6f3f1 2149 int err;
1da177e4 2150
2151 err = seq_open_net(inode, file, &afinfo->seq_ops,
2152 sizeof(struct tcp_iter_state));
2153 if (err < 0)
2154 return err;
f40c8174 2155
52d6f3f1 2156 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2157 s->family = afinfo->family;
2158 return 0;
2159}
2160
6f8b13bc 2161int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2162{
2163 int rc = 0;
2164 struct proc_dir_entry *p;
2165
2166 afinfo->seq_fops.open = tcp_seq_open;
2167 afinfo->seq_fops.read = seq_read;
2168 afinfo->seq_fops.llseek = seq_lseek;
2169 afinfo->seq_fops.release = seq_release_net;
7174259e 2170
2171 afinfo->seq_ops.start = tcp_seq_start;
2172 afinfo->seq_ops.next = tcp_seq_next;
2173 afinfo->seq_ops.stop = tcp_seq_stop;
2174
2175 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2176 &afinfo->seq_fops, afinfo);
2177 if (!p)
2178 rc = -ENOMEM;
2179 return rc;
2180}
2181
6f8b13bc 2182void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2183{
6f8b13bc 2184 proc_net_remove(net, afinfo->name);
2185}
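/*
 * tcp_proc_register()/tcp_proc_unregister() plug a tcp_seq_afinfo
 * description into a namespace's /proc/net.  tcp4_proc_init_net() and
 * tcp4_proc_exit_net() below use them as pernet init/exit hooks so each
 * network namespace gets its own "tcp" file; the IPv6 code registers a
 * second afinfo the same way for "tcp6".
 */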
2186
60236fdd 2187static void get_openreq4(struct sock *sk, struct request_sock *req,
5e659e4c 2188 struct seq_file *f, int i, int uid, int *len)
1da177e4 2189{
2e6599cb 2190 const struct inet_request_sock *ireq = inet_rsk(req);
2191 int ttd = req->expires - jiffies;
2192
2193 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2194 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
1da177e4 2195 i,
2e6599cb 2196 ireq->loc_addr,
1da177e4 2197 ntohs(inet_sk(sk)->sport),
2198 ireq->rmt_addr,
2199 ntohs(ireq->rmt_port),
2200 TCP_SYN_RECV,
2201 0, 0, /* could print option size, but that is af dependent. */
2202 1, /* timers active (only the expire timer) */
2203 jiffies_to_clock_t(ttd),
2204 req->retrans,
2205 uid,
2206 0, /* non standard timer */
2207 0, /* open_requests have no inode */
2208 atomic_read(&sk->sk_refcnt),
2209 req,
2210 len);
2211}
2212
5e659e4c 2213static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2214{
2215 int timer_active;
2216 unsigned long timer_expires;
2217 struct tcp_sock *tp = tcp_sk(sk);
2218 const struct inet_connection_sock *icsk = inet_csk(sk);
2219 struct inet_sock *inet = inet_sk(sk);
2220 __be32 dest = inet->daddr;
2221 __be32 src = inet->rcv_saddr;
2222 __u16 destp = ntohs(inet->dport);
2223 __u16 srcp = ntohs(inet->sport);
2224
463c84b9 2225 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2226 timer_active = 1;
2227 timer_expires = icsk->icsk_timeout;
2228 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2229 timer_active = 4;
463c84b9 2230 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2231 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2232 timer_active = 2;
cf4c6bf8 2233 timer_expires = sk->sk_timer.expires;
2234 } else {
2235 timer_active = 0;
2236 timer_expires = jiffies;
2237 }
2238
5e659e4c 2239 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
7be87351 2240 "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
cf4c6bf8 2241 i, src, srcp, dest, destp, sk->sk_state,
47da8ee6 2242 tp->write_seq - tp->snd_una,
cf4c6bf8 2243 sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
7174259e 2244 (tp->rcv_nxt - tp->copied_seq),
2245 timer_active,
2246 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2247 icsk->icsk_retransmits,
cf4c6bf8 2248 sock_i_uid(sk),
6687e988 2249 icsk->icsk_probes_out,
2250 sock_i_ino(sk),
2251 atomic_read(&sk->sk_refcnt), sk,
2252 jiffies_to_clock_t(icsk->icsk_rto),
2253 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2254 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2255 tp->snd_cwnd,
2256 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
2257 len);
2258}
2259
7174259e 2260static void get_timewait4_sock(struct inet_timewait_sock *tw,
5e659e4c 2261 struct seq_file *f, int i, int *len)
1da177e4 2262{
23f33c2d 2263 __be32 dest, src;
2264 __u16 destp, srcp;
2265 int ttd = tw->tw_ttd - jiffies;
2266
2267 if (ttd < 0)
2268 ttd = 0;
2269
2270 dest = tw->tw_daddr;
2271 src = tw->tw_rcv_saddr;
2272 destp = ntohs(tw->tw_dport);
2273 srcp = ntohs(tw->tw_sport);
2274
2275 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2276 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
2277 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2278 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
5e659e4c 2279 atomic_read(&tw->tw_refcnt), tw, len);
2280}
2281
2282#define TMPSZ 150
2283
2284static int tcp4_seq_show(struct seq_file *seq, void *v)
2285{
5799de0b 2286 struct tcp_iter_state *st;
5e659e4c 2287 int len;
2288
2289 if (v == SEQ_START_TOKEN) {
2290 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2291 " sl local_address rem_address st tx_queue "
2292 "rx_queue tr tm->when retrnsmt uid timeout "
2293 "inode");
2294 goto out;
2295 }
2296 st = seq->private;
2297
2298 switch (st->state) {
2299 case TCP_SEQ_STATE_LISTENING:
2300 case TCP_SEQ_STATE_ESTABLISHED:
5e659e4c 2301 get_tcp4_sock(v, seq, st->num, &len);
2302 break;
2303 case TCP_SEQ_STATE_OPENREQ:
5e659e4c 2304 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2305 break;
2306 case TCP_SEQ_STATE_TIME_WAIT:
5e659e4c 2307 get_timewait4_sock(v, seq, st->num, &len);
2308 break;
2309 }
5e659e4c 2310 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2311out:
2312 return 0;
2313}
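/*
 * tcp4_seq_show() emits one fixed-width line per entry, padded out to
 * TMPSZ - 1 characters.  Addresses and ports are printed in hex, and the
 * state column holds the numeric sk_state (01 == TCP_ESTABLISHED).  The
 * header plus a line for an established socket look roughly like this
 * (spacing approximate, values illustrative only):
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *    0: 0100007F:0016 0100007F:8E24 01 00000000:00000000 00:00000000 00000000  1000        0 12345 ...
 */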
2314
1da177e4 2315static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2316 .name = "tcp",
2317 .family = AF_INET,
2318 .seq_fops = {
2319 .owner = THIS_MODULE,
2320 },
2321 .seq_ops = {
2322 .show = tcp4_seq_show,
2323 },
2324};
2325
2326static int tcp4_proc_init_net(struct net *net)
2327{
2328 return tcp_proc_register(net, &tcp4_seq_afinfo);
2329}
2330
2331static void tcp4_proc_exit_net(struct net *net)
2332{
2333 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2334}
2335
2336static struct pernet_operations tcp4_net_ops = {
2337 .init = tcp4_proc_init_net,
2338 .exit = tcp4_proc_exit_net,
2339};
2340
2341int __init tcp4_proc_init(void)
2342{
757764f6 2343 return register_pernet_subsys(&tcp4_net_ops);
2344}
2345
2346void tcp4_proc_exit(void)
2347{
757764f6 2348 unregister_pernet_subsys(&tcp4_net_ops);
2349}
2350#endif /* CONFIG_PROC_FS */
2351
2352struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2353{
2354 struct iphdr *iph = ip_hdr(skb);
2355
2356 switch (skb->ip_summed) {
2357 case CHECKSUM_COMPLETE:
86911732 2358 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2359 skb->csum)) {
2360 skb->ip_summed = CHECKSUM_UNNECESSARY;
2361 break;
2362 }
2363
2364 /* fall through */
2365 case CHECKSUM_NONE:
2366 NAPI_GRO_CB(skb)->flush = 1;
2367 return NULL;
2368 }
2369
2370 return tcp_gro_receive(head, skb);
2371}
2372EXPORT_SYMBOL(tcp4_gro_receive);
2373
2374int tcp4_gro_complete(struct sk_buff *skb)
2375{
2376 struct iphdr *iph = ip_hdr(skb);
2377 struct tcphdr *th = tcp_hdr(skb);
2378
2379 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2380 iph->saddr, iph->daddr, 0);
2381 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2382
2383 return tcp_gro_complete(skb);
2384}
2385EXPORT_SYMBOL(tcp4_gro_complete);
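/*
 * GRO glue: tcp4_gro_receive() only hands a segment to tcp_gro_receive()
 * with a trustworthy checksum - a CHECKSUM_COMPLETE value is verified
 * against the pseudo-header above, anything unverifiable is flushed, and
 * any other ip_summed value falls through to tcp_gro_receive() unchanged.
 * tcp4_gro_complete() runs when a merged super-packet leaves GRO: it
 * seeds th->check with the pseudo-header sum for the new length and tags
 * the skb as SKB_GSO_TCPV4 so it can be resegmented later.
 */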
2386
2387struct proto tcp_prot = {
2388 .name = "TCP",
2389 .owner = THIS_MODULE,
2390 .close = tcp_close,
2391 .connect = tcp_v4_connect,
2392 .disconnect = tcp_disconnect,
463c84b9 2393 .accept = inet_csk_accept,
2394 .ioctl = tcp_ioctl,
2395 .init = tcp_v4_init_sock,
2396 .destroy = tcp_v4_destroy_sock,
2397 .shutdown = tcp_shutdown,
2398 .setsockopt = tcp_setsockopt,
2399 .getsockopt = tcp_getsockopt,
2400 .recvmsg = tcp_recvmsg,
2401 .backlog_rcv = tcp_v4_do_rcv,
2402 .hash = inet_hash,
2403 .unhash = inet_unhash,
2404 .get_port = inet_csk_get_port,
2405 .enter_memory_pressure = tcp_enter_memory_pressure,
2406 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2407 .orphan_count = &tcp_orphan_count,
2408 .memory_allocated = &tcp_memory_allocated,
2409 .memory_pressure = &tcp_memory_pressure,
2410 .sysctl_mem = sysctl_tcp_mem,
2411 .sysctl_wmem = sysctl_tcp_wmem,
2412 .sysctl_rmem = sysctl_tcp_rmem,
2413 .max_header = MAX_TCP_HEADER,
2414 .obj_size = sizeof(struct tcp_sock),
3ab5aee7 2415 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2416 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2417 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2418 .h.hashinfo = &tcp_hashinfo,
2419#ifdef CONFIG_COMPAT
2420 .compat_setsockopt = compat_tcp_setsockopt,
2421 .compat_getsockopt = compat_tcp_getsockopt,
2422#endif
2423};
2424
2425
2426static int __net_init tcp_sk_init(struct net *net)
2427{
2428 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2429 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2430}
2431
2432static void __net_exit tcp_sk_exit(struct net *net)
2433{
2434 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
d315492b 2435 inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
2436}
2437
2438static struct pernet_operations __net_initdata tcp_sk_ops = {
2439 .init = tcp_sk_init,
2440 .exit = tcp_sk_exit,
2441};
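/*
 * Per-namespace setup: tcp_sk_init() creates the kernel-internal control
 * socket (net->ipv4.tcp_sock) used to send resets and ACKs that belong to
 * no particular socket, and tcp_sk_exit() destroys it and purges any
 * remaining AF_INET time-wait sockets owned by the dying namespace.
 */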
2442
9b0f976f 2443void __init tcp_v4_init(void)
1da177e4 2444{
5caea4ea 2445 inet_hashinfo_init(&tcp_hashinfo);
046ee902 2446 if (register_pernet_device(&tcp_sk_ops))
1da177e4 2447 panic("Failed to create the TCP control socket.\n");
2448}
2449
2450EXPORT_SYMBOL(ipv4_specific);
1da177e4 2451EXPORT_SYMBOL(tcp_hashinfo);
1da177e4 2452EXPORT_SYMBOL(tcp_prot);
2453EXPORT_SYMBOL(tcp_v4_conn_request);
2454EXPORT_SYMBOL(tcp_v4_connect);
2455EXPORT_SYMBOL(tcp_v4_do_rcv);
2456EXPORT_SYMBOL(tcp_v4_remember_stamp);
2457EXPORT_SYMBOL(tcp_v4_send_check);
2458EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2459
2460#ifdef CONFIG_PROC_FS
2461EXPORT_SYMBOL(tcp_proc_register);
2462EXPORT_SYMBOL(tcp_proc_unregister);
2463#endif
1da177e4 2464EXPORT_SYMBOL(sysctl_tcp_low_latency);
1da177e4 2465