[linux-2.6-block.git] / net / ipv4 / tcp_ipv4.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

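/* A worked example of the rule above: with net.ipv4.tcp_tw_reuse enabled,
 * an outgoing connect() may take over a four-tuple that is still in
 * TIME-WAIT once its last saved timestamp is more than one second old.
 * Starting write_seq at tw_snd_nxt + 65535 + 2 places the new sequence
 * space a full maximum window (plus a small margin) beyond anything the
 * previous incarnation could still have in flight, so stray old segments
 * are not mistaken for new data.
 */
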
1da177e4
LT
139/* This will initiate an outgoing connection. */
140int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
141{
2d7192d6 142 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
1da177e4
LT
143 struct inet_sock *inet = inet_sk(sk);
144 struct tcp_sock *tp = tcp_sk(sk);
dca8b089 145 __be16 orig_sport, orig_dport;
bada8adc 146 __be32 daddr, nexthop;
da905bd1 147 struct flowi4 *fl4;
2d7192d6 148 struct rtable *rt;
1da177e4 149 int err;
f6d8bd05 150 struct ip_options_rcu *inet_opt;
1da177e4
LT
151
152 if (addr_len < sizeof(struct sockaddr_in))
153 return -EINVAL;
154
155 if (usin->sin_family != AF_INET)
156 return -EAFNOSUPPORT;
157
158 nexthop = daddr = usin->sin_addr.s_addr;
f6d8bd05 159 inet_opt = rcu_dereference_protected(inet->inet_opt,
1e1d04e6 160 lockdep_sock_is_held(sk));
f6d8bd05 161 if (inet_opt && inet_opt->opt.srr) {
1da177e4
LT
162 if (!daddr)
163 return -EINVAL;
f6d8bd05 164 nexthop = inet_opt->opt.faddr;
1da177e4
LT
165 }
166
dca8b089
DM
167 orig_sport = inet->inet_sport;
168 orig_dport = usin->sin_port;
da905bd1
DM
169 fl4 = &inet->cork.fl.u.ip4;
170 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
b23dd4fe
DM
171 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
172 IPPROTO_TCP,
0e0d44ab 173 orig_sport, orig_dport, sk);
b23dd4fe
DM
174 if (IS_ERR(rt)) {
175 err = PTR_ERR(rt);
176 if (err == -ENETUNREACH)
f1d8cba6 177 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
b23dd4fe 178 return err;
584bdf8c 179 }
1da177e4
LT
180
181 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
182 ip_rt_put(rt);
183 return -ENETUNREACH;
184 }
185
f6d8bd05 186 if (!inet_opt || !inet_opt->opt.srr)
da905bd1 187 daddr = fl4->daddr;
1da177e4 188
c720c7e8 189 if (!inet->inet_saddr)
da905bd1 190 inet->inet_saddr = fl4->saddr;
d1e559d0 191 sk_rcv_saddr_set(sk, inet->inet_saddr);
1da177e4 192
c720c7e8 193 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
1da177e4
LT
194 /* Reset inherited state */
195 tp->rx_opt.ts_recent = 0;
196 tp->rx_opt.ts_recent_stamp = 0;
ee995283
PE
197 if (likely(!tp->repair))
198 tp->write_seq = 0;
1da177e4
LT
199 }
200
295ff7ed 201 if (tcp_death_row.sysctl_tw_recycle &&
81166dd6
DM
202 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
203 tcp_fetch_timewait_stamp(sk, &rt->dst);
1da177e4 204
c720c7e8 205 inet->inet_dport = usin->sin_port;
d1e559d0 206 sk_daddr_set(sk, daddr);
1da177e4 207
d83d8461 208 inet_csk(sk)->icsk_ext_hdr_len = 0;
f6d8bd05
ED
209 if (inet_opt)
210 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1da177e4 211
bee7ca9e 212 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
1da177e4
LT
213
	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
219 tcp_set_state(sk, TCP_SYN_SENT);
a7f5e7f1 220 err = inet_hash_connect(&tcp_death_row, sk);
1da177e4
LT
221 if (err)
222 goto failure;
223
877d1f62 224 sk_set_txhash(sk);
9e7ceb06 225
da905bd1 226 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
b23dd4fe
DM
227 inet->inet_sport, inet->inet_dport, sk);
228 if (IS_ERR(rt)) {
229 err = PTR_ERR(rt);
230 rt = NULL;
1da177e4 231 goto failure;
b23dd4fe 232 }
1da177e4 233 /* OK, now commit destination to socket. */
bcd76111 234 sk->sk_gso_type = SKB_GSO_TCPV4;
d8d1f30b 235 sk_setup_caps(sk, &rt->dst);
1da177e4 236
ee995283 237 if (!tp->write_seq && likely(!tp->repair))
c720c7e8
ED
238 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
239 inet->inet_daddr,
240 inet->inet_sport,
1da177e4
LT
241 usin->sin_port);
242
c720c7e8 243 inet->inet_id = tp->write_seq ^ jiffies;
1da177e4 244
2b916477 245 err = tcp_connect(sk);
ee995283 246
1da177e4
LT
247 rt = NULL;
248 if (err)
249 goto failure;
250
251 return 0;
252
253failure:
7174259e
ACM
254 /*
255 * This unhashes the socket and releases the local port,
256 * if necessary.
257 */
1da177e4
LT
258 tcp_set_state(sk, TCP_CLOSE);
259 ip_rt_put(rt);
260 sk->sk_route_caps = 0;
c720c7e8 261 inet->inet_dport = 0;
1da177e4
LT
262 return err;
263}
4bc2f18b 264EXPORT_SYMBOL(tcp_v4_connect);
1da177e4 265
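/* A minimal sketch of how an in-kernel caller reaches tcp_v4_connect(),
 * assuming the standard kernel socket helpers (illustrative only; the
 * destination below is hypothetical):
 *
 *	struct socket *sock;
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *		.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
 *	};
 *	int err;
 *
 *	err = sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
 *			       IPPROTO_TCP, &sock);
 *	if (!err)
 *		err = kernel_connect(sock, (struct sockaddr *)&addr,
 *				     sizeof(addr), 0);
 *
 * kernel_connect() takes the socket lock and ends up in the
 * .connect = tcp_v4_connect handler of tcp_prot.
 */
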
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if the socket was owned by the user
 * at the time tcp_v4_err() was called to handle the ICMP message.
 */
4fab9071 271void tcp_v4_mtu_reduced(struct sock *sk)
1da177e4
LT
272{
273 struct dst_entry *dst;
274 struct inet_sock *inet = inet_sk(sk);
563d34d0 275 u32 mtu = tcp_sk(sk)->mtu_info;
1da177e4 276
80d0a69f
DM
277 dst = inet_csk_update_pmtu(sk, mtu);
278 if (!dst)
1da177e4
LT
279 return;
280
	/* Something is about to be wrong... Remember soft error
	 * for the case this connection will not be able to recover.
	 */
284 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
285 sk->sk_err_soft = EMSGSIZE;
286
287 mtu = dst_mtu(dst);
288
289 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
482fc609 290 ip_sk_accept_pmtu(sk) &&
d83d8461 291 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
1da177e4
LT
292 tcp_sync_mss(sk, mtu);
293
294 /* Resend the TCP packet because it's
295 * clear that the old packet has been
296 * dropped. This is the new "fast" path mtu
297 * discovery.
298 */
299 tcp_simple_retransmit(sk);
300 } /* else let the usual retransmit timer handle it */
301}
4fab9071 302EXPORT_SYMBOL(tcp_v4_mtu_reduced);
1da177e4 303
55be7a9c
DM
304static void do_redirect(struct sk_buff *skb, struct sock *sk)
305{
306 struct dst_entry *dst = __sk_dst_check(sk, 0);
307
1ed5c48f 308 if (dst)
6700c270 309 dst->ops->redirect(dst, sk, skb);
55be7a9c
DM
310}
311
26e37360
ED
312
313/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
9cf74903 314void tcp_req_err(struct sock *sk, u32 seq, bool abort)
26e37360
ED
315{
316 struct request_sock *req = inet_reqsk(sk);
317 struct net *net = sock_net(sk);
318
319 /* ICMPs are not backlogged, hence we cannot get
320 * an established socket here.
321 */
26e37360 322 if (seq != tcp_rsk(req)->snt_isn) {
02a1d6e7 323 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
9cf74903 324 } else if (abort) {
26e37360
ED
325 /*
326 * Still in SYN_RECV, just remove it silently.
327 * There is no good way to pass the error to the newly
328 * created socket, and POSIX does not want network
329 * errors returned from accept().
330 */
c6973669 331 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
9caad864 332 tcp_listendrop(req->rsk_listener);
26e37360 333 }
ef84d8ce 334 reqsk_put(req);
26e37360
ED
335}
336EXPORT_SYMBOL(tcp_req_err);
337
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic".  When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
353
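/* For example, an ICMP "destination unreachable, port unreachable" message
 * arrives with type = ICMP_DEST_UNREACH (3) and code = ICMP_PORT_UNREACH (3),
 * so the positive form described above is (3 << 8) | 3 = 0x0303; the error
 * actually reported to the socket then comes from icmp_err_convert[code].errno,
 * ECONNREFUSED in this case.
 */
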
4d1a2d9e 354void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
1da177e4 355{
b71d1d42 356 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
4d1a2d9e 357 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
f1ecd5d9 358 struct inet_connection_sock *icsk;
1da177e4
LT
359 struct tcp_sock *tp;
360 struct inet_sock *inet;
4d1a2d9e
DL
361 const int type = icmp_hdr(icmp_skb)->type;
362 const int code = icmp_hdr(icmp_skb)->code;
1da177e4 363 struct sock *sk;
f1ecd5d9 364 struct sk_buff *skb;
0a672f74
YC
365 struct request_sock *fastopen;
366 __u32 seq, snd_una;
f1ecd5d9 367 __u32 remaining;
1da177e4 368 int err;
4d1a2d9e 369 struct net *net = dev_net(icmp_skb->dev);
1da177e4 370
26e37360
ED
371 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
372 th->dest, iph->saddr, ntohs(th->source),
373 inet_iif(icmp_skb));
1da177e4 374 if (!sk) {
5d3848bc 375 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
1da177e4
LT
376 return;
377 }
378 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 379 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
380 return;
381 }
26e37360
ED
382 seq = ntohl(th->seq);
383 if (sk->sk_state == TCP_NEW_SYN_RECV)
9cf74903
ED
384 return tcp_req_err(sk, seq,
385 type == ICMP_PARAMETERPROB ||
386 type == ICMP_TIME_EXCEEDED ||
387 (type == ICMP_DEST_UNREACH &&
388 (code == ICMP_NET_UNREACH ||
389 code == ICMP_HOST_UNREACH)));
1da177e4
LT
390
391 bh_lock_sock(sk);
392 /* If too many ICMPs get dropped on busy
393 * servers this needs to be solved differently.
563d34d0
ED
394 * We do take care of PMTU discovery (RFC1191) special case :
395 * we can receive locally generated ICMP messages while socket is held.
1da177e4 396 */
b74aa930
ED
397 if (sock_owned_by_user(sk)) {
398 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
02a1d6e7 399 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
b74aa930 400 }
1da177e4
LT
401 if (sk->sk_state == TCP_CLOSE)
402 goto out;
403
97e3ecd1 404 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
02a1d6e7 405 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
97e3ecd1 406 goto out;
407 }
408
f1ecd5d9 409 icsk = inet_csk(sk);
1da177e4 410 tp = tcp_sk(sk);
0a672f74
YC
411 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
412 fastopen = tp->fastopen_rsk;
413 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
1da177e4 414 if (sk->sk_state != TCP_LISTEN &&
0a672f74 415 !between(seq, snd_una, tp->snd_nxt)) {
02a1d6e7 416 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
417 goto out;
418 }
419
420 switch (type) {
55be7a9c
DM
421 case ICMP_REDIRECT:
422 do_redirect(icmp_skb, sk);
423 goto out;
1da177e4
LT
424 case ICMP_SOURCE_QUENCH:
425 /* Just silently ignore these. */
426 goto out;
427 case ICMP_PARAMETERPROB:
428 err = EPROTO;
429 break;
430 case ICMP_DEST_UNREACH:
431 if (code > NR_ICMP_UNREACH)
432 goto out;
433
434 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
			 * they should go through unfragmented).
			 */
439 if (sk->sk_state == TCP_LISTEN)
440 goto out;
441
563d34d0 442 tp->mtu_info = info;
144d56e9 443 if (!sock_owned_by_user(sk)) {
563d34d0 444 tcp_v4_mtu_reduced(sk);
144d56e9
ED
445 } else {
446 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
447 sock_hold(sk);
448 }
1da177e4
LT
449 goto out;
450 }
451
452 err = icmp_err_convert[code].errno;
f1ecd5d9
DL
453 /* check if icmp_skb allows revert of backoff
454 * (see draft-zimmermann-tcp-lcd) */
455 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
456 break;
457 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
0a672f74 458 !icsk->icsk_backoff || fastopen)
f1ecd5d9
DL
459 break;
460
8f49c270
DM
461 if (sock_owned_by_user(sk))
462 break;
463
f1ecd5d9 464 icsk->icsk_backoff--;
fcdd1cf4
ED
465 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
466 TCP_TIMEOUT_INIT;
467 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
f1ecd5d9
DL
468
469 skb = tcp_write_queue_head(sk);
470 BUG_ON(!skb);
471
7faee5c0
ED
472 remaining = icsk->icsk_rto -
473 min(icsk->icsk_rto,
474 tcp_time_stamp - tcp_skb_timestamp(skb));
f1ecd5d9
DL
475
476 if (remaining) {
477 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
478 remaining, TCP_RTO_MAX);
f1ecd5d9
DL
479 } else {
480 /* RTO revert clocked out retransmission.
481 * Will retransmit now */
482 tcp_retransmit_timer(sk);
483 }
484
1da177e4
LT
485 break;
486 case ICMP_TIME_EXCEEDED:
487 err = EHOSTUNREACH;
488 break;
489 default:
490 goto out;
491 }
492
493 switch (sk->sk_state) {
1da177e4 494 case TCP_SYN_SENT:
0a672f74
YC
495 case TCP_SYN_RECV:
		/* Only in fast or simultaneous open.  If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
51456b29 499 if (fastopen && !fastopen->sk)
0a672f74
YC
500 break;
501
1da177e4 502 if (!sock_owned_by_user(sk)) {
1da177e4
LT
503 sk->sk_err = err;
504
505 sk->sk_error_report(sk);
506
507 tcp_done(sk);
508 } else {
509 sk->sk_err_soft = err;
510 }
511 goto out;
512 }
513
	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */
529
530 inet = inet_sk(sk);
531 if (!sock_owned_by_user(sk) && inet->recverr) {
532 sk->sk_err = err;
533 sk->sk_error_report(sk);
534 } else { /* Only an error on timeout */
535 sk->sk_err_soft = err;
536 }
537
538out:
539 bh_unlock_sock(sk);
540 sock_put(sk);
541}
542
28850dc7 543void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1da177e4 544{
aa8223c7 545 struct tcphdr *th = tcp_hdr(skb);
1da177e4 546
84fa7933 547 if (skb->ip_summed == CHECKSUM_PARTIAL) {
419f9f89 548 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
663ead3b 549 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 550 skb->csum_offset = offsetof(struct tcphdr, check);
1da177e4 551 } else {
419f9f89 552 th->check = tcp_v4_check(skb->len, saddr, daddr,
07f0757a 553 csum_partial(th,
1da177e4
LT
554 th->doff << 2,
555 skb->csum));
556 }
557}
558
419f9f89 559/* This routine computes an IPv4 TCP checksum. */
bb296246 560void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
419f9f89 561{
cf533ea5 562 const struct inet_sock *inet = inet_sk(sk);
419f9f89
HX
563
564 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
565}
4bc2f18b 566EXPORT_SYMBOL(tcp_v4_send_check);
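/* A short note on the two cases above: for CHECKSUM_PARTIAL the stack only
 * seeds th->check with the folded pseudo-header sum
 * ~csum(saddr, daddr, 0, IPPROTO_TCP, len) and points
 * csum_start/csum_offset at the checksum field, leaving the NIC (or
 * skb_checksum_help()) to add in the TCP header and payload bytes;
 * otherwise the full checksum is computed in software right here.
 */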

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
580
a00e7444 581static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
1da177e4 582{
cf533ea5 583 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4
YH
584 struct {
585 struct tcphdr th;
586#ifdef CONFIG_TCP_MD5SIG
714e85be 587 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
cfb6eeb4
YH
588#endif
589 } rep;
1da177e4 590 struct ip_reply_arg arg;
cfb6eeb4 591#ifdef CONFIG_TCP_MD5SIG
e46787f0 592 struct tcp_md5sig_key *key = NULL;
658ddaaf
SL
593 const __u8 *hash_location = NULL;
594 unsigned char newhash[16];
595 int genhash;
596 struct sock *sk1 = NULL;
cfb6eeb4 597#endif
a86b1e30 598 struct net *net;
1da177e4
LT
599
600 /* Never send a reset in response to a reset. */
601 if (th->rst)
602 return;
603
c3658e8d
ED
604 /* If sk not NULL, it means we did a successful lookup and incoming
605 * route had to be correct. prequeue might have dropped our dst.
606 */
607 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
1da177e4
LT
608 return;
609
610 /* Swap the send and the receive. */
cfb6eeb4
YH
611 memset(&rep, 0, sizeof(rep));
612 rep.th.dest = th->source;
613 rep.th.source = th->dest;
614 rep.th.doff = sizeof(struct tcphdr) / 4;
615 rep.th.rst = 1;
1da177e4
LT
616
617 if (th->ack) {
cfb6eeb4 618 rep.th.seq = th->ack_seq;
1da177e4 619 } else {
cfb6eeb4
YH
620 rep.th.ack = 1;
621 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
622 skb->len - (th->doff << 2));
1da177e4
LT
623 }
624
7174259e 625 memset(&arg, 0, sizeof(arg));
cfb6eeb4
YH
626 arg.iov[0].iov_base = (unsigned char *)&rep;
627 arg.iov[0].iov_len = sizeof(rep.th);
628
0f85feae 629 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
cfb6eeb4 630#ifdef CONFIG_TCP_MD5SIG
3b24d854 631 rcu_read_lock();
658ddaaf 632 hash_location = tcp_parse_md5sig_option(th);
271c3b9b 633 if (sk && sk_fullsock(sk)) {
e46787f0
FW
634 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
635 &ip_hdr(skb)->saddr, AF_INET);
636 } else if (hash_location) {
		/*
		 * The active side is gone.  Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * that listening socket.  We do not weaken security here:
		 * the incoming packet is checked against the md5 hash computed
		 * with the key we find; no RST is generated if the hash
		 * doesn't match.
		 */
a583636a
CG
644 sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
645 ip_hdr(skb)->saddr,
da5e3630 646 th->source, ip_hdr(skb)->daddr,
658ddaaf
SL
647 ntohs(th->source), inet_iif(skb));
648 /* don't send rst if it can't find key */
649 if (!sk1)
3b24d854
ED
650 goto out;
651
658ddaaf
SL
652 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
653 &ip_hdr(skb)->saddr, AF_INET);
654 if (!key)
3b24d854
ED
655 goto out;
656
658ddaaf 657
39f8e58e 658 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
658ddaaf 659 if (genhash || memcmp(hash_location, newhash, 16) != 0)
3b24d854
ED
660 goto out;
661
658ddaaf
SL
662 }
663
cfb6eeb4
YH
664 if (key) {
665 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
666 (TCPOPT_NOP << 16) |
667 (TCPOPT_MD5SIG << 8) |
668 TCPOLEN_MD5SIG);
669 /* Update length and the length the header thinks exists */
670 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
671 rep.th.doff = arg.iov[0].iov_len / 4;
672
49a72dfb 673 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
78e645cb
IJ
674 key, ip_hdr(skb)->saddr,
675 ip_hdr(skb)->daddr, &rep.th);
cfb6eeb4
YH
676 }
677#endif
eddc9ec5
ACM
678 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
679 ip_hdr(skb)->saddr, /* XXX */
52cd5750 680 arg.iov[0].iov_len, IPPROTO_TCP, 0);
1da177e4 681 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
271c3b9b
FW
682 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
683
e2446eaa 684 /* When socket is gone, all binding information is lost.
4c675258
AK
685 * routing might fail in this case. No choice here, if we choose to force
686 * input interface, we will misroute in case of asymmetric route.
e2446eaa 687 */
4c675258
AK
688 if (sk)
689 arg.bound_dev_if = sk->sk_bound_dev_if;
1da177e4 690
271c3b9b
FW
691 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
692 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
693
66b13d99 694 arg.tos = ip_hdr(skb)->tos;
bdbbb852
ED
695 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
696 skb, &TCP_SKB_CB(skb)->header.h4.opt,
24a2d43d
ED
697 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
698 &arg, arg.iov[0].iov_len);
1da177e4 699
90bbcc60
ED
700 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
701 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
658ddaaf
SL
702
703#ifdef CONFIG_TCP_MD5SIG
3b24d854
ED
704out:
705 rcu_read_unlock();
658ddaaf 706#endif
1da177e4
LT
707}
708
/* The code following below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside of socket context, is ugly, certainly. What can I do?
 */
712
e62a123b
ED
713static void tcp_v4_send_ack(struct net *net,
714 struct sk_buff *skb, u32 seq, u32 ack,
ee684b6f 715 u32 win, u32 tsval, u32 tsecr, int oif,
88ef4a5a 716 struct tcp_md5sig_key *key,
66b13d99 717 int reply_flags, u8 tos)
1da177e4 718{
cf533ea5 719 const struct tcphdr *th = tcp_hdr(skb);
1da177e4
LT
720 struct {
721 struct tcphdr th;
714e85be 722 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
cfb6eeb4 723#ifdef CONFIG_TCP_MD5SIG
714e85be 724 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
cfb6eeb4
YH
725#endif
726 ];
1da177e4
LT
727 } rep;
728 struct ip_reply_arg arg;
729
730 memset(&rep.th, 0, sizeof(struct tcphdr));
7174259e 731 memset(&arg, 0, sizeof(arg));
1da177e4
LT
732
733 arg.iov[0].iov_base = (unsigned char *)&rep;
734 arg.iov[0].iov_len = sizeof(rep.th);
ee684b6f 735 if (tsecr) {
cfb6eeb4
YH
736 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
737 (TCPOPT_TIMESTAMP << 8) |
738 TCPOLEN_TIMESTAMP);
ee684b6f
AV
739 rep.opt[1] = htonl(tsval);
740 rep.opt[2] = htonl(tsecr);
cb48cfe8 741 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
1da177e4
LT
742 }
743
744 /* Swap the send and the receive. */
745 rep.th.dest = th->source;
746 rep.th.source = th->dest;
747 rep.th.doff = arg.iov[0].iov_len / 4;
748 rep.th.seq = htonl(seq);
749 rep.th.ack_seq = htonl(ack);
750 rep.th.ack = 1;
751 rep.th.window = htons(win);
752
cfb6eeb4 753#ifdef CONFIG_TCP_MD5SIG
cfb6eeb4 754 if (key) {
ee684b6f 755 int offset = (tsecr) ? 3 : 0;
cfb6eeb4
YH
756
757 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
758 (TCPOPT_NOP << 16) |
759 (TCPOPT_MD5SIG << 8) |
760 TCPOLEN_MD5SIG);
761 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
762 rep.th.doff = arg.iov[0].iov_len/4;
763
49a72dfb 764 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
90b7e112
AL
765 key, ip_hdr(skb)->saddr,
766 ip_hdr(skb)->daddr, &rep.th);
cfb6eeb4
YH
767 }
768#endif
88ef4a5a 769 arg.flags = reply_flags;
eddc9ec5
ACM
770 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
771 ip_hdr(skb)->saddr, /* XXX */
1da177e4
LT
772 arg.iov[0].iov_len, IPPROTO_TCP, 0);
773 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
9501f972
YH
774 if (oif)
775 arg.bound_dev_if = oif;
66b13d99 776 arg.tos = tos;
bdbbb852
ED
777 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
778 skb, &TCP_SKB_CB(skb)->header.h4.opt,
24a2d43d
ED
779 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
780 &arg, arg.iov[0].iov_len);
1da177e4 781
90bbcc60 782 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
1da177e4
LT
783}
784
785static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
786{
8feaf0c0 787 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 788 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 789
e62a123b
ED
790 tcp_v4_send_ack(sock_net(sk), skb,
791 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
7174259e 792 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
ee684b6f 793 tcp_time_stamp + tcptw->tw_ts_offset,
9501f972
YH
794 tcptw->tw_ts_recent,
795 tw->tw_bound_dev_if,
88ef4a5a 796 tcp_twsk_md5_key(tcptw),
66b13d99
ED
797 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
798 tw->tw_tos
9501f972 799 );
1da177e4 800
8feaf0c0 801 inet_twsk_put(tw);
1da177e4
LT
802}
803
a00e7444 804static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
7174259e 805 struct request_sock *req)
1da177e4 806{
168a8f58
JC
807 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
808 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
809 */
e62a123b
ED
810 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
811 tcp_sk(sk)->snd_nxt;
812
813 tcp_v4_send_ack(sock_net(sk), skb, seq,
ed53d0ab 814 tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
ee684b6f 815 tcp_time_stamp,
9501f972
YH
816 req->ts_recent,
817 0,
a915da9b
ED
818 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
819 AF_INET),
66b13d99
ED
820 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
821 ip_hdr(skb)->tos);
1da177e4
LT
822}
823
1da177e4 824/*
9bf1d83e 825 * Send a SYN-ACK after having received a SYN.
60236fdd 826 * This still operates on a request_sock only, not on a big
1da177e4
LT
827 * socket.
828 */
0f935dbe 829static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
d6274bd8 830 struct flowi *fl,
72659ecc 831 struct request_sock *req,
ca6fb065 832 struct tcp_fastopen_cookie *foc,
b3d05147 833 enum tcp_synack_type synack_type)
1da177e4 834{
2e6599cb 835 const struct inet_request_sock *ireq = inet_rsk(req);
6bd023f3 836 struct flowi4 fl4;
1da177e4 837 int err = -1;
d41db5af 838 struct sk_buff *skb;
1da177e4
LT
839
840 /* First, grab a route. */
ba3f7f04 841 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
fd80eb94 842 return -1;
1da177e4 843
b3d05147 844 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
1da177e4
LT
845
846 if (skb) {
634fb979 847 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1da177e4 848
634fb979
ED
849 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
850 ireq->ir_rmt_addr,
2e6599cb 851 ireq->opt);
b9df3cb8 852 err = net_xmit_eval(err);
1da177e4
LT
853 }
854
1da177e4
LT
855 return err;
856}
857
858/*
60236fdd 859 * IPv4 request_sock destructor.
1da177e4 860 */
60236fdd 861static void tcp_v4_reqsk_destructor(struct request_sock *req)
1da177e4 862{
a51482bd 863 kfree(inet_rsk(req)->opt);
1da177e4
LT
864}
865
cfb6eeb4
YH
866#ifdef CONFIG_TCP_MD5SIG
867/*
868 * RFC2385 MD5 checksumming requires a mapping of
869 * IP address->MD5 Key.
870 * We need to maintain these in the sk structure.
871 */
872
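/* A sketch of the userspace side (illustrative only; "secret" and "peer_ip"
 * are hypothetical): a peer key is installed with the TCP_MD5SIG socket
 * option before connect() or listen(), e.g.
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = peer_ip;
 *	md5.tcpm_keylen = strlen(secret);
 *	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * That request is routed to tcp_v4_parse_md5_keys() below, which in turn
 * calls tcp_md5_do_add()/tcp_md5_do_del().
 */
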
873/* Find the Key structure for an address. */
b83e3deb 874struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
a915da9b
ED
875 const union tcp_md5_addr *addr,
876 int family)
cfb6eeb4 877{
fd3a154a 878 const struct tcp_sock *tp = tcp_sk(sk);
a915da9b 879 struct tcp_md5sig_key *key;
a915da9b 880 unsigned int size = sizeof(struct in_addr);
fd3a154a 881 const struct tcp_md5sig_info *md5sig;
cfb6eeb4 882
a8afca03
ED
883 /* caller either holds rcu_read_lock() or socket lock */
884 md5sig = rcu_dereference_check(tp->md5sig_info,
1e1d04e6 885 lockdep_sock_is_held(sk));
a8afca03 886 if (!md5sig)
cfb6eeb4 887 return NULL;
a915da9b
ED
888#if IS_ENABLED(CONFIG_IPV6)
889 if (family == AF_INET6)
890 size = sizeof(struct in6_addr);
891#endif
b67bfe0d 892 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
a915da9b
ED
893 if (key->family != family)
894 continue;
895 if (!memcmp(&key->addr, addr, size))
896 return key;
cfb6eeb4
YH
897 }
898 return NULL;
899}
a915da9b 900EXPORT_SYMBOL(tcp_md5_do_lookup);
cfb6eeb4 901
b83e3deb 902struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
fd3a154a 903 const struct sock *addr_sk)
cfb6eeb4 904{
b52e6921 905 const union tcp_md5_addr *addr;
a915da9b 906
b52e6921 907 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
a915da9b 908 return tcp_md5_do_lookup(sk, addr, AF_INET);
cfb6eeb4 909}
cfb6eeb4
YH
910EXPORT_SYMBOL(tcp_v4_md5_lookup);
911
cfb6eeb4 912/* This can be called on a newly created socket, from other files */
a915da9b
ED
913int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
914 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
cfb6eeb4
YH
915{
916 /* Add Key to the list */
b0a713e9 917 struct tcp_md5sig_key *key;
cfb6eeb4 918 struct tcp_sock *tp = tcp_sk(sk);
a915da9b 919 struct tcp_md5sig_info *md5sig;
cfb6eeb4 920
c0353c7b 921 key = tcp_md5_do_lookup(sk, addr, family);
cfb6eeb4
YH
922 if (key) {
923 /* Pre-existing entry - just update that one. */
a915da9b 924 memcpy(key->key, newkey, newkeylen);
b0a713e9 925 key->keylen = newkeylen;
a915da9b
ED
926 return 0;
927 }
260fcbeb 928
a8afca03 929 md5sig = rcu_dereference_protected(tp->md5sig_info,
1e1d04e6 930 lockdep_sock_is_held(sk));
a915da9b
ED
931 if (!md5sig) {
932 md5sig = kmalloc(sizeof(*md5sig), gfp);
933 if (!md5sig)
cfb6eeb4 934 return -ENOMEM;
cfb6eeb4 935
a915da9b
ED
936 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
937 INIT_HLIST_HEAD(&md5sig->head);
a8afca03 938 rcu_assign_pointer(tp->md5sig_info, md5sig);
a915da9b 939 }
cfb6eeb4 940
5f3d9cb2 941 key = sock_kmalloc(sk, sizeof(*key), gfp);
a915da9b
ED
942 if (!key)
943 return -ENOMEM;
71cea17e 944 if (!tcp_alloc_md5sig_pool()) {
5f3d9cb2 945 sock_kfree_s(sk, key, sizeof(*key));
a915da9b 946 return -ENOMEM;
cfb6eeb4 947 }
a915da9b
ED
948
949 memcpy(key->key, newkey, newkeylen);
950 key->keylen = newkeylen;
951 key->family = family;
952 memcpy(&key->addr, addr,
953 (family == AF_INET6) ? sizeof(struct in6_addr) :
954 sizeof(struct in_addr));
955 hlist_add_head_rcu(&key->node, &md5sig->head);
cfb6eeb4
YH
956 return 0;
957}
a915da9b 958EXPORT_SYMBOL(tcp_md5_do_add);
cfb6eeb4 959
a915da9b 960int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
cfb6eeb4 961{
a915da9b
ED
962 struct tcp_md5sig_key *key;
963
c0353c7b 964 key = tcp_md5_do_lookup(sk, addr, family);
a915da9b
ED
965 if (!key)
966 return -ENOENT;
967 hlist_del_rcu(&key->node);
5f3d9cb2 968 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
a915da9b 969 kfree_rcu(key, rcu);
a915da9b 970 return 0;
cfb6eeb4 971}
a915da9b 972EXPORT_SYMBOL(tcp_md5_do_del);
cfb6eeb4 973
e0683e70 974static void tcp_clear_md5_list(struct sock *sk)
cfb6eeb4
YH
975{
976 struct tcp_sock *tp = tcp_sk(sk);
a915da9b 977 struct tcp_md5sig_key *key;
b67bfe0d 978 struct hlist_node *n;
a8afca03 979 struct tcp_md5sig_info *md5sig;
cfb6eeb4 980
a8afca03
ED
981 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
982
b67bfe0d 983 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
a915da9b 984 hlist_del_rcu(&key->node);
5f3d9cb2 985 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
a915da9b 986 kfree_rcu(key, rcu);
cfb6eeb4
YH
987 }
988}
989
7174259e
ACM
990static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
991 int optlen)
cfb6eeb4
YH
992{
993 struct tcp_md5sig cmd;
994 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
cfb6eeb4
YH
995
996 if (optlen < sizeof(cmd))
997 return -EINVAL;
998
7174259e 999 if (copy_from_user(&cmd, optval, sizeof(cmd)))
cfb6eeb4
YH
1000 return -EFAULT;
1001
1002 if (sin->sin_family != AF_INET)
1003 return -EINVAL;
1004
64a124ed 1005 if (!cmd.tcpm_keylen)
a915da9b
ED
1006 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1007 AF_INET);
cfb6eeb4
YH
1008
1009 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1010 return -EINVAL;
1011
a915da9b
ED
1012 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1013 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1014 GFP_KERNEL);
cfb6eeb4
YH
1015}
1016
49a72dfb
AL
1017static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1018 __be32 daddr, __be32 saddr, int nbytes)
cfb6eeb4 1019{
cfb6eeb4 1020 struct tcp4_pseudohdr *bp;
49a72dfb 1021 struct scatterlist sg;
cfb6eeb4
YH
1022
1023 bp = &hp->md5_blk.ip4;
cfb6eeb4
YH
1024
1025 /*
49a72dfb 1026 * 1. the TCP pseudo-header (in the order: source IP address,
cfb6eeb4
YH
1027 * destination IP address, zero-padded protocol number, and
1028 * segment length)
1029 */
1030 bp->saddr = saddr;
1031 bp->daddr = daddr;
1032 bp->pad = 0;
076fb722 1033 bp->protocol = IPPROTO_TCP;
49a72dfb 1034 bp->len = cpu_to_be16(nbytes);
c7da57a1 1035
49a72dfb 1036 sg_init_one(&sg, bp, sizeof(*bp));
cf80e0e4
HX
1037 ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
1038 return crypto_ahash_update(hp->md5_req);
49a72dfb
AL
1039}
1040
a915da9b 1041static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
318cf7aa 1042 __be32 daddr, __be32 saddr, const struct tcphdr *th)
49a72dfb
AL
1043{
1044 struct tcp_md5sig_pool *hp;
cf80e0e4 1045 struct ahash_request *req;
49a72dfb
AL
1046
1047 hp = tcp_get_md5sig_pool();
1048 if (!hp)
1049 goto clear_hash_noput;
cf80e0e4 1050 req = hp->md5_req;
49a72dfb 1051
cf80e0e4 1052 if (crypto_ahash_init(req))
49a72dfb
AL
1053 goto clear_hash;
1054 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1055 goto clear_hash;
1056 if (tcp_md5_hash_header(hp, th))
1057 goto clear_hash;
1058 if (tcp_md5_hash_key(hp, key))
1059 goto clear_hash;
cf80e0e4
HX
1060 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1061 if (crypto_ahash_final(req))
cfb6eeb4
YH
1062 goto clear_hash;
1063
cfb6eeb4 1064 tcp_put_md5sig_pool();
cfb6eeb4 1065 return 0;
49a72dfb 1066
cfb6eeb4
YH
1067clear_hash:
1068 tcp_put_md5sig_pool();
1069clear_hash_noput:
1070 memset(md5_hash, 0, 16);
49a72dfb 1071 return 1;
cfb6eeb4
YH
1072}
1073
39f8e58e
ED
1074int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1075 const struct sock *sk,
318cf7aa 1076 const struct sk_buff *skb)
cfb6eeb4 1077{
49a72dfb 1078 struct tcp_md5sig_pool *hp;
cf80e0e4 1079 struct ahash_request *req;
318cf7aa 1080 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4
YH
1081 __be32 saddr, daddr;
1082
39f8e58e
ED
1083 if (sk) { /* valid for establish/request sockets */
1084 saddr = sk->sk_rcv_saddr;
1085 daddr = sk->sk_daddr;
cfb6eeb4 1086 } else {
49a72dfb
AL
1087 const struct iphdr *iph = ip_hdr(skb);
1088 saddr = iph->saddr;
1089 daddr = iph->daddr;
cfb6eeb4 1090 }
49a72dfb
AL
1091
1092 hp = tcp_get_md5sig_pool();
1093 if (!hp)
1094 goto clear_hash_noput;
cf80e0e4 1095 req = hp->md5_req;
49a72dfb 1096
cf80e0e4 1097 if (crypto_ahash_init(req))
49a72dfb
AL
1098 goto clear_hash;
1099
1100 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1101 goto clear_hash;
1102 if (tcp_md5_hash_header(hp, th))
1103 goto clear_hash;
1104 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1105 goto clear_hash;
1106 if (tcp_md5_hash_key(hp, key))
1107 goto clear_hash;
cf80e0e4
HX
1108 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1109 if (crypto_ahash_final(req))
49a72dfb
AL
1110 goto clear_hash;
1111
1112 tcp_put_md5sig_pool();
1113 return 0;
1114
1115clear_hash:
1116 tcp_put_md5sig_pool();
1117clear_hash_noput:
1118 memset(md5_hash, 0, 16);
1119 return 1;
cfb6eeb4 1120}
49a72dfb 1121EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
cfb6eeb4 1122
ba8e275a
ED
1123#endif
1124
ff74e23f 1125/* Called with rcu_read_lock() */
ba8e275a 1126static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
ff74e23f 1127 const struct sk_buff *skb)
cfb6eeb4 1128{
ba8e275a 1129#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
cf533ea5 1138 const __u8 *hash_location = NULL;
cfb6eeb4 1139 struct tcp_md5sig_key *hash_expected;
eddc9ec5 1140 const struct iphdr *iph = ip_hdr(skb);
cf533ea5 1141 const struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 1142 int genhash;
cfb6eeb4
YH
1143 unsigned char newhash[16];
1144
a915da9b
ED
1145 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1146 AF_INET);
7d5d5525 1147 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 1148
cfb6eeb4
YH
1149 /* We've parsed the options - do we have a hash? */
1150 if (!hash_expected && !hash_location)
a2a385d6 1151 return false;
cfb6eeb4
YH
1152
1153 if (hash_expected && !hash_location) {
02a1d6e7 1154 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
a2a385d6 1155 return true;
cfb6eeb4
YH
1156 }
1157
1158 if (!hash_expected && hash_location) {
02a1d6e7 1159 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
a2a385d6 1160 return true;
cfb6eeb4
YH
1161 }
1162
1163 /* Okay, so this is hash_expected and hash_location -
1164 * so we need to calculate the checksum.
1165 */
49a72dfb
AL
1166 genhash = tcp_v4_md5_hash_skb(newhash,
1167 hash_expected,
39f8e58e 1168 NULL, skb);
cfb6eeb4
YH
1169
1170 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
e87cc472
JP
1171 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1172 &iph->saddr, ntohs(th->source),
1173 &iph->daddr, ntohs(th->dest),
1174 genhash ? " tcp_v4_calc_md5_hash failed"
1175 : "");
a2a385d6 1176 return true;
cfb6eeb4 1177 }
a2a385d6 1178 return false;
cfb6eeb4 1179#endif
ba8e275a
ED
1180 return false;
1181}
cfb6eeb4 1182
b40cf18e
ED
1183static void tcp_v4_init_req(struct request_sock *req,
1184 const struct sock *sk_listener,
16bea70a
OP
1185 struct sk_buff *skb)
1186{
1187 struct inet_request_sock *ireq = inet_rsk(req);
1188
08d2cc3b
ED
1189 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1190 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1191 ireq->no_srccheck = inet_sk(sk_listener)->transparent;
16bea70a
OP
1192 ireq->opt = tcp_v4_save_options(skb);
1193}
1194
f964629e
ED
1195static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1196 struct flowi *fl,
d94e0417
OP
1197 const struct request_sock *req,
1198 bool *strict)
1199{
1200 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1201
1202 if (strict) {
1203 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1204 *strict = true;
1205 else
1206 *strict = false;
1207 }
1208
1209 return dst;
1210}
1211
72a3effa 1212struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1da177e4 1213 .family = PF_INET,
2e6599cb 1214 .obj_size = sizeof(struct tcp_request_sock),
5db92c99 1215 .rtx_syn_ack = tcp_rtx_synack,
60236fdd
ACM
1216 .send_ack = tcp_v4_reqsk_send_ack,
1217 .destructor = tcp_v4_reqsk_destructor,
1da177e4 1218 .send_reset = tcp_v4_send_reset,
688d1945 1219 .syn_ack_timeout = tcp_syn_ack_timeout,
1da177e4
LT
1220};
1221
b2e4b3de 1222static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
2aec4a29 1223 .mss_clamp = TCP_MSS_DEFAULT,
16bea70a 1224#ifdef CONFIG_TCP_MD5SIG
fd3a154a 1225 .req_md5_lookup = tcp_v4_md5_lookup,
e3afe7b7 1226 .calc_md5_hash = tcp_v4_md5_hash_skb,
b6332e6c 1227#endif
16bea70a 1228 .init_req = tcp_v4_init_req,
fb7b37a7
OP
1229#ifdef CONFIG_SYN_COOKIES
1230 .cookie_init_seq = cookie_v4_init_sequence,
1231#endif
d94e0417 1232 .route_req = tcp_v4_route_req,
936b8bdb 1233 .init_seq = tcp_v4_init_sequence,
d6274bd8 1234 .send_synack = tcp_v4_send_synack,
16bea70a 1235};
cfb6eeb4 1236
1da177e4
LT
1237int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1238{
	/* Never answer SYNs sent to broadcast or multicast addresses */
511c3f92 1240 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1da177e4
LT
1241 goto drop;
1242
1fb6f159
OP
1243 return tcp_conn_request(&tcp_request_sock_ops,
1244 &tcp_request_sock_ipv4_ops, sk, skb);
1da177e4 1245
1da177e4 1246drop:
9caad864 1247 tcp_listendrop(sk);
1da177e4
LT
1248 return 0;
1249}
4bc2f18b 1250EXPORT_SYMBOL(tcp_v4_conn_request);
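/* tcp_v4_conn_request() is reached through the icsk_af_ops->conn_request
 * hook (see ipv4_specific at the end of this file) when a SYN arrives on a
 * TCP_LISTEN socket; tcp_conn_request() then performs the listener-side
 * processing that is shared with IPv6.
 */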
1da177e4
LT
1251
1252
1253/*
1254 * The three way handshake has completed - we got a valid synack -
1255 * now create the new socket.
1256 */
0c27171e 1257struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
60236fdd 1258 struct request_sock *req,
5e0724d0
ED
1259 struct dst_entry *dst,
1260 struct request_sock *req_unhash,
1261 bool *own_req)
1da177e4 1262{
2e6599cb 1263 struct inet_request_sock *ireq;
1da177e4
LT
1264 struct inet_sock *newinet;
1265 struct tcp_sock *newtp;
1266 struct sock *newsk;
cfb6eeb4
YH
1267#ifdef CONFIG_TCP_MD5SIG
1268 struct tcp_md5sig_key *key;
1269#endif
f6d8bd05 1270 struct ip_options_rcu *inet_opt;
1da177e4
LT
1271
1272 if (sk_acceptq_is_full(sk))
1273 goto exit_overflow;
1274
1da177e4
LT
1275 newsk = tcp_create_openreq_child(sk, req, skb);
1276 if (!newsk)
093d2823 1277 goto exit_nonewsk;
1da177e4 1278
bcd76111 1279 newsk->sk_gso_type = SKB_GSO_TCPV4;
fae6ef87 1280 inet_sk_rx_dst_set(newsk, skb);
1da177e4
LT
1281
1282 newtp = tcp_sk(newsk);
1283 newinet = inet_sk(newsk);
2e6599cb 1284 ireq = inet_rsk(req);
d1e559d0
ED
1285 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1286 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
6dd9a14e 1287 newsk->sk_bound_dev_if = ireq->ir_iif;
634fb979 1288 newinet->inet_saddr = ireq->ir_loc_addr;
f6d8bd05
ED
1289 inet_opt = ireq->opt;
1290 rcu_assign_pointer(newinet->inet_opt, inet_opt);
2e6599cb 1291 ireq->opt = NULL;
463c84b9 1292 newinet->mc_index = inet_iif(skb);
eddc9ec5 1293 newinet->mc_ttl = ip_hdr(skb)->ttl;
4c507d28 1294 newinet->rcv_tos = ip_hdr(skb)->tos;
d83d8461 1295 inet_csk(newsk)->icsk_ext_hdr_len = 0;
f6d8bd05
ED
1296 if (inet_opt)
1297 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
c720c7e8 1298 newinet->inet_id = newtp->write_seq ^ jiffies;
1da177e4 1299
dfd25fff
ED
1300 if (!dst) {
1301 dst = inet_csk_route_child_sock(sk, newsk, req);
1302 if (!dst)
1303 goto put_and_exit;
1304 } else {
1305 /* syncookie case : see end of cookie_v4_check() */
1306 }
0e734419
DM
1307 sk_setup_caps(newsk, dst);
1308
81164413
DB
1309 tcp_ca_openreq_child(newsk, dst);
1310
1da177e4 1311 tcp_sync_mss(newsk, dst_mtu(dst));
0dbaee3b 1312 newtp->advmss = dst_metric_advmss(dst);
f5fff5dc
TQ
1313 if (tcp_sk(sk)->rx_opt.user_mss &&
1314 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1315 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1316
1da177e4
LT
1317 tcp_initialize_rcv_mss(newsk);
1318
cfb6eeb4
YH
1319#ifdef CONFIG_TCP_MD5SIG
1320 /* Copy over the MD5 key from the original socket */
a915da9b
ED
1321 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1322 AF_INET);
00db4124 1323 if (key) {
cfb6eeb4
YH
1324 /*
1325 * We're using one, so create a matching key
1326 * on the newsk structure. If we fail to get
1327 * memory, then we end up not copying the key
1328 * across. Shucks.
1329 */
a915da9b
ED
1330 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1331 AF_INET, key->key, key->keylen, GFP_ATOMIC);
a465419b 1332 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
cfb6eeb4
YH
1333 }
1334#endif
1335
0e734419
DM
1336 if (__inet_inherit_port(sk, newsk) < 0)
1337 goto put_and_exit;
5e0724d0 1338 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
805c4bc0 1339 if (*own_req)
49a496c9 1340 tcp_move_syn(newtp, req);
1da177e4
LT
1341
1342 return newsk;
1343
1344exit_overflow:
02a1d6e7 1345 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
093d2823
BS
1346exit_nonewsk:
1347 dst_release(dst);
1da177e4 1348exit:
9caad864 1349 tcp_listendrop(sk);
1da177e4 1350 return NULL;
0e734419 1351put_and_exit:
e337e24d
CP
1352 inet_csk_prepare_forced_close(newsk);
1353 tcp_done(newsk);
0e734419 1354 goto exit;
1da177e4 1355}
4bc2f18b 1356EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1da177e4 1357
079096f1 1358static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1da177e4 1359{
079096f1 1360#ifdef CONFIG_SYN_COOKIES
52452c54 1361 const struct tcphdr *th = tcp_hdr(skb);
1da177e4 1362
af9b4738 1363 if (!th->syn)
461b74c3 1364 sk = cookie_v4_check(sk, skb);
1da177e4
LT
1365#endif
1366 return sk;
1367}
1368
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1377int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1378{
cfb6eeb4 1379 struct sock *rsk;
cfb6eeb4 1380
1da177e4 1381 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
404e0a8b
ED
1382 struct dst_entry *dst = sk->sk_rx_dst;
1383
bdeab991 1384 sock_rps_save_rxhash(sk, skb);
3d97379a 1385 sk_mark_napi_id(sk, skb);
404e0a8b 1386 if (dst) {
505fbcf0 1387 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
51456b29 1388 !dst->ops->check(dst, 0)) {
92101b3b
DM
1389 dst_release(dst);
1390 sk->sk_rx_dst = NULL;
1391 }
1392 }
c995ae22 1393 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1da177e4
LT
1394 return 0;
1395 }
1396
12e25e10 1397 if (tcp_checksum_complete(skb))
1da177e4
LT
1398 goto csum_err;
1399
1400 if (sk->sk_state == TCP_LISTEN) {
079096f1
ED
1401 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1402
1da177e4
LT
1403 if (!nsk)
1404 goto discard;
1da177e4 1405 if (nsk != sk) {
bdeab991 1406 sock_rps_save_rxhash(nsk, skb);
38cb5245 1407 sk_mark_napi_id(nsk, skb);
cfb6eeb4
YH
1408 if (tcp_child_process(sk, nsk, skb)) {
1409 rsk = nsk;
1da177e4 1410 goto reset;
cfb6eeb4 1411 }
1da177e4
LT
1412 return 0;
1413 }
ca55158c 1414 } else
bdeab991 1415 sock_rps_save_rxhash(sk, skb);
ca55158c 1416
72ab4a86 1417 if (tcp_rcv_state_process(sk, skb)) {
cfb6eeb4 1418 rsk = sk;
1da177e4 1419 goto reset;
cfb6eeb4 1420 }
1da177e4
LT
1421 return 0;
1422
1423reset:
cfb6eeb4 1424 tcp_v4_send_reset(rsk, skb);
1da177e4
LT
1425discard:
1426 kfree_skb(skb);
1427 /* Be careful here. If this function gets more complicated and
1428 * gcc suffers from register pressure on the x86, sk (in %ebx)
1429 * might be destroyed here. This current version compiles correctly,
1430 * but you have been warned.
1431 */
1432 return 0;
1433
1434csum_err:
90bbcc60
ED
1435 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1436 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1437 goto discard;
1438}
4bc2f18b 1439EXPORT_SYMBOL(tcp_v4_do_rcv);
1da177e4 1440
160eb5a6 1441void tcp_v4_early_demux(struct sk_buff *skb)
41063e9d 1442{
41063e9d
DM
1443 const struct iphdr *iph;
1444 const struct tcphdr *th;
1445 struct sock *sk;
41063e9d 1446
41063e9d 1447 if (skb->pkt_type != PACKET_HOST)
160eb5a6 1448 return;
41063e9d 1449
45f00f99 1450 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
160eb5a6 1451 return;
41063e9d
DM
1452
1453 iph = ip_hdr(skb);
45f00f99 1454 th = tcp_hdr(skb);
41063e9d
DM
1455
1456 if (th->doff < sizeof(struct tcphdr) / 4)
160eb5a6 1457 return;
41063e9d 1458
45f00f99 1459 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
41063e9d 1460 iph->saddr, th->source,
7011d085 1461 iph->daddr, ntohs(th->dest),
9cb429d6 1462 skb->skb_iif);
41063e9d
DM
1463 if (sk) {
1464 skb->sk = sk;
1465 skb->destructor = sock_edemux;
f7e4eb03 1466 if (sk_fullsock(sk)) {
d0c294c5 1467 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
505fbcf0 1468
41063e9d
DM
1469 if (dst)
1470 dst = dst_check(dst, 0);
92101b3b 1471 if (dst &&
505fbcf0 1472 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
92101b3b 1473 skb_dst_set_noref(skb, dst);
41063e9d
DM
1474 }
1475 }
41063e9d
DM
1476}
1477
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8) --ANK
 */
1485bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1486{
1487 struct tcp_sock *tp = tcp_sk(sk);
1488
1489 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1490 return false;
1491
1492 if (skb->len <= tcp_hdrlen(skb) &&
1493 skb_queue_len(&tp->ucopy.prequeue) == 0)
1494 return false;
1495
ca777eff
ED
1496 /* Before escaping RCU protected region, we need to take care of skb
1497 * dst. Prequeue is only enabled for established sockets.
1498 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
1499 * Instead of doing full sk_rx_dst validity here, let's perform
1500 * an optimistic check.
1501 */
1502 if (likely(sk->sk_rx_dst))
1503 skb_dst_drop(skb);
1504 else
5037e9ef 1505 skb_dst_force_safe(skb);
ca777eff 1506
b2fb4f54
ED
1507 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1508 tp->ucopy.memory += skb->truesize;
0cef6a4c
ED
1509 if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
1510 tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
b2fb4f54
ED
1511 struct sk_buff *skb1;
1512
1513 BUG_ON(sock_owned_by_user(sk));
0cef6a4c
ED
1514 __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
1515 skb_queue_len(&tp->ucopy.prequeue));
b2fb4f54 1516
0cef6a4c 1517 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
b2fb4f54 1518 sk_backlog_rcv(sk, skb1);
b2fb4f54
ED
1519
1520 tp->ucopy.memory = 0;
1521 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1522 wake_up_interruptible_sync_poll(sk_sleep(sk),
1523 POLLIN | POLLRDNORM | POLLRDBAND);
1524 if (!inet_csk_ack_scheduled(sk))
1525 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1526 (3 * tcp_rto_min(sk)) / 4,
1527 TCP_RTO_MAX);
1528 }
1529 return true;
1530}
1531EXPORT_SYMBOL(tcp_prequeue);
1532
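/* Note: prequeueing is bypassed when net.ipv4.tcp_low_latency is set
 * (sysctl_tcp_low_latency above) or when no reader is blocked in
 * tcp_recvmsg() (tp->ucopy.task is NULL); in those cases tcp_v4_rcv()
 * below falls through to tcp_v4_do_rcv() directly.
 */
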
1da177e4
LT
1533/*
1534 * From tcp_input.c
1535 */
1536
1537int tcp_v4_rcv(struct sk_buff *skb)
1538{
3b24d854 1539 struct net *net = dev_net(skb->dev);
eddc9ec5 1540 const struct iphdr *iph;
cf533ea5 1541 const struct tcphdr *th;
3b24d854 1542 bool refcounted;
1da177e4
LT
1543 struct sock *sk;
1544 int ret;
1545
1546 if (skb->pkt_type != PACKET_HOST)
1547 goto discard_it;
1548
1549 /* Count it even if it's bad */
90bbcc60 1550 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1da177e4
LT
1551
1552 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1553 goto discard_it;
1554
aa8223c7 1555 th = tcp_hdr(skb);
1da177e4
LT
1556
1557 if (th->doff < sizeof(struct tcphdr) / 4)
1558 goto bad_packet;
1559 if (!pskb_may_pull(skb, th->doff * 4))
1560 goto discard_it;
1561
1562 /* An explanation is required here, I think.
1563 * Packet length and doff are validated by header prediction,
caa20d9a 1564 * provided case of th->doff==0 is eliminated.
1da177e4 1565 * So, we defer the checks. */
ed70fcfc
TH
1566
1567 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
6a5dc9e5 1568 goto csum_error;
1da177e4 1569
aa8223c7 1570 th = tcp_hdr(skb);
eddc9ec5 1571 iph = ip_hdr(skb);
	/* This is tricky: we move IPCB to its correct location into TCP_SKB_CB();
	 * barrier() makes sure the compiler won't play aliasing games.
	 */
1575 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1576 sizeof(struct inet_skb_parm));
1577 barrier();
1578
1da177e4
LT
1579 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1580 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1581 skb->len - th->doff * 4);
1582 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
e11ecddf 1583 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
04317daf 1584 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
b82d1bb4 1585 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1da177e4
LT
1586 TCP_SKB_CB(skb)->sacked = 0;
1587
4bdc3d66 1588lookup:
a583636a 1589 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
3b24d854 1590 th->dest, &refcounted);
1da177e4
LT
1591 if (!sk)
1592 goto no_tcp_socket;
1593
bb134d5d
ED
1594process:
1595 if (sk->sk_state == TCP_TIME_WAIT)
1596 goto do_time_wait;
1597
079096f1
ED
1598 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1599 struct request_sock *req = inet_reqsk(sk);
7716682c 1600 struct sock *nsk;
079096f1
ED
1601
1602 sk = req->rsk_listener;
72923555
ED
1603 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1604 reqsk_put(req);
1605 goto discard_it;
1606 }
7716682c 1607 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1608 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1609 goto lookup;
1610 }
3b24d854
ED
1611 /* We own a reference on the listener, increase it again
1612 * as we might lose it too soon.
1613 */
7716682c 1614 sock_hold(sk);
3b24d854 1615 refcounted = true;
7716682c 1616 nsk = tcp_check_req(sk, skb, req, false);
079096f1
ED
1617 if (!nsk) {
1618 reqsk_put(req);
7716682c 1619 goto discard_and_relse;
079096f1
ED
1620 }
1621 if (nsk == sk) {
079096f1
ED
1622 reqsk_put(req);
1623 } else if (tcp_child_process(sk, nsk, skb)) {
1624 tcp_v4_send_reset(nsk, skb);
7716682c 1625 goto discard_and_relse;
079096f1 1626 } else {
7716682c 1627 sock_put(sk);
079096f1
ED
1628 return 0;
1629 }
1630 }
6cce09f8 1631 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
02a1d6e7 1632 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
d218d111 1633 goto discard_and_relse;
6cce09f8 1634 }
d218d111 1635
1da177e4
LT
1636 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1637 goto discard_and_relse;
9ea88a15 1638
9ea88a15
DP
1639 if (tcp_v4_inbound_md5_hash(sk, skb))
1640 goto discard_and_relse;
9ea88a15 1641
b59c2701 1642 nf_reset(skb);
1da177e4 1643
fda9ef5d 1644 if (sk_filter(sk, skb))
1da177e4
LT
1645 goto discard_and_relse;
1646
1647 skb->dev = NULL;
1648
e994b2f0
ED
1649 if (sk->sk_state == TCP_LISTEN) {
1650 ret = tcp_v4_do_rcv(sk, skb);
1651 goto put_and_return;
1652 }
1653
1654 sk_incoming_cpu_update(sk);
1655
c6366184 1656 bh_lock_sock_nested(sk);
a44d6eac 1657 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1658 ret = 0;
1659 if (!sock_owned_by_user(sk)) {
7bced397 1660 if (!tcp_prequeue(sk, skb))
1da177e4 1661 ret = tcp_v4_do_rcv(sk, skb);
da882c1f
ED
1662 } else if (unlikely(sk_add_backlog(sk, skb,
1663 sk->sk_rcvbuf + sk->sk_sndbuf))) {
6b03a53a 1664 bh_unlock_sock(sk);
02a1d6e7 1665 __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1666 goto discard_and_relse;
1667 }
1da177e4
LT
1668 bh_unlock_sock(sk);
1669
e994b2f0 1670put_and_return:
3b24d854
ED
1671 if (refcounted)
1672 sock_put(sk);
1da177e4
LT
1673
1674 return ret;
1675
1676no_tcp_socket:
1677 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1678 goto discard_it;
1679
12e25e10 1680 if (tcp_checksum_complete(skb)) {
6a5dc9e5 1681csum_error:
90bbcc60 1682 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1da177e4 1683bad_packet:
90bbcc60 1684 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1da177e4 1685 } else {
cfb6eeb4 1686 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
1687 }
1688
1689discard_it:
1690 /* Discard frame. */
1691 kfree_skb(skb);
e905a9ed 1692 return 0;
1da177e4
LT
1693
1694discard_and_relse:
532182cd 1695 sk_drops_add(sk, skb);
3b24d854
ED
1696 if (refcounted)
1697 sock_put(sk);
1da177e4
LT
1698 goto discard_it;
1699
1700do_time_wait:
1701 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1702 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1703 goto discard_it;
1704 }
1705
6a5dc9e5
ED
1706 if (tcp_checksum_complete(skb)) {
1707 inet_twsk_put(inet_twsk(sk));
1708 goto csum_error;
1da177e4 1709 }
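	/* tcp_timewait_state_process() tells us how to react: TCP_TW_SYN means
	 * a new incarnation of the connection may be handed to a listener,
	 * TCP_TW_ACK asks us to (re)send an ACK, TCP_TW_RST asks for a reset,
	 * and TCP_TW_SUCCESS means nothing further has to be sent.
	 */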
9469c7b4 1710 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 1711 case TCP_TW_SYN: {
c346dca1 1712 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
a583636a
CG
1713 &tcp_hashinfo, skb,
1714 __tcp_hdrlen(th),
da5e3630 1715 iph->saddr, th->source,
eddc9ec5 1716 iph->daddr, th->dest,
463c84b9 1717 inet_iif(skb));
1da177e4 1718 if (sk2) {
dbe7faa4 1719 inet_twsk_deschedule_put(inet_twsk(sk));
1da177e4 1720 sk = sk2;
3b24d854 1721 refcounted = false;
1da177e4
LT
1722 goto process;
1723 }
1724 /* Fall through to ACK */
1725 }
1726 case TCP_TW_ACK:
1727 tcp_v4_timewait_ack(sk, skb);
1728 break;
1729 case TCP_TW_RST:
271c3b9b
FW
1730 tcp_v4_send_reset(sk, skb);
1731 inet_twsk_deschedule_put(inet_twsk(sk));
1732 goto discard_it;
1da177e4
LT
1733 case TCP_TW_SUCCESS:;
1734 }
1735 goto discard_it;
1736}
1737
ccb7c410
DM
1738static struct timewait_sock_ops tcp_timewait_sock_ops = {
1739 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1740 .twsk_unique = tcp_twsk_unique,
1741 .twsk_destructor= tcp_twsk_destructor,
ccb7c410 1742};
1da177e4 1743
63d02d15 1744void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
5d299f3d
ED
1745{
1746 struct dst_entry *dst = skb_dst(skb);
1747
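	/* Caching the input route on the socket lets the early-demux path
	 * reuse it for subsequent packets instead of doing a full route
	 * lookup; rx_dst_ifindex records which device the cached dst is
	 * valid for.
	 */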
5037e9ef 1748 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
1749 sk->sk_rx_dst = dst;
1750 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1751 }
5d299f3d 1752}
63d02d15 1753EXPORT_SYMBOL(inet_sk_rx_dst_set);
5d299f3d 1754
3b401a81 1755const struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
1756 .queue_xmit = ip_queue_xmit,
1757 .send_check = tcp_v4_send_check,
1758 .rebuild_header = inet_sk_rebuild_header,
5d299f3d 1759 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1760 .conn_request = tcp_v4_conn_request,
1761 .syn_recv_sock = tcp_v4_syn_recv_sock,
543d9cfe
ACM
1762 .net_header_len = sizeof(struct iphdr),
1763 .setsockopt = ip_setsockopt,
1764 .getsockopt = ip_getsockopt,
1765 .addr2sockaddr = inet_csk_addr2sockaddr,
1766 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 1767 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 1768#ifdef CONFIG_COMPAT
543d9cfe
ACM
1769 .compat_setsockopt = compat_ip_setsockopt,
1770 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 1771#endif
4fab9071 1772 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4 1773};
4bc2f18b 1774EXPORT_SYMBOL(ipv4_specific);
1da177e4 1775
cfb6eeb4 1776#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1777static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 1778 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1779 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1780 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 1781};
b6332e6c 1782#endif
cfb6eeb4 1783
1da177e4
LT
1784/* NOTE: A lot of fields are set to zero explicitly by the call to
 1785 * sk_alloc(), so they need not be initialized here.
1786 */
1787static int tcp_v4_init_sock(struct sock *sk)
1788{
6687e988 1789 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1790
900f65d3 1791 tcp_init_sock(sk);
1da177e4 1792
8292a17a 1793 icsk->icsk_af_ops = &ipv4_specific;
900f65d3 1794
cfb6eeb4 1795#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1796 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
cfb6eeb4 1797#endif
1da177e4 1798
1da177e4
LT
1799 return 0;
1800}
1801
7d06b2e0 1802void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
1803{
1804 struct tcp_sock *tp = tcp_sk(sk);
1805
1806 tcp_clear_xmit_timers(sk);
1807
6687e988 1808 tcp_cleanup_congestion_control(sk);
317a76f9 1809
1da177e4 1810	/* Clean up the write buffer. */
fe067e8a 1811 tcp_write_queue_purge(sk);
1da177e4
LT
1812
1813 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1814 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1815
cfb6eeb4
YH
1816#ifdef CONFIG_TCP_MD5SIG
1817 /* Clean up the MD5 key list, if any */
1818 if (tp->md5sig_info) {
a915da9b 1819 tcp_clear_md5_list(sk);
a8afca03 1820 kfree_rcu(tp->md5sig_info, rcu);
cfb6eeb4
YH
1821 tp->md5sig_info = NULL;
1822 }
1823#endif
1a2449a8 1824
1da177e4
LT
 1825	/* Clean the prequeue; it really should already be empty. */
1826 __skb_queue_purge(&tp->ucopy.prequeue);
1827
1828 /* Clean up a referenced TCP bind bucket. */
463c84b9 1829 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1830 inet_put_port(sk);
1da177e4 1831
00db4124 1832 BUG_ON(tp->fastopen_rsk);
435cf559 1833
cf60af03
YC
1834 /* If socket is aborted during connect operation */
1835 tcp_free_fastopen_req(tp);
cd8ae852 1836 tcp_saved_syn_free(tp);
cf60af03 1837
180d8cd9 1838 sk_sockets_allocated_dec(sk);
3d596f7b 1839
baac50bb 1840 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3d596f7b 1841 sock_release_memcg(sk);
1da177e4 1842}
1da177e4
LT
1843EXPORT_SYMBOL(tcp_v4_destroy_sock);
1844
1845#ifdef CONFIG_PROC_FS
1846/* Proc filesystem TCP sock list dumping. */
1847
a8b690f9
TH
1848/*
 1849 * Get the next listener socket following cur. If cur is NULL, get the first
 1850 * socket starting from the bucket given in st->bucket; when st->bucket is
 1851 * zero, the very first socket in the hash table is returned.
1852 */
1da177e4
LT
1853static void *listening_get_next(struct seq_file *seq, void *cur)
1854{
5799de0b 1855 struct tcp_iter_state *st = seq->private;
a4146b1b 1856 struct net *net = seq_file_net(seq);
3b24d854
ED
1857 struct inet_listen_hashbucket *ilb;
1858 struct inet_connection_sock *icsk;
1859 struct sock *sk = cur;
1da177e4
LT
1860
1861 if (!sk) {
3b24d854 1862get_head:
a8b690f9 1863 ilb = &tcp_hashinfo.listening_hash[st->bucket];
5caea4ea 1864 spin_lock_bh(&ilb->lock);
3b24d854 1865 sk = sk_head(&ilb->head);
a8b690f9 1866 st->offset = 0;
1da177e4
LT
1867 goto get_sk;
1868 }
5caea4ea 1869 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1da177e4 1870 ++st->num;
a8b690f9 1871 ++st->offset;
1da177e4 1872
3b24d854 1873 sk = sk_next(sk);
1da177e4 1874get_sk:
3b24d854 1875 sk_for_each_from(sk) {
8475ef9f
PE
1876 if (!net_eq(sock_net(sk), net))
1877 continue;
3b24d854
ED
1878 if (sk->sk_family == st->family)
1879 return sk;
e905a9ed 1880 icsk = inet_csk(sk);
1da177e4 1881 }
5caea4ea 1882 spin_unlock_bh(&ilb->lock);
a8b690f9 1883 st->offset = 0;
3b24d854
ED
1884 if (++st->bucket < INET_LHTABLE_SIZE)
1885 goto get_head;
1886 return NULL;
1da177e4
LT
1887}
1888
1889static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1890{
a8b690f9
TH
1891 struct tcp_iter_state *st = seq->private;
1892 void *rc;
1893
1894 st->bucket = 0;
1895 st->offset = 0;
1896 rc = listening_get_next(seq, NULL);
1da177e4
LT
1897
1898 while (rc && *pos) {
1899 rc = listening_get_next(seq, rc);
1900 --*pos;
1901 }
1902 return rc;
1903}
1904
05dbc7b5 1905static inline bool empty_bucket(const struct tcp_iter_state *st)
6eac5604 1906{
05dbc7b5 1907 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
6eac5604
AK
1908}
1909
a8b690f9
TH
1910/*
 1911 * Get the first established socket starting from the bucket given in st->bucket.
1912 * If st->bucket is zero, the very first socket in the hash is returned.
1913 */
1da177e4
LT
1914static void *established_get_first(struct seq_file *seq)
1915{
5799de0b 1916 struct tcp_iter_state *st = seq->private;
a4146b1b 1917 struct net *net = seq_file_net(seq);
1da177e4
LT
1918 void *rc = NULL;
1919
a8b690f9
TH
1920 st->offset = 0;
1921 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1da177e4 1922 struct sock *sk;
3ab5aee7 1923 struct hlist_nulls_node *node;
9db66bdc 1924 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 1925
6eac5604
AK
1926 /* Lockless fast path for the common case of empty buckets */
1927 if (empty_bucket(st))
1928 continue;
1929
9db66bdc 1930 spin_lock_bh(lock);
3ab5aee7 1931 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 1932 if (sk->sk_family != st->family ||
878628fb 1933 !net_eq(sock_net(sk), net)) {
1da177e4
LT
1934 continue;
1935 }
1936 rc = sk;
1937 goto out;
1938 }
9db66bdc 1939 spin_unlock_bh(lock);
1da177e4
LT
1940 }
1941out:
1942 return rc;
1943}
1944
1945static void *established_get_next(struct seq_file *seq, void *cur)
1946{
1947 struct sock *sk = cur;
3ab5aee7 1948 struct hlist_nulls_node *node;
5799de0b 1949 struct tcp_iter_state *st = seq->private;
a4146b1b 1950 struct net *net = seq_file_net(seq);
1da177e4
LT
1951
1952 ++st->num;
a8b690f9 1953 ++st->offset;
1da177e4 1954
05dbc7b5 1955 sk = sk_nulls_next(sk);
1da177e4 1956
3ab5aee7 1957 sk_nulls_for_each_from(sk, node) {
878628fb 1958 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
05dbc7b5 1959 return sk;
1da177e4
LT
1960 }
1961
05dbc7b5
ED
1962 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1963 ++st->bucket;
1964 return established_get_first(seq);
1da177e4
LT
1965}
1966
1967static void *established_get_idx(struct seq_file *seq, loff_t pos)
1968{
a8b690f9
TH
1969 struct tcp_iter_state *st = seq->private;
1970 void *rc;
1971
1972 st->bucket = 0;
1973 rc = established_get_first(seq);
1da177e4
LT
1974
1975 while (rc && pos) {
1976 rc = established_get_next(seq, rc);
1977 --pos;
7174259e 1978 }
1da177e4
LT
1979 return rc;
1980}
1981
1982static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1983{
1984 void *rc;
5799de0b 1985 struct tcp_iter_state *st = seq->private;
1da177e4 1986
1da177e4
LT
1987 st->state = TCP_SEQ_STATE_LISTENING;
1988 rc = listening_get_idx(seq, &pos);
1989
1990 if (!rc) {
1da177e4
LT
1991 st->state = TCP_SEQ_STATE_ESTABLISHED;
1992 rc = established_get_idx(seq, pos);
1993 }
1994
1995 return rc;
1996}
1997
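/* Resume a partially consumed dump: instead of walking the hash tables from
 * the beginning on every read() of the seq_file, restart from the bucket and
 * in-bucket offset recorded during the previous pass.
 */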
a8b690f9
TH
1998static void *tcp_seek_last_pos(struct seq_file *seq)
1999{
2000 struct tcp_iter_state *st = seq->private;
2001 int offset = st->offset;
2002 int orig_num = st->num;
2003 void *rc = NULL;
2004
2005 switch (st->state) {
a8b690f9
TH
2006 case TCP_SEQ_STATE_LISTENING:
2007 if (st->bucket >= INET_LHTABLE_SIZE)
2008 break;
2009 st->state = TCP_SEQ_STATE_LISTENING;
2010 rc = listening_get_next(seq, NULL);
2011 while (offset-- && rc)
2012 rc = listening_get_next(seq, rc);
2013 if (rc)
2014 break;
2015 st->bucket = 0;
05dbc7b5 2016 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2017 /* Fallthrough */
2018 case TCP_SEQ_STATE_ESTABLISHED:
a8b690f9
TH
2019 if (st->bucket > tcp_hashinfo.ehash_mask)
2020 break;
2021 rc = established_get_first(seq);
2022 while (offset-- && rc)
2023 rc = established_get_next(seq, rc);
2024 }
2025
2026 st->num = orig_num;
2027
2028 return rc;
2029}
2030
1da177e4
LT
2031static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2032{
5799de0b 2033 struct tcp_iter_state *st = seq->private;
a8b690f9
TH
2034 void *rc;
2035
2036 if (*pos && *pos == st->last_pos) {
2037 rc = tcp_seek_last_pos(seq);
2038 if (rc)
2039 goto out;
2040 }
2041
1da177e4
LT
2042 st->state = TCP_SEQ_STATE_LISTENING;
2043 st->num = 0;
a8b690f9
TH
2044 st->bucket = 0;
2045 st->offset = 0;
2046 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2047
2048out:
2049 st->last_pos = *pos;
2050 return rc;
1da177e4
LT
2051}
2052
2053static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2054{
a8b690f9 2055 struct tcp_iter_state *st = seq->private;
1da177e4 2056 void *rc = NULL;
1da177e4
LT
2057
2058 if (v == SEQ_START_TOKEN) {
2059 rc = tcp_get_idx(seq, 0);
2060 goto out;
2061 }
1da177e4
LT
2062
2063 switch (st->state) {
1da177e4
LT
2064 case TCP_SEQ_STATE_LISTENING:
2065 rc = listening_get_next(seq, v);
2066 if (!rc) {
1da177e4 2067 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2068 st->bucket = 0;
2069 st->offset = 0;
1da177e4
LT
2070 rc = established_get_first(seq);
2071 }
2072 break;
2073 case TCP_SEQ_STATE_ESTABLISHED:
1da177e4
LT
2074 rc = established_get_next(seq, v);
2075 break;
2076 }
2077out:
2078 ++*pos;
a8b690f9 2079 st->last_pos = *pos;
1da177e4
LT
2080 return rc;
2081}
2082
2083static void tcp_seq_stop(struct seq_file *seq, void *v)
2084{
5799de0b 2085 struct tcp_iter_state *st = seq->private;
1da177e4
LT
2086
2087 switch (st->state) {
1da177e4
LT
2088 case TCP_SEQ_STATE_LISTENING:
2089 if (v != SEQ_START_TOKEN)
5caea4ea 2090 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
1da177e4 2091 break;
1da177e4
LT
2092 case TCP_SEQ_STATE_ESTABLISHED:
2093 if (v)
9db66bdc 2094 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2095 break;
2096 }
2097}
2098
73cb88ec 2099int tcp_seq_open(struct inode *inode, struct file *file)
1da177e4 2100{
d9dda78b 2101 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
1da177e4 2102 struct tcp_iter_state *s;
52d6f3f1 2103 int err;
1da177e4 2104
52d6f3f1
DL
2105 err = seq_open_net(inode, file, &afinfo->seq_ops,
2106 sizeof(struct tcp_iter_state));
2107 if (err < 0)
2108 return err;
f40c8174 2109
52d6f3f1 2110 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2111 s->family = afinfo->family;
688d1945 2112 s->last_pos = 0;
f40c8174
DL
2113 return 0;
2114}
73cb88ec 2115EXPORT_SYMBOL(tcp_seq_open);
f40c8174 2116
6f8b13bc 2117int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2118{
2119 int rc = 0;
2120 struct proc_dir_entry *p;
2121
9427c4b3
DL
2122 afinfo->seq_ops.start = tcp_seq_start;
2123 afinfo->seq_ops.next = tcp_seq_next;
2124 afinfo->seq_ops.stop = tcp_seq_stop;
2125
84841c3c 2126 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
73cb88ec 2127 afinfo->seq_fops, afinfo);
84841c3c 2128 if (!p)
1da177e4
LT
2129 rc = -ENOMEM;
2130 return rc;
2131}
4bc2f18b 2132EXPORT_SYMBOL(tcp_proc_register);
1da177e4 2133
6f8b13bc 2134void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2135{
ece31ffd 2136 remove_proc_entry(afinfo->name, net->proc_net);
1da177e4 2137}
4bc2f18b 2138EXPORT_SYMBOL(tcp_proc_unregister);
1da177e4 2139
d4f06873 2140static void get_openreq4(const struct request_sock *req,
aa3a0c8c 2141 struct seq_file *f, int i)
1da177e4 2142{
2e6599cb 2143 const struct inet_request_sock *ireq = inet_rsk(req);
fa76ce73 2144 long delta = req->rsk_timer.expires - jiffies;
1da177e4 2145
5e659e4c 2146 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
652586df 2147 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
1da177e4 2148 i,
634fb979 2149 ireq->ir_loc_addr,
d4f06873 2150 ireq->ir_num,
634fb979
ED
2151 ireq->ir_rmt_addr,
2152 ntohs(ireq->ir_rmt_port),
1da177e4
LT
2153 TCP_SYN_RECV,
2154 0, 0, /* could print option size, but that is af dependent. */
2155 1, /* timers active (only the expire timer) */
a399a805 2156 jiffies_delta_to_clock_t(delta),
e6c022a4 2157 req->num_timeout,
aa3a0c8c
ED
2158 from_kuid_munged(seq_user_ns(f),
2159 sock_i_uid(req->rsk_listener)),
1da177e4
LT
2160 0, /* non standard timer */
2161 0, /* open_requests have no inode */
d4f06873 2162 0,
652586df 2163 req);
1da177e4
LT
2164}
2165
652586df 2166static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
1da177e4
LT
2167{
2168 int timer_active;
2169 unsigned long timer_expires;
cf533ea5 2170 const struct tcp_sock *tp = tcp_sk(sk);
cf4c6bf8 2171 const struct inet_connection_sock *icsk = inet_csk(sk);
cf533ea5 2172 const struct inet_sock *inet = inet_sk(sk);
0536fcc0 2173 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
c720c7e8
ED
2174 __be32 dest = inet->inet_daddr;
2175 __be32 src = inet->inet_rcv_saddr;
2176 __u16 destp = ntohs(inet->inet_dport);
2177 __u16 srcp = ntohs(inet->inet_sport);
49d09007 2178 int rx_queue;
00fd38d9 2179 int state;
1da177e4 2180
6ba8a3b1
ND
2181 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2182 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2183 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 2184 timer_active = 1;
463c84b9
ACM
2185 timer_expires = icsk->icsk_timeout;
2186 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2187 timer_active = 4;
463c84b9 2188 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2189 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2190 timer_active = 2;
cf4c6bf8 2191 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2192 } else {
2193 timer_active = 0;
2194 timer_expires = jiffies;
2195 }
2196
00fd38d9
ED
2197 state = sk_state_load(sk);
2198 if (state == TCP_LISTEN)
49d09007
ED
2199 rx_queue = sk->sk_ack_backlog;
2200 else
00fd38d9
ED
2201 /* Because we don't lock the socket,
2202 * we might find a transient negative value.
49d09007
ED
2203 */
2204 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
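		/* rcv_nxt - copied_seq is the number of bytes received but not
		 * yet read by the application; listeners report the accept
		 * queue length instead (see above).
		 */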
2205
5e659e4c 2206 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
652586df 2207 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
00fd38d9 2208 i, src, srcp, dest, destp, state,
47da8ee6 2209 tp->write_seq - tp->snd_una,
49d09007 2210 rx_queue,
1da177e4 2211 timer_active,
a399a805 2212 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 2213 icsk->icsk_retransmits,
a7cb5a49 2214 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
6687e988 2215 icsk->icsk_probes_out,
cf4c6bf8
IJ
2216 sock_i_ino(sk),
2217 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2218 jiffies_to_clock_t(icsk->icsk_rto),
2219 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2220 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2221 tp->snd_cwnd,
00fd38d9
ED
2222 state == TCP_LISTEN ?
2223 fastopenq->max_qlen :
652586df 2224 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
1da177e4
LT
2225}
2226
cf533ea5 2227static void get_timewait4_sock(const struct inet_timewait_sock *tw,
652586df 2228 struct seq_file *f, int i)
1da177e4 2229{
789f558c 2230 long delta = tw->tw_timer.expires - jiffies;
23f33c2d 2231 __be32 dest, src;
1da177e4 2232 __u16 destp, srcp;
1da177e4
LT
2233
2234 dest = tw->tw_daddr;
2235 src = tw->tw_rcv_saddr;
2236 destp = ntohs(tw->tw_dport);
2237 srcp = ntohs(tw->tw_sport);
2238
5e659e4c 2239 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
652586df 2240 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
1da177e4 2241 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
a399a805 2242 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
652586df 2243 atomic_read(&tw->tw_refcnt), tw);
1da177e4
LT
2244}
2245
2246#define TMPSZ 150
2247
2248static int tcp4_seq_show(struct seq_file *seq, void *v)
2249{
5799de0b 2250 struct tcp_iter_state *st;
05dbc7b5 2251 struct sock *sk = v;
1da177e4 2252
652586df 2253 seq_setwidth(seq, TMPSZ - 1);
1da177e4 2254 if (v == SEQ_START_TOKEN) {
652586df 2255 seq_puts(seq, " sl local_address rem_address st tx_queue "
1da177e4
LT
2256 "rx_queue tr tm->when retrnsmt uid timeout "
2257 "inode");
2258 goto out;
2259 }
2260 st = seq->private;
2261
079096f1
ED
2262 if (sk->sk_state == TCP_TIME_WAIT)
2263 get_timewait4_sock(v, seq, st->num);
2264 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 2265 get_openreq4(v, seq, st->num);
079096f1
ED
2266 else
2267 get_tcp4_sock(v, seq, st->num);
1da177e4 2268out:
652586df 2269 seq_pad(seq, '\n');
1da177e4
LT
2270 return 0;
2271}
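/* Each line emitted above into /proc/net/tcp looks roughly like the following
 * (values are illustrative only):
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *    0: 0100007F:1F90 00000000:0000 0A 00000000:00000000 00:00000000 00000000  1000        0 12345 1 ffff88003d3af3c0 100 0 0 10 0
 *
 * Addresses and ports are hexadecimal (0100007F is 127.0.0.1 on a
 * little-endian host, 1F90 is port 8080) and "st" is the TCP state
 * (0A == TCP_LISTEN).
 */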
2272
73cb88ec
AV
2273static const struct file_operations tcp_afinfo_seq_fops = {
2274 .owner = THIS_MODULE,
2275 .open = tcp_seq_open,
2276 .read = seq_read,
2277 .llseek = seq_lseek,
2278 .release = seq_release_net
2279};
2280
1da177e4 2281static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2282 .name = "tcp",
2283 .family = AF_INET,
73cb88ec 2284 .seq_fops = &tcp_afinfo_seq_fops,
9427c4b3
DL
2285 .seq_ops = {
2286 .show = tcp4_seq_show,
2287 },
1da177e4
LT
2288};
2289
2c8c1e72 2290static int __net_init tcp4_proc_init_net(struct net *net)
757764f6
PE
2291{
2292 return tcp_proc_register(net, &tcp4_seq_afinfo);
2293}
2294
2c8c1e72 2295static void __net_exit tcp4_proc_exit_net(struct net *net)
757764f6
PE
2296{
2297 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2298}
2299
2300static struct pernet_operations tcp4_net_ops = {
2301 .init = tcp4_proc_init_net,
2302 .exit = tcp4_proc_exit_net,
2303};
2304
1da177e4
LT
2305int __init tcp4_proc_init(void)
2306{
757764f6 2307 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2308}
2309
2310void tcp4_proc_exit(void)
2311{
757764f6 2312 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2313}
2314#endif /* CONFIG_PROC_FS */
2315
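/* The struct proto below is what wires TCP into the generic AF_INET socket
 * layer: socket creation picks it up through the inetsw protocol switch and
 * every socket-level operation (connect, sendmsg, setsockopt, ...) is
 * dispatched through these function pointers.
 */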
2316struct proto tcp_prot = {
2317 .name = "TCP",
2318 .owner = THIS_MODULE,
2319 .close = tcp_close,
2320 .connect = tcp_v4_connect,
2321 .disconnect = tcp_disconnect,
463c84b9 2322 .accept = inet_csk_accept,
1da177e4
LT
2323 .ioctl = tcp_ioctl,
2324 .init = tcp_v4_init_sock,
2325 .destroy = tcp_v4_destroy_sock,
2326 .shutdown = tcp_shutdown,
2327 .setsockopt = tcp_setsockopt,
2328 .getsockopt = tcp_getsockopt,
1da177e4 2329 .recvmsg = tcp_recvmsg,
7ba42910
CG
2330 .sendmsg = tcp_sendmsg,
2331 .sendpage = tcp_sendpage,
1da177e4 2332 .backlog_rcv = tcp_v4_do_rcv,
46d3ceab 2333 .release_cb = tcp_release_cb,
ab1e0a13
ACM
2334 .hash = inet_hash,
2335 .unhash = inet_unhash,
2336 .get_port = inet_csk_get_port,
1da177e4 2337 .enter_memory_pressure = tcp_enter_memory_pressure,
c9bee3b7 2338 .stream_memory_free = tcp_stream_memory_free,
1da177e4 2339 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2340 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2341 .memory_allocated = &tcp_memory_allocated,
2342 .memory_pressure = &tcp_memory_pressure,
a4fe34bf 2343 .sysctl_mem = sysctl_tcp_mem,
1da177e4
LT
2344 .sysctl_wmem = sysctl_tcp_wmem,
2345 .sysctl_rmem = sysctl_tcp_rmem,
2346 .max_header = MAX_TCP_HEADER,
2347 .obj_size = sizeof(struct tcp_sock),
3ab5aee7 2348 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2349 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2350 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2351 .h.hashinfo = &tcp_hashinfo,
7ba42910 2352 .no_autobind = true,
543d9cfe
ACM
2353#ifdef CONFIG_COMPAT
2354 .compat_setsockopt = compat_tcp_setsockopt,
2355 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 2356#endif
c1e64e29 2357 .diag_destroy = tcp_abort,
1da177e4 2358};
4bc2f18b 2359EXPORT_SYMBOL(tcp_prot);
1da177e4 2360
bdbbb852
ED
2361static void __net_exit tcp_sk_exit(struct net *net)
2362{
2363 int cpu;
2364
2365 for_each_possible_cpu(cpu)
2366 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2367 free_percpu(net->ipv4.tcp_sk);
2368}
2369
046ee902
DL
2370static int __net_init tcp_sk_init(struct net *net)
2371{
bdbbb852
ED
2372 int res, cpu;
2373
2374 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2375 if (!net->ipv4.tcp_sk)
2376 return -ENOMEM;
2377
2378 for_each_possible_cpu(cpu) {
2379 struct sock *sk;
2380
2381 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2382 IPPROTO_TCP, net);
2383 if (res)
2384 goto fail;
a9d6532b 2385 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
bdbbb852
ED
2386 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2387 }
49213555 2388
5d134f1c 2389 net->ipv4.sysctl_tcp_ecn = 2;
49213555
DB
2390 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2391
b0f9ca53 2392 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
6b58e0a5 2393 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
05cbc0db 2394 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
046ee902 2395
13b287e8 2396 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
9bd6861b 2397 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
b840d15d 2398 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
13b287e8 2399
6fa25166 2400 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
7c083ecb 2401 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
0aca737d 2402 net->ipv4.sysctl_tcp_syncookies = 1;
1043e25f 2403 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
ae5c3f40 2404 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
c6214a97 2405 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
c402d9be 2406 net->ipv4.sysctl_tcp_orphan_retries = 0;
1e579caa 2407 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
4979f2d9 2408 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
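	/* The assignments above are the per-namespace defaults a freshly
	 * created netns starts with; they correspond to the tcp_* entries
	 * under /proc/sys/net/ipv4/ (e.g. "sysctl net.ipv4.tcp_fin_timeout")
	 * until an administrator overrides them.
	 */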
12ed8244 2409
49213555 2410 return 0;
bdbbb852
ED
2411fail:
2412 tcp_sk_exit(net);
2413
2414 return res;
b099ce26
EB
2415}
2416
2417static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2418{
2419 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
046ee902
DL
2420}
2421
2422static struct pernet_operations __net_initdata tcp_sk_ops = {
b099ce26
EB
2423 .init = tcp_sk_init,
2424 .exit = tcp_sk_exit,
2425 .exit_batch = tcp_sk_exit_batch,
046ee902
DL
2426};
2427
9b0f976f 2428void __init tcp_v4_init(void)
1da177e4 2429{
5caea4ea 2430 inet_hashinfo_init(&tcp_hashinfo);
6a1b3054 2431 if (register_pernet_subsys(&tcp_sk_ops))
1da177e4 2432 panic("Failed to create the TCP control socket.\n");
1da177e4 2433}