/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller	:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen	:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen	:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *		Andi Kleen	:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
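/* Note: the initial sequence number above is derived from the packet's
 * source/destination addresses and ports via secure_tcp_sequence_number(),
 * so each connection gets its own hard-to-predict starting sequence rather
 * than a value taken from a global counter.
 */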

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in each dark corner sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}
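/* Note: with CHECKSUM_PARTIAL only the pseudo-header sum is filled in above
 * and csum_start/csum_offset tell the device (or GSO) where to finish the
 * checksum; otherwise the full checksum over the TCP header and payload is
 * computed here in software.
 */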

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *	So we build the reply based only on parameters
 *	that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;


		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct net *net,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sock_net(sk), skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
						 tcp_sk(sk)->snd_nxt;

	tcp_v4_send_ack(sock_net(sk), skb, seq,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
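/* Both hash helpers above feed the same material into MD5: the IPv4
 * pseudo-header, the TCP header with its checksum field zeroed (plus the
 * segment payload in the _skb variant), and finally the key itself.
 */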

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}

static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
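/* Early demux runs before the IP routing decision: if an established socket
 * is found here, its cached rx dst can be attached to the skb via
 * skb_dst_set_noref(), avoiding a separate route lookup for this packet.
 */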
1478
b2fb4f54
ED
1479/* Packet is added to VJ-style prequeue for processing in process
1480 * context, if a reader task is waiting. Apparently, this exciting
1481 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1482 * failed somewhere. Latency? Burstiness? Well, at least now we will
1483 * see, why it failed. 8)8) --ANK
1484 *
1485 */
1486bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1487{
1488 struct tcp_sock *tp = tcp_sk(sk);
1489
1490 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1491 return false;
1492
1493 if (skb->len <= tcp_hdrlen(skb) &&
1494 skb_queue_len(&tp->ucopy.prequeue) == 0)
1495 return false;
1496
ca777eff
ED
1497 /* Before escaping RCU protected region, we need to take care of skb
1498 * dst. Prequeue is only enabled for established sockets.
1499 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
1500 * Instead of doing full sk_rx_dst validity here, let's perform
1501 * an optimistic check.
1502 */
1503 if (likely(sk->sk_rx_dst))
1504 skb_dst_drop(skb);
1505 else
5037e9ef 1506 skb_dst_force_safe(skb);
ca777eff 1507
b2fb4f54
ED
1508 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1509 tp->ucopy.memory += skb->truesize;
0cef6a4c
ED
1510 if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
1511 tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
b2fb4f54
ED
1512 struct sk_buff *skb1;
1513
1514 BUG_ON(sock_owned_by_user(sk));
0cef6a4c
ED
1515 __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
1516 skb_queue_len(&tp->ucopy.prequeue));
b2fb4f54 1517
0cef6a4c 1518 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
b2fb4f54 1519 sk_backlog_rcv(sk, skb1);
b2fb4f54
ED
1520
1521 tp->ucopy.memory = 0;
1522 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1523 wake_up_interruptible_sync_poll(sk_sleep(sk),
1524 POLLIN | POLLRDNORM | POLLRDBAND);
1525 if (!inet_csk_ack_scheduled(sk))
1526 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1527 (3 * tcp_rto_min(sk)) / 4,
1528 TCP_RTO_MAX);
1529 }
1530 return true;
1531}
1532EXPORT_SYMBOL(tcp_prequeue);
1533
1da177e4
LT
1534/*
1535 * From tcp_input.c
1536 */
1537
1538int tcp_v4_rcv(struct sk_buff *skb)
1539{
3b24d854 1540 struct net *net = dev_net(skb->dev);
eddc9ec5 1541 const struct iphdr *iph;
cf533ea5 1542 const struct tcphdr *th;
3b24d854 1543 bool refcounted;
1da177e4
LT
1544 struct sock *sk;
1545 int ret;
1546
1547 if (skb->pkt_type != PACKET_HOST)
1548 goto discard_it;
1549
1550 /* Count it even if it's bad */
90bbcc60 1551 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1da177e4
LT
1552
1553 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1554 goto discard_it;
1555
ea1627c2 1556 th = (const struct tcphdr *)skb->data;
1da177e4 1557
ea1627c2 1558 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1da177e4
LT
1559 goto bad_packet;
1560 if (!pskb_may_pull(skb, th->doff * 4))
1561 goto discard_it;
1562
1563 /* An explanation is required here, I think.
1564 * Packet length and doff are validated by header prediction,
caa20d9a 1565 * provided case of th->doff==0 is eliminated.
1da177e4 1566 * So, we defer the checks. */
ed70fcfc
TH
1567
1568 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
6a5dc9e5 1569 goto csum_error;
1da177e4 1570
ea1627c2 1571 th = (const struct tcphdr *)skb->data;
eddc9ec5 1572 iph = ip_hdr(skb);
971f10ec
ED
1573 /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1574 * barrier() makes sure compiler wont play fool^Waliasing games.
1575 */
1576 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1577 sizeof(struct inet_skb_parm));
1578 barrier();
1579
1da177e4
LT
1580 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1581 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1582 skb->len - th->doff * 4);
1583 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
e11ecddf 1584 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
04317daf 1585 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
b82d1bb4 1586 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1da177e4
LT
1587 TCP_SKB_CB(skb)->sacked = 0;
1588
4bdc3d66 1589lookup:
a583636a 1590 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
3b24d854 1591 th->dest, &refcounted);
1da177e4
LT
1592 if (!sk)
1593 goto no_tcp_socket;
1594
bb134d5d
ED
1595process:
1596 if (sk->sk_state == TCP_TIME_WAIT)
1597 goto do_time_wait;
1598
079096f1
ED
1599 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1600 struct request_sock *req = inet_reqsk(sk);
7716682c 1601 struct sock *nsk;
079096f1
ED
1602
1603 sk = req->rsk_listener;
72923555
ED
1604 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1605 reqsk_put(req);
1606 goto discard_it;
1607 }
7716682c 1608 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1609 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1610 goto lookup;
1611 }
3b24d854
ED
1612 /* We own a reference on the listener, increase it again
1613 * as we might lose it too soon.
1614 */
7716682c 1615 sock_hold(sk);
3b24d854 1616 refcounted = true;
7716682c 1617 nsk = tcp_check_req(sk, skb, req, false);
079096f1
ED
1618 if (!nsk) {
1619 reqsk_put(req);
7716682c 1620 goto discard_and_relse;
079096f1
ED
1621 }
1622 if (nsk == sk) {
079096f1
ED
1623 reqsk_put(req);
1624 } else if (tcp_child_process(sk, nsk, skb)) {
1625 tcp_v4_send_reset(nsk, skb);
7716682c 1626 goto discard_and_relse;
079096f1 1627 } else {
7716682c 1628 sock_put(sk);
079096f1
ED
1629 return 0;
1630 }
1631 }
6cce09f8 1632 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
02a1d6e7 1633 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
d218d111 1634 goto discard_and_relse;
6cce09f8 1635 }
d218d111 1636
1da177e4
LT
1637 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1638 goto discard_and_relse;
9ea88a15 1639
9ea88a15
DP
1640 if (tcp_v4_inbound_md5_hash(sk, skb))
1641 goto discard_and_relse;
9ea88a15 1642
b59c2701 1643 nf_reset(skb);
1da177e4 1644
fda9ef5d 1645 if (sk_filter(sk, skb))
1da177e4
LT
1646 goto discard_and_relse;
1647
1648 skb->dev = NULL;
1649
e994b2f0
ED
1650 if (sk->sk_state == TCP_LISTEN) {
1651 ret = tcp_v4_do_rcv(sk, skb);
1652 goto put_and_return;
1653 }
1654
1655 sk_incoming_cpu_update(sk);
1656
c6366184 1657 bh_lock_sock_nested(sk);
a44d6eac 1658 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1659 ret = 0;
1660 if (!sock_owned_by_user(sk)) {
7bced397 1661 if (!tcp_prequeue(sk, skb))
1da177e4 1662 ret = tcp_v4_do_rcv(sk, skb);
da882c1f
ED
1663 } else if (unlikely(sk_add_backlog(sk, skb,
1664 sk->sk_rcvbuf + sk->sk_sndbuf))) {
6b03a53a 1665 bh_unlock_sock(sk);
02a1d6e7 1666 __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1667 goto discard_and_relse;
1668 }
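	/* Delivery choices made above:
	 *  - socket not owned by a process: tcp_prequeue() may queue the
	 *    segment for the task sleeping in recvmsg(); if it declines,
	 *    tcp_v4_do_rcv() handles the segment right here in softirq
	 *    context;
	 *  - socket owned by a process: the segment goes onto the backlog,
	 *    bounded by sk_rcvbuf + sk_sndbuf, and is dropped (counted as
	 *    TCPBACKLOGDROP) once that limit is exceeded.
	 */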
1da177e4
LT
1669 bh_unlock_sock(sk);
1670
e994b2f0 1671put_and_return:
3b24d854
ED
1672 if (refcounted)
1673 sock_put(sk);
1da177e4
LT
1674
1675 return ret;
1676
1677no_tcp_socket:
1678 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1679 goto discard_it;
1680
12e25e10 1681 if (tcp_checksum_complete(skb)) {
6a5dc9e5 1682csum_error:
90bbcc60 1683 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1da177e4 1684bad_packet:
90bbcc60 1685 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1da177e4 1686 } else {
cfb6eeb4 1687 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
1688 }
1689
1690discard_it:
1691 /* Discard frame. */
1692 kfree_skb(skb);
e905a9ed 1693 return 0;
1da177e4
LT
1694
1695discard_and_relse:
532182cd 1696 sk_drops_add(sk, skb);
3b24d854
ED
1697 if (refcounted)
1698 sock_put(sk);
1da177e4
LT
1699 goto discard_it;
1700
1701do_time_wait:
1702 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1703 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1704 goto discard_it;
1705 }
1706
6a5dc9e5
ED
1707 if (tcp_checksum_complete(skb)) {
1708 inet_twsk_put(inet_twsk(sk));
1709 goto csum_error;
1da177e4 1710 }
9469c7b4 1711 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 1712 case TCP_TW_SYN: {
c346dca1 1713 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
a583636a
CG
1714 &tcp_hashinfo, skb,
1715 __tcp_hdrlen(th),
da5e3630 1716 iph->saddr, th->source,
eddc9ec5 1717 iph->daddr, th->dest,
463c84b9 1718 inet_iif(skb));
1da177e4 1719 if (sk2) {
dbe7faa4 1720 inet_twsk_deschedule_put(inet_twsk(sk));
1da177e4 1721 sk = sk2;
3b24d854 1722 refcounted = false;
1da177e4
LT
1723 goto process;
1724 }
1725 /* Fall through to ACK */
1726 }
1727 case TCP_TW_ACK:
1728 tcp_v4_timewait_ack(sk, skb);
1729 break;
1730 case TCP_TW_RST:
271c3b9b
FW
1731 tcp_v4_send_reset(sk, skb);
1732 inet_twsk_deschedule_put(inet_twsk(sk));
1733 goto discard_it;
1da177e4
LT
1734 case TCP_TW_SUCCESS:;
1735 }
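	/* tcp_timewait_state_process() verdicts, as handled above:
	 *   TCP_TW_SYN     - a SYN that may legitimately open a new
	 *                    connection; if a current listener exists, the
	 *                    timewait socket is recycled and the SYN is
	 *                    processed against that listener, otherwise
	 *                    fall through to TCP_TW_ACK;
	 *   TCP_TW_ACK     - answer with the timewait ACK;
	 *   TCP_TW_RST     - answer with a reset and kill the timewait sock;
	 *   TCP_TW_SUCCESS - nothing more to do, just discard the packet.
	 */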
1736 goto discard_it;
1737}
1738
ccb7c410
DM
1739static struct timewait_sock_ops tcp_timewait_sock_ops = {
1740 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1741 .twsk_unique = tcp_twsk_unique,
1742 .twsk_destructor= tcp_twsk_destructor,
ccb7c410 1743};
1da177e4 1744
63d02d15 1745void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
5d299f3d
ED
1746{
1747 struct dst_entry *dst = skb_dst(skb);
1748
5037e9ef 1749 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
1750 sk->sk_rx_dst = dst;
1751 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1752 }
5d299f3d 1753}
63d02d15 1754EXPORT_SYMBOL(inet_sk_rx_dst_set);
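/* Caches the incoming route on the socket so that later packets on the
 * same flow can skip a route lookup; dst_hold_safe() only takes the
 * reference if the dst is not already being torn down.  This helper is
 * wired up below as the ->sk_rx_dst_set hook of ipv4_specific.
 */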
5d299f3d 1755
3b401a81 1756const struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
1757 .queue_xmit = ip_queue_xmit,
1758 .send_check = tcp_v4_send_check,
1759 .rebuild_header = inet_sk_rebuild_header,
5d299f3d 1760 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1761 .conn_request = tcp_v4_conn_request,
1762 .syn_recv_sock = tcp_v4_syn_recv_sock,
543d9cfe
ACM
1763 .net_header_len = sizeof(struct iphdr),
1764 .setsockopt = ip_setsockopt,
1765 .getsockopt = ip_getsockopt,
1766 .addr2sockaddr = inet_csk_addr2sockaddr,
1767 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 1768 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 1769#ifdef CONFIG_COMPAT
543d9cfe
ACM
1770 .compat_setsockopt = compat_ip_setsockopt,
1771 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 1772#endif
4fab9071 1773 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4 1774};
4bc2f18b 1775EXPORT_SYMBOL(ipv4_specific);
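/* ipv4_specific is the address-family-specific dispatch table used by
 * every IPv4 TCP connection (transmit, header rebuild, option handling,
 * address conversion).  It is exported so that the IPv6 code, which can
 * be built as a module, can switch a socket over to these ops when the
 * peer uses a v4-mapped address.
 */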
1da177e4 1776
cfb6eeb4 1777#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1778static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 1779 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1780 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1781 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 1782};
b6332e6c 1783#endif
cfb6eeb4 1784
1da177e4
LT
1785/* NOTE: A lot of things are set to zero explicitly by the call to
1786 * sk_alloc(), so they need not be done here.
1787 */
1788static int tcp_v4_init_sock(struct sock *sk)
1789{
6687e988 1790 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1791
900f65d3 1792 tcp_init_sock(sk);
1da177e4 1793
8292a17a 1794 icsk->icsk_af_ops = &ipv4_specific;
900f65d3 1795
cfb6eeb4 1796#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1797 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
cfb6eeb4 1798#endif
1da177e4 1799
1da177e4
LT
1800 return 0;
1801}
1802
7d06b2e0 1803void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
1804{
1805 struct tcp_sock *tp = tcp_sk(sk);
1806
1807 tcp_clear_xmit_timers(sk);
1808
6687e988 1809 tcp_cleanup_congestion_control(sk);
317a76f9 1810
1da177e4 1811	/* Clean up the write buffer. */
fe067e8a 1812 tcp_write_queue_purge(sk);
1da177e4
LT
1813
1814 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1815 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1816
cfb6eeb4
YH
1817#ifdef CONFIG_TCP_MD5SIG
1818 /* Clean up the MD5 key list, if any */
1819 if (tp->md5sig_info) {
a915da9b 1820 tcp_clear_md5_list(sk);
a8afca03 1821 kfree_rcu(tp->md5sig_info, rcu);
cfb6eeb4
YH
1822 tp->md5sig_info = NULL;
1823 }
1824#endif
1a2449a8 1825
1da177e4
LT
1826	/* Clean up the prequeue; it really must be empty by now. */
1827 __skb_queue_purge(&tp->ucopy.prequeue);
1828
1829 /* Clean up a referenced TCP bind bucket. */
463c84b9 1830 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1831 inet_put_port(sk);
1da177e4 1832
00db4124 1833 BUG_ON(tp->fastopen_rsk);
435cf559 1834
cf60af03
YC
1835	/* If the socket was aborted during the connect() operation */
1836 tcp_free_fastopen_req(tp);
cd8ae852 1837 tcp_saved_syn_free(tp);
cf60af03 1838
777c6ae5 1839 local_bh_disable();
180d8cd9 1840 sk_sockets_allocated_dec(sk);
777c6ae5 1841 local_bh_enable();
3d596f7b 1842
baac50bb 1843 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3d596f7b 1844 sock_release_memcg(sk);
1da177e4 1845}
1da177e4
LT
1846EXPORT_SYMBOL(tcp_v4_destroy_sock);
1847
1848#ifdef CONFIG_PROC_FS
1849/* Proc filesystem TCP sock list dumping. */
1850
a8b690f9
TH
1851/*
1852 * Get the next listener socket following cur. If cur is NULL, get the first
1853 * socket starting from the bucket given in st->bucket; when st->bucket is
1854 * zero, the very first socket in the hash table is returned.
1855 */
1da177e4
LT
1856static void *listening_get_next(struct seq_file *seq, void *cur)
1857{
5799de0b 1858 struct tcp_iter_state *st = seq->private;
a4146b1b 1859 struct net *net = seq_file_net(seq);
3b24d854
ED
1860 struct inet_listen_hashbucket *ilb;
1861 struct inet_connection_sock *icsk;
1862 struct sock *sk = cur;
1da177e4
LT
1863
1864 if (!sk) {
3b24d854 1865get_head:
a8b690f9 1866 ilb = &tcp_hashinfo.listening_hash[st->bucket];
5caea4ea 1867 spin_lock_bh(&ilb->lock);
3b24d854 1868 sk = sk_head(&ilb->head);
a8b690f9 1869 st->offset = 0;
1da177e4
LT
1870 goto get_sk;
1871 }
5caea4ea 1872 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1da177e4 1873 ++st->num;
a8b690f9 1874 ++st->offset;
1da177e4 1875
3b24d854 1876 sk = sk_next(sk);
1da177e4 1877get_sk:
3b24d854 1878 sk_for_each_from(sk) {
8475ef9f
PE
1879 if (!net_eq(sock_net(sk), net))
1880 continue;
3b24d854
ED
1881 if (sk->sk_family == st->family)
1882 return sk;
e905a9ed 1883 icsk = inet_csk(sk);
1da177e4 1884 }
5caea4ea 1885 spin_unlock_bh(&ilb->lock);
a8b690f9 1886 st->offset = 0;
3b24d854
ED
1887 if (++st->bucket < INET_LHTABLE_SIZE)
1888 goto get_head;
1889 return NULL;
1da177e4
LT
1890}
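/* The listening-hash walk above holds the per-bucket lock (ilb->lock) for
 * the bucket it is currently in and releases it before moving on, so at
 * most one listening-hash lock is held at any time (the final unlock for
 * an interrupted dump happens in tcp_seq_stop()).  Sockets belonging to
 * other network namespaces or address families are skipped.
 */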
1891
1892static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1893{
a8b690f9
TH
1894 struct tcp_iter_state *st = seq->private;
1895 void *rc;
1896
1897 st->bucket = 0;
1898 st->offset = 0;
1899 rc = listening_get_next(seq, NULL);
1da177e4
LT
1900
1901 while (rc && *pos) {
1902 rc = listening_get_next(seq, rc);
1903 --*pos;
1904 }
1905 return rc;
1906}
1907
05dbc7b5 1908static inline bool empty_bucket(const struct tcp_iter_state *st)
6eac5604 1909{
05dbc7b5 1910 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
6eac5604
AK
1911}
1912
a8b690f9
TH
1913/*
1914 * Get the first established socket, starting from the bucket given in st->bucket.
1915 * If st->bucket is zero, the very first socket in the hash is returned.
1916 */
1da177e4
LT
1917static void *established_get_first(struct seq_file *seq)
1918{
5799de0b 1919 struct tcp_iter_state *st = seq->private;
a4146b1b 1920 struct net *net = seq_file_net(seq);
1da177e4
LT
1921 void *rc = NULL;
1922
a8b690f9
TH
1923 st->offset = 0;
1924 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1da177e4 1925 struct sock *sk;
3ab5aee7 1926 struct hlist_nulls_node *node;
9db66bdc 1927 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 1928
6eac5604
AK
1929 /* Lockless fast path for the common case of empty buckets */
1930 if (empty_bucket(st))
1931 continue;
1932
9db66bdc 1933 spin_lock_bh(lock);
3ab5aee7 1934 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 1935 if (sk->sk_family != st->family ||
878628fb 1936 !net_eq(sock_net(sk), net)) {
1da177e4
LT
1937 continue;
1938 }
1939 rc = sk;
1940 goto out;
1941 }
9db66bdc 1942 spin_unlock_bh(lock);
1da177e4
LT
1943 }
1944out:
1945 return rc;
1946}
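/* Note the lockless empty_bucket() test above: most ehash buckets are
 * empty, so the common case takes no lock at all.  A non-empty bucket is
 * walked under its inet_ehash_lockp() spinlock, which stays held while
 * sockets from that bucket are being emitted and is released either when
 * the walk moves to the next bucket or in tcp_seq_stop().
 */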
1947
1948static void *established_get_next(struct seq_file *seq, void *cur)
1949{
1950 struct sock *sk = cur;
3ab5aee7 1951 struct hlist_nulls_node *node;
5799de0b 1952 struct tcp_iter_state *st = seq->private;
a4146b1b 1953 struct net *net = seq_file_net(seq);
1da177e4
LT
1954
1955 ++st->num;
a8b690f9 1956 ++st->offset;
1da177e4 1957
05dbc7b5 1958 sk = sk_nulls_next(sk);
1da177e4 1959
3ab5aee7 1960 sk_nulls_for_each_from(sk, node) {
878628fb 1961 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
05dbc7b5 1962 return sk;
1da177e4
LT
1963 }
1964
05dbc7b5
ED
1965 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1966 ++st->bucket;
1967 return established_get_first(seq);
1da177e4
LT
1968}
1969
1970static void *established_get_idx(struct seq_file *seq, loff_t pos)
1971{
a8b690f9
TH
1972 struct tcp_iter_state *st = seq->private;
1973 void *rc;
1974
1975 st->bucket = 0;
1976 rc = established_get_first(seq);
1da177e4
LT
1977
1978 while (rc && pos) {
1979 rc = established_get_next(seq, rc);
1980 --pos;
7174259e 1981 }
1da177e4
LT
1982 return rc;
1983}
1984
1985static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1986{
1987 void *rc;
5799de0b 1988 struct tcp_iter_state *st = seq->private;
1da177e4 1989
1da177e4
LT
1990 st->state = TCP_SEQ_STATE_LISTENING;
1991 rc = listening_get_idx(seq, &pos);
1992
1993 if (!rc) {
1da177e4
LT
1994 st->state = TCP_SEQ_STATE_ESTABLISHED;
1995 rc = established_get_idx(seq, pos);
1996 }
1997
1998 return rc;
1999}
2000
a8b690f9
TH
2001static void *tcp_seek_last_pos(struct seq_file *seq)
2002{
2003 struct tcp_iter_state *st = seq->private;
2004 int offset = st->offset;
2005 int orig_num = st->num;
2006 void *rc = NULL;
2007
2008 switch (st->state) {
a8b690f9
TH
2009 case TCP_SEQ_STATE_LISTENING:
2010 if (st->bucket >= INET_LHTABLE_SIZE)
2011 break;
2012 st->state = TCP_SEQ_STATE_LISTENING;
2013 rc = listening_get_next(seq, NULL);
2014 while (offset-- && rc)
2015 rc = listening_get_next(seq, rc);
2016 if (rc)
2017 break;
2018 st->bucket = 0;
05dbc7b5 2019 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2020 /* Fallthrough */
2021 case TCP_SEQ_STATE_ESTABLISHED:
a8b690f9
TH
2022 if (st->bucket > tcp_hashinfo.ehash_mask)
2023 break;
2024 rc = established_get_first(seq);
2025 while (offset-- && rc)
2026 rc = established_get_next(seq, rc);
2027 }
2028
2029 st->num = orig_num;
2030
2031 return rc;
2032}
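/* tcp_seq_start() uses this helper to resume a partially read dump: when
 * the requested *pos matches st->last_pos, the saved bucket and offset
 * let the walk continue where the previous read() stopped instead of
 * rescanning every bucket from zero.
 */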
2033
1da177e4
LT
2034static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2035{
5799de0b 2036 struct tcp_iter_state *st = seq->private;
a8b690f9
TH
2037 void *rc;
2038
2039 if (*pos && *pos == st->last_pos) {
2040 rc = tcp_seek_last_pos(seq);
2041 if (rc)
2042 goto out;
2043 }
2044
1da177e4
LT
2045 st->state = TCP_SEQ_STATE_LISTENING;
2046 st->num = 0;
a8b690f9
TH
2047 st->bucket = 0;
2048 st->offset = 0;
2049 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2050
2051out:
2052 st->last_pos = *pos;
2053 return rc;
1da177e4
LT
2054}
2055
2056static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2057{
a8b690f9 2058 struct tcp_iter_state *st = seq->private;
1da177e4 2059 void *rc = NULL;
1da177e4
LT
2060
2061 if (v == SEQ_START_TOKEN) {
2062 rc = tcp_get_idx(seq, 0);
2063 goto out;
2064 }
1da177e4
LT
2065
2066 switch (st->state) {
1da177e4
LT
2067 case TCP_SEQ_STATE_LISTENING:
2068 rc = listening_get_next(seq, v);
2069 if (!rc) {
1da177e4 2070 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2071 st->bucket = 0;
2072 st->offset = 0;
1da177e4
LT
2073 rc = established_get_first(seq);
2074 }
2075 break;
2076 case TCP_SEQ_STATE_ESTABLISHED:
1da177e4
LT
2077 rc = established_get_next(seq, v);
2078 break;
2079 }
2080out:
2081 ++*pos;
a8b690f9 2082 st->last_pos = *pos;
1da177e4
LT
2083 return rc;
2084}
2085
2086static void tcp_seq_stop(struct seq_file *seq, void *v)
2087{
5799de0b 2088 struct tcp_iter_state *st = seq->private;
1da177e4
LT
2089
2090 switch (st->state) {
1da177e4
LT
2091 case TCP_SEQ_STATE_LISTENING:
2092 if (v != SEQ_START_TOKEN)
5caea4ea 2093 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
1da177e4 2094 break;
1da177e4
LT
2095 case TCP_SEQ_STATE_ESTABLISHED:
2096 if (v)
9db66bdc 2097 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2098 break;
2099 }
2100}
2101
73cb88ec 2102int tcp_seq_open(struct inode *inode, struct file *file)
1da177e4 2103{
d9dda78b 2104 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
1da177e4 2105 struct tcp_iter_state *s;
52d6f3f1 2106 int err;
1da177e4 2107
52d6f3f1
DL
2108 err = seq_open_net(inode, file, &afinfo->seq_ops,
2109 sizeof(struct tcp_iter_state));
2110 if (err < 0)
2111 return err;
f40c8174 2112
52d6f3f1 2113 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2114 s->family = afinfo->family;
688d1945 2115 s->last_pos = 0;
f40c8174
DL
2116 return 0;
2117}
73cb88ec 2118EXPORT_SYMBOL(tcp_seq_open);
f40c8174 2119
6f8b13bc 2120int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2121{
2122 int rc = 0;
2123 struct proc_dir_entry *p;
2124
9427c4b3
DL
2125 afinfo->seq_ops.start = tcp_seq_start;
2126 afinfo->seq_ops.next = tcp_seq_next;
2127 afinfo->seq_ops.stop = tcp_seq_stop;
2128
84841c3c 2129 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
73cb88ec 2130 afinfo->seq_fops, afinfo);
84841c3c 2131 if (!p)
1da177e4
LT
2132 rc = -ENOMEM;
2133 return rc;
2134}
4bc2f18b 2135EXPORT_SYMBOL(tcp_proc_register);
1da177e4 2136
6f8b13bc 2137void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2138{
ece31ffd 2139 remove_proc_entry(afinfo->name, net->proc_net);
1da177e4 2140}
4bc2f18b 2141EXPORT_SYMBOL(tcp_proc_unregister);
1da177e4 2142
d4f06873 2143static void get_openreq4(const struct request_sock *req,
aa3a0c8c 2144 struct seq_file *f, int i)
1da177e4 2145{
2e6599cb 2146 const struct inet_request_sock *ireq = inet_rsk(req);
fa76ce73 2147 long delta = req->rsk_timer.expires - jiffies;
1da177e4 2148
5e659e4c 2149 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
652586df 2150 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
1da177e4 2151 i,
634fb979 2152 ireq->ir_loc_addr,
d4f06873 2153 ireq->ir_num,
634fb979
ED
2154 ireq->ir_rmt_addr,
2155 ntohs(ireq->ir_rmt_port),
1da177e4
LT
2156 TCP_SYN_RECV,
2157 0, 0, /* could print option size, but that is af dependent. */
2158 1, /* timers active (only the expire timer) */
a399a805 2159 jiffies_delta_to_clock_t(delta),
e6c022a4 2160 req->num_timeout,
aa3a0c8c
ED
2161 from_kuid_munged(seq_user_ns(f),
2162 sock_i_uid(req->rsk_listener)),
1da177e4
LT
2163 0, /* non standard timer */
2164 0, /* open_requests have no inode */
d4f06873 2165 0,
652586df 2166 req);
1da177e4
LT
2167}
2168
652586df 2169static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
1da177e4
LT
2170{
2171 int timer_active;
2172 unsigned long timer_expires;
cf533ea5 2173 const struct tcp_sock *tp = tcp_sk(sk);
cf4c6bf8 2174 const struct inet_connection_sock *icsk = inet_csk(sk);
cf533ea5 2175 const struct inet_sock *inet = inet_sk(sk);
0536fcc0 2176 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
c720c7e8
ED
2177 __be32 dest = inet->inet_daddr;
2178 __be32 src = inet->inet_rcv_saddr;
2179 __u16 destp = ntohs(inet->inet_dport);
2180 __u16 srcp = ntohs(inet->inet_sport);
49d09007 2181 int rx_queue;
00fd38d9 2182 int state;
1da177e4 2183
6ba8a3b1
ND
2184 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2185 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2186 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 2187 timer_active = 1;
463c84b9
ACM
2188 timer_expires = icsk->icsk_timeout;
2189 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2190 timer_active = 4;
463c84b9 2191 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2192 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2193 timer_active = 2;
cf4c6bf8 2194 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2195 } else {
2196 timer_active = 0;
2197 timer_expires = jiffies;
2198 }
2199
00fd38d9
ED
2200 state = sk_state_load(sk);
2201 if (state == TCP_LISTEN)
49d09007
ED
2202 rx_queue = sk->sk_ack_backlog;
2203 else
00fd38d9
ED
2204 /* Because we don't lock the socket,
2205 * we might find a transient negative value.
49d09007
ED
2206 */
2207 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2208
5e659e4c 2209 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
652586df 2210 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
00fd38d9 2211 i, src, srcp, dest, destp, state,
47da8ee6 2212 tp->write_seq - tp->snd_una,
49d09007 2213 rx_queue,
1da177e4 2214 timer_active,
a399a805 2215 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 2216 icsk->icsk_retransmits,
a7cb5a49 2217 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
6687e988 2218 icsk->icsk_probes_out,
cf4c6bf8
IJ
2219 sock_i_ino(sk),
2220 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2221 jiffies_to_clock_t(icsk->icsk_rto),
2222 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2223 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2224 tp->snd_cwnd,
00fd38d9
ED
2225 state == TCP_LISTEN ?
2226 fastopenq->max_qlen :
652586df 2227 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
1da177e4
LT
2228}
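/* With the format above, one /proc/net/tcp entry looks roughly like
 * (values illustrative):
 *
 *    0: 0100007F:0CEA 0100007F:A3B2 01 00000000:00000000 00:00000000 00000000  1000        0 26442 1 ffff88003ba3d300 20 4 30 10 -1
 *
 * 0100007F:0CEA is 127.0.0.1:3306 (address bytes in network order, port
 * in hex), state 01 is TCP_ESTABLISHED, 00000000:00000000 are the tx/rx
 * queues, 00:00000000 means "no timer pending", followed by retransmits,
 * uid, probe count, inode, refcount, socket pointer and the RTO/ATO/cwnd
 * details printed at the end of get_tcp4_sock().
 */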
2229
cf533ea5 2230static void get_timewait4_sock(const struct inet_timewait_sock *tw,
652586df 2231 struct seq_file *f, int i)
1da177e4 2232{
789f558c 2233 long delta = tw->tw_timer.expires - jiffies;
23f33c2d 2234 __be32 dest, src;
1da177e4 2235 __u16 destp, srcp;
1da177e4
LT
2236
2237 dest = tw->tw_daddr;
2238 src = tw->tw_rcv_saddr;
2239 destp = ntohs(tw->tw_dport);
2240 srcp = ntohs(tw->tw_sport);
2241
5e659e4c 2242 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
652586df 2243 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
1da177e4 2244 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
a399a805 2245 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
652586df 2246 atomic_read(&tw->tw_refcnt), tw);
1da177e4
LT
2247}
2248
2249#define TMPSZ 150
2250
2251static int tcp4_seq_show(struct seq_file *seq, void *v)
2252{
5799de0b 2253 struct tcp_iter_state *st;
05dbc7b5 2254 struct sock *sk = v;
1da177e4 2255
652586df 2256 seq_setwidth(seq, TMPSZ - 1);
1da177e4 2257 if (v == SEQ_START_TOKEN) {
652586df 2258 seq_puts(seq, " sl local_address rem_address st tx_queue "
1da177e4
LT
2259 "rx_queue tr tm->when retrnsmt uid timeout "
2260 "inode");
2261 goto out;
2262 }
2263 st = seq->private;
2264
079096f1
ED
2265 if (sk->sk_state == TCP_TIME_WAIT)
2266 get_timewait4_sock(v, seq, st->num);
2267 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 2268 get_openreq4(v, seq, st->num);
079096f1
ED
2269 else
2270 get_tcp4_sock(v, seq, st->num);
1da177e4 2271out:
652586df 2272 seq_pad(seq, '\n');
1da177e4
LT
2273 return 0;
2274}
2275
73cb88ec
AV
2276static const struct file_operations tcp_afinfo_seq_fops = {
2277 .owner = THIS_MODULE,
2278 .open = tcp_seq_open,
2279 .read = seq_read,
2280 .llseek = seq_lseek,
2281 .release = seq_release_net
2282};
2283
1da177e4 2284static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2285 .name = "tcp",
2286 .family = AF_INET,
73cb88ec 2287 .seq_fops = &tcp_afinfo_seq_fops,
9427c4b3
DL
2288 .seq_ops = {
2289 .show = tcp4_seq_show,
2290 },
1da177e4
LT
2291};
2292
2c8c1e72 2293static int __net_init tcp4_proc_init_net(struct net *net)
757764f6
PE
2294{
2295 return tcp_proc_register(net, &tcp4_seq_afinfo);
2296}
2297
2c8c1e72 2298static void __net_exit tcp4_proc_exit_net(struct net *net)
757764f6
PE
2299{
2300 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2301}
2302
2303static struct pernet_operations tcp4_net_ops = {
2304 .init = tcp4_proc_init_net,
2305 .exit = tcp4_proc_exit_net,
2306};
2307
1da177e4
LT
2308int __init tcp4_proc_init(void)
2309{
757764f6 2310 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2311}
2312
2313void tcp4_proc_exit(void)
2314{
757764f6 2315 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2316}
2317#endif /* CONFIG_PROC_FS */
2318
2319struct proto tcp_prot = {
2320 .name = "TCP",
2321 .owner = THIS_MODULE,
2322 .close = tcp_close,
2323 .connect = tcp_v4_connect,
2324 .disconnect = tcp_disconnect,
463c84b9 2325 .accept = inet_csk_accept,
1da177e4
LT
2326 .ioctl = tcp_ioctl,
2327 .init = tcp_v4_init_sock,
2328 .destroy = tcp_v4_destroy_sock,
2329 .shutdown = tcp_shutdown,
2330 .setsockopt = tcp_setsockopt,
2331 .getsockopt = tcp_getsockopt,
1da177e4 2332 .recvmsg = tcp_recvmsg,
7ba42910
CG
2333 .sendmsg = tcp_sendmsg,
2334 .sendpage = tcp_sendpage,
1da177e4 2335 .backlog_rcv = tcp_v4_do_rcv,
46d3ceab 2336 .release_cb = tcp_release_cb,
ab1e0a13
ACM
2337 .hash = inet_hash,
2338 .unhash = inet_unhash,
2339 .get_port = inet_csk_get_port,
1da177e4 2340 .enter_memory_pressure = tcp_enter_memory_pressure,
c9bee3b7 2341 .stream_memory_free = tcp_stream_memory_free,
1da177e4 2342 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2343 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2344 .memory_allocated = &tcp_memory_allocated,
2345 .memory_pressure = &tcp_memory_pressure,
a4fe34bf 2346 .sysctl_mem = sysctl_tcp_mem,
1da177e4
LT
2347 .sysctl_wmem = sysctl_tcp_wmem,
2348 .sysctl_rmem = sysctl_tcp_rmem,
2349 .max_header = MAX_TCP_HEADER,
2350 .obj_size = sizeof(struct tcp_sock),
3ab5aee7 2351 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2352 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2353 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2354 .h.hashinfo = &tcp_hashinfo,
7ba42910 2355 .no_autobind = true,
543d9cfe
ACM
2356#ifdef CONFIG_COMPAT
2357 .compat_setsockopt = compat_tcp_setsockopt,
2358 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 2359#endif
c1e64e29 2360 .diag_destroy = tcp_abort,
1da177e4 2361};
4bc2f18b 2362EXPORT_SYMBOL(tcp_prot);
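/* tcp_prot is the protocol descriptor the socket core uses for IPv4
 * SOCK_STREAM sockets; it is registered by the IPv4 setup code in
 * af_inet.c, and each field above names the TCP handler invoked for the
 * corresponding socket operation (connect, sendmsg, setsockopt, ...).
 */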
1da177e4 2363
bdbbb852
ED
2364static void __net_exit tcp_sk_exit(struct net *net)
2365{
2366 int cpu;
2367
2368 for_each_possible_cpu(cpu)
2369 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2370 free_percpu(net->ipv4.tcp_sk);
2371}
2372
046ee902
DL
2373static int __net_init tcp_sk_init(struct net *net)
2374{
bdbbb852
ED
2375 int res, cpu;
2376
2377 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2378 if (!net->ipv4.tcp_sk)
2379 return -ENOMEM;
2380
2381 for_each_possible_cpu(cpu) {
2382 struct sock *sk;
2383
2384 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2385 IPPROTO_TCP, net);
2386 if (res)
2387 goto fail;
a9d6532b 2388 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
bdbbb852
ED
2389 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2390 }
49213555 2391
5d134f1c 2392 net->ipv4.sysctl_tcp_ecn = 2;
49213555
DB
2393 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2394
b0f9ca53 2395 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
6b58e0a5 2396 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
05cbc0db 2397 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
046ee902 2398
13b287e8 2399 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
9bd6861b 2400 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
b840d15d 2401 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
13b287e8 2402
6fa25166 2403 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
7c083ecb 2404 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
0aca737d 2405 net->ipv4.sysctl_tcp_syncookies = 1;
1043e25f 2406 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
ae5c3f40 2407 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
c6214a97 2408 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
c402d9be 2409 net->ipv4.sysctl_tcp_orphan_retries = 0;
1e579caa 2410 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
4979f2d9 2411 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
12ed8244 2412
49213555 2413 return 0;
bdbbb852
ED
2414fail:
2415 tcp_sk_exit(net);
2416
2417 return res;
b099ce26
EB
2418}
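/* The per-cpu control sockets allocated above are kernel-internal sockets
 * used when TCP must transmit a packet that belongs to no user socket,
 * e.g. the resets sent by tcp_v4_send_reset().  The sysctl assignments
 * that follow them simply seed this namespace's defaults.
 */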
2419
2420static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2421{
2422 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
046ee902
DL
2423}
2424
2425static struct pernet_operations __net_initdata tcp_sk_ops = {
b099ce26
EB
2426 .init = tcp_sk_init,
2427 .exit = tcp_sk_exit,
2428 .exit_batch = tcp_sk_exit_batch,
046ee902
DL
2429};
2430
9b0f976f 2431void __init tcp_v4_init(void)
1da177e4 2432{
5caea4ea 2433 inet_hashinfo_init(&tcp_hashinfo);
6a1b3054 2434 if (register_pernet_subsys(&tcp_sk_ops))
1da177e4 2435 panic("Failed to create the TCP control socket.\n");
1da177e4 2436}