/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
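
/* A rough sketch of what secure_tcp_sequence_number() does with the
 * 4-tuple passed above (the details live in net/core/secure_seq.c; the
 * exact hash and clock granularity are assumptions here, not guarantees):
 *
 *	ISN = keyed_hash(saddr, daddr, sport, dport, boot_time_secret)
 *	      + fine_grained_clock
 *
 * i.e. the RFC 6528 scheme: unpredictable across 4-tuples, yet still
 * monotonically advancing for any one 4-tuple.
 */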

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
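
/* On the write_seq arithmetic above: 65535 + 2 skips past the largest
 * receive window (64KB, unscaled) the old TIME-WAIT peer could still
 * advertise, so segments on the reused port pair cannot alias old
 * in-flight data.  write_seq == 0 means "no ISN chosen yet" to
 * tcp_v4_connect(), hence the bump to 1 when the sum wraps to zero.
 */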

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	inet_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
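
/* Illustrative userspace path into tcp_v4_connect() (the address and
 * port below are documentation examples, not anything defined here):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = { .sin_family = AF_INET,
 *				   .sin_port   = htons(80) };
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * connect(2) lands here via inet_stream_connect() and the protocol's
 * ->connect hook; the -EINVAL/-EAFNOSUPPORT checks at the top are what
 * that path returns for short or non-AF_INET addresses.
 */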

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember the soft error
	 * in case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
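
/* Deferral detail for the function above: when tcp_v4_err() finds the
 * socket owned by user context it only records tp->mtu_info and sets
 * TCP_MTU_REDUCED_DEFERRED (taking a reference with sock_hold());
 * tcp_release_cb() later re-invokes tcp_v4_mtu_reduced() and drops that
 * reference once the socket lock is released.
 */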

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
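
/* Summary of the ICMP dispatch in tcp_v4_err(), for reference:
 *
 *	ICMP_REDIRECT		-> update the cached route only
 *	ICMP_SOURCE_QUENCH	-> silently ignored
 *	ICMP_PARAMETERPROB	-> err = EPROTO
 *	ICMP_DEST_UNREACH	-> PMTU handling for FRAG_NEEDED, otherwise
 *				   icmp_err_convert[]; NET/HOST_UNREACH may
 *				   also revert one RTO backoff step
 *				   (draft-zimmermann-tcp-lcd)
 *	ICMP_TIME_EXCEEDED	-> err = EHOSTUNREACH
 *
 * Whether err becomes sk_err (hard) or sk_err_soft depends on socket
 * state, lock ownership and inet->recverr, as coded above.
 */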

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
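
/* Offload note for __tcp_v4_send_check(): with CHECKSUM_PARTIAL the
 * software side folds only the pseudo-header (the ~tcp_v4_check(..., 0)
 * above) and records where the device must write the final sum via
 * csum_start/csum_offset; the NIC then checksums the TCP header and
 * payload on transmit.  The else branch is the full software checksum
 * for devices without offload.
 */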

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not losing security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
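
/* The seq/ack choice in tcp_v4_send_reset() follows RFC 793 reset
 * generation: if the offending segment carried an ACK, the RST is sent
 * with seq = seg.ack and no ACK bit; otherwise the RST carries
 * ack_seq = seg.seq + seg.len (SYN and FIN each counting as one) so the
 * peer can match it to the segment that provoked it.
 */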

/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states outside
   socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
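
/* Assumed semantics of sysctl_tcp_syncookies, inferred from the checks
 * above and its documented behaviour: 0 never sends cookies, 1 sends
 * them only when the listen queue overflows (this path), and 2 sends
 * them unconditionally -- which is why the rate-limited warning is
 * suppressed for the == 2 case.
 */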

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
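
/* Illustrative userspace use of the TCP_MD5SIG option parsed above (the
 * peer address and key are example values only):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	a->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.2", &a->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that address, per the code
 * above.
 */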

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->ir_loc_addr;
		daddr = inet_rsk(req)->ir_rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
				      const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}

static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	bool ret;

	rcu_read_lock();
	ret = __tcp_v4_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}

#endif

static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	ireq->ir_loc_addr = ip_hdr(skb)->daddr;
	ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
};
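
/* These ops tables are what keeps tcp_conn_request() address-family
 * independent: the IPv4-specific pieces (request init, routing, ISN
 * generation, SYN-ACK emission) are supplied as the hooks above, and
 * the IPv6 code fills the same slots with its own table.
 */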

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->ir_rmt_addr;
	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	inet_set_txhash(newsk);
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)	--ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
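
/* Design note on the prequeue logic above: the first queued skb wakes
 * the waiting reader; if prequeued memory outgrows sk_rcvbuf the whole
 * queue is flushed through sk_backlog_rcv() in softirq context instead;
 * and the (3 * tcp_rto_min(sk)) / 4 delayed-ACK timer bounds how long
 * segments can sit unacknowledged while waiting for the reader to run.
 */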

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	 = sizeof(struct tcp_timewait_sock),
	.twsk_unique	 = tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
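
/* The cached rx dst lets early demux attach a known-good route to later
 * packets of this flow without a per-packet route lookup; rx_dst_ifindex
 * records the interface the cache is valid for, so a path change can be
 * detected and the cache invalidated.
 */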

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

7d06b2e0 1813void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
1814{
1815 struct tcp_sock *tp = tcp_sk(sk);
1816
1817 tcp_clear_xmit_timers(sk);
1818
6687e988 1819 tcp_cleanup_congestion_control(sk);
317a76f9 1820
1da177e4 1821 /* Cleanup up the write buffer. */
fe067e8a 1822 tcp_write_queue_purge(sk);
1da177e4
LT
1823
1824 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1825 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1826
cfb6eeb4
YH
1827#ifdef CONFIG_TCP_MD5SIG
1828 /* Clean up the MD5 key list, if any */
1829 if (tp->md5sig_info) {
a915da9b 1830 tcp_clear_md5_list(sk);
a8afca03 1831 kfree_rcu(tp->md5sig_info, rcu);
cfb6eeb4
YH
1832 tp->md5sig_info = NULL;
1833 }
1834#endif
1a2449a8 1835
1da177e4
LT
1836 /* Clean prequeue, it must be empty really */
1837 __skb_queue_purge(&tp->ucopy.prequeue);
1838
1839 /* Clean up a referenced TCP bind bucket. */
463c84b9 1840 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1841 inet_put_port(sk);
1da177e4 1842
168a8f58 1843 BUG_ON(tp->fastopen_rsk != NULL);
435cf559 1844
cf60af03
YC
1845 /* If socket is aborted during connect operation */
1846 tcp_free_fastopen_req(tp);
1847
180d8cd9 1848 sk_sockets_allocated_dec(sk);
d1a4c0b3 1849 sock_release_memcg(sk);
1da177e4 1850}
1da177e4
LT
1851EXPORT_SYMBOL(tcp_v4_destroy_sock);
1852
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get the next listener socket following cur.  If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero, the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get the first established socket starting from the bucket given in
 * st->bucket.  If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
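
/* Resuming from st->bucket/st->offset instead of rewalking the whole table
 * keeps sequential reads of a large /proc/net/tcp roughly linear; without
 * this, every read() would rescan all buckets before the saved position.
 */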

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
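
/* Note that the iterator leaves the current bucket lock held between
 * ->start() and ->stop(), so tcp_seq_stop() must release whichever lock
 * matches the state the walk ended in.
 */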

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0,  /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
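
/* A worked example of a line formatted above (values illustrative;
 * addresses print as hex in host byte order, so on a little-endian box
 * 0100007F:0016 is 127.0.0.1:22):
 *
 *    0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 ...
 *
 * State 0A is TCP_LISTEN, tx_queue and rx_queue are both empty, and 12345
 * stands in for the socket's inode number; the refcount, socket pointer
 * and retransmission state follow.
 */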

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	__be32 dest, src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait4_sock(v, seq, st->num);
		else
			get_tcp4_sock(v, seq, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
		break;
	}
out:
	seq_pad(seq, '\n');
	return 0;
}
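
/*
 * A minimal userspace sketch (an illustrative example, not part of this
 * file) that consumes the table emitted by tcp4_seq_show() through
 * /proc/net/tcp:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// one socket per line
 *		fclose(f);
 *		return 0;
 *	}
 */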

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = tcp_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
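
/* At this point in the tree, tcp_prot is hooked into the socket layer by
 * inet_init() in net/ipv4/af_inet.c via proto_register(&tcp_prot, 1); the
 * second argument requests allocation of the proto's slab caches,
 * including the request_sock and timewait_sock caches named above.
 */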

static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}