/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);


#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used as
	   state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
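
/* Editorial note: the write_seq chosen above for a reused TIME-WAIT pair
 * starts past the old incarnation's sequence space (tw_snd_nxt + 65535 + 2,
 * i.e. beyond the largest unscaled window the old connection could have
 * advertised), which is presumably what keeps the two incarnations'
 * sequence spaces from overlapping even when PAWS timestamps are unavailable.
 */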
static int tcp_repair_connect(struct sock *sk)
{
	tcp_connect_init(sk);
	tcp_finish_connect(sk, NULL);

	return 0;
}
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	if (likely(!tp->repair))
		err = tcp_connect(sk);
	else
		err = tcp_repair_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
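/* Editorial note: orig_sport/orig_dport are captured before routing because
 * inet_hash_connect() may pick an ephemeral source port; ip_route_newports()
 * above then re-resolves the route only if the final ports differ from the
 * ones the initial lookup used.
 */
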
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * for the case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
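/* Editorial note, with assumed numbers: if this socket cached
 * icsk_pmtu_cookie = 1500 and an ICMP_FRAG_NEEDED reports mtu = 576,
 * tcp_sync_mss() above recomputes the MSS for the 576-byte path (536 bytes
 * with no IP/TCP options) and tcp_simple_retransmit() resends the too-large
 * in-flight segments without waiting for the retransmit timer.
 */
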
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk) &&
	    type != ICMP_DEST_UNREACH &&
	    code != ICMP_FRAG_NEEDED)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			tp->mtu_info = info;
			if (!sock_owned_by_user(sk))
				tcp_v4_mtu_reduced(sk);
			else
				set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, f.e., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
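/* Editorial note: the backoff revert in tcp_v4_err() follows
 * draft-zimmermann-tcp-lcd. Worked example (values assumed): with a base RTO
 * of 200 ms backed off to icsk_backoff = 3 (RTO 1600 ms), an
 * ICMP_HOST_UNREACH matching snd_una drops icsk_backoff to 2, so the RTO
 * becomes 800 ms; if 500 ms have elapsed since the head skb was stamped, the
 * retransmit timer is re-armed for the remaining 300 ms, and if 800 ms or
 * more have elapsed, tcp_retransmit_timer() fires immediately.
 */
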
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
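/* Editorial note: in the CHECKSUM_PARTIAL branch above only the folded
 * pseudo-header sum is stored in th->check (note the ~); csum_start and
 * csum_offset tell the NIC (or the software fallback) where to finish the
 * one's-complement sum over the TCP header and payload at transmit time.
 */
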
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. using iif for oif to
	 * make sure we can deliver it
	 */
	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
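/* Editorial note: the seq/ack choice in tcp_v4_send_reset() mirrors the
 * RFC 793 reset rules: a segment carrying an ACK is answered with an RST
 * whose seq equals that ack number (no ACK flag set); otherwise the RST
 * ACKs everything the segment occupied (seq + SYN + FIN + payload length),
 * so the peer accepts it in either case.
 */
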
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
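
/* Editorial note: when want_cookie is returned true, no request_sock is
 * queued; the listener instead encodes the essential connection parameters
 * into the SYN-ACK's initial sequence number (see cookie_v4_init_sequence()
 * in tcp_v4_conn_request()) and reconstructs them from the peer's final ACK.
 */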

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address. */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos, *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
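/* Editorial usage sketch (not from this file; peer_addr is assumed):
 * userspace installs a key via the TCP_MD5SIG socket option, which lands
 * in the handler above:
 *
 *	struct tcp_md5sig md5 = { };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = peer_addr;
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the entry instead.
 */
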
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct flowi4 fl4;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb),
			       want_cookie) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	tcp_clear_xmit_timers(newsk);
	tcp_cleanup_congestion_control(newsk);
	bh_unlock_sock(newsk);
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
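/* Editorial note: for CHECKSUM_COMPLETE the device already summed the whole
 * packet, so tcp_v4_checksum_init() only folds in the pseudo-header. Short
 * segments (<= 76 bytes) are verified immediately; longer ones keep the
 * partial sum so verification can be combined with a later copy pass, which
 * appears to be the point of deferring the check here.
 */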

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		if (unlikely(sk->sk_rx_dst == NULL))
			inet_sk_rx_dst_set(sk, skb);

		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

void tcp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(net, &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
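
/* tcp_v4_early_demux() runs from the IP input path before the routing
 * decision.  When the packet belongs to an already-established socket, the
 * socket's cached rx dst is attached to the skb (noref), letting IP skip a
 * full route lookup for that packet.  Only PACKET_HOST frames carrying a
 * complete TCP header are considered.
 */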

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
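
/* Queueing in tcp_v4_rcv(), in outline: when the socket is not owned by a
 * user task, the segment is processed directly (or parked on the prequeue
 * for a waiting reader); otherwise it is appended to the backlog, bounded
 * by sk_rcvbuf + sk_sndbuf, and drained when the owner releases the socket
 * lock.  A SYN arriving at a TIME_WAIT socket (TCP_TW_SYN) is redirected
 * to a matching listener, which allows the old port pair to be reused
 * without waiting out the full TIME_WAIT period.
 */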

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean up the prequeue; it should already be empty. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If a sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	/* If the socket is aborted during a connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
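
/* Teardown ordering above: timers are stopped before any queues are purged,
 * and the MD5 key list is freed with kfree_rcu() since the receive path may
 * still be traversing md5sig_info under RCU.  The bind bucket reference is
 * dropped via inet_put_port() only after the transmit state is gone.
 */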

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
	       list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
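
/* These helpers walk "nulls" lists: the chain terminator encodes the bucket
 * it belongs to, so a lockless reader that has been moved onto another
 * chain by a concurrent rehash can notice (is_a_nulls()) and restart.  The
 * /proc iterators below hold the bucket lock, so for them the nulls value
 * simply marks the end of the chain.
 */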

/*
 * Get the next listener socket following cur.  If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
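
/* Iterator state machine: listening_get_next() alternates between
 * TCP_SEQ_STATE_LISTENING (walking one listening_hash bucket under
 * ilb->lock) and TCP_SEQ_STATE_OPENREQ (walking a listener's SYN table
 * under syn_wait_lock), returning to the listening chain once a listener's
 * pending request sockets are exhausted.
 */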

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
2095
a2a385d6 2096static inline bool empty_bucket(struct tcp_iter_state *st)
6eac5604 2097{
3ab5aee7
ED
2098 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2099 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
6eac5604
AK
2100}
2101
a8b690f9
TH
2102/*
2103 * Get first established socket starting from bucket given in st->bucket.
2104 * If st->bucket is zero, the very first socket in the hash is returned.
2105 */
1da177e4
LT
2106static void *established_get_first(struct seq_file *seq)
2107{
5799de0b 2108 struct tcp_iter_state *st = seq->private;
a4146b1b 2109 struct net *net = seq_file_net(seq);
1da177e4
LT
2110 void *rc = NULL;
2111
a8b690f9
TH
2112 st->offset = 0;
2113 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1da177e4 2114 struct sock *sk;
3ab5aee7 2115 struct hlist_nulls_node *node;
8feaf0c0 2116 struct inet_timewait_sock *tw;
9db66bdc 2117 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 2118
6eac5604
AK
2119 /* Lockless fast path for the common case of empty buckets */
2120 if (empty_bucket(st))
2121 continue;
2122
9db66bdc 2123 spin_lock_bh(lock);
3ab5aee7 2124 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 2125 if (sk->sk_family != st->family ||
878628fb 2126 !net_eq(sock_net(sk), net)) {
1da177e4
LT
2127 continue;
2128 }
2129 rc = sk;
2130 goto out;
2131 }
2132 st->state = TCP_SEQ_STATE_TIME_WAIT;
8feaf0c0 2133 inet_twsk_for_each(tw, node,
dbca9b27 2134 &tcp_hashinfo.ehash[st->bucket].twchain) {
28518fc1 2135 if (tw->tw_family != st->family ||
878628fb 2136 !net_eq(twsk_net(tw), net)) {
1da177e4
LT
2137 continue;
2138 }
2139 rc = tw;
2140 goto out;
2141 }
9db66bdc 2142 spin_unlock_bh(lock);
1da177e4
LT
2143 st->state = TCP_SEQ_STATE_ESTABLISHED;
2144 }
2145out:
2146 return rc;
2147}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
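
/* Each ehash bucket is scanned in two passes under its per-bucket lock:
 * first the regular chain, then the twchain of TIME_WAIT sockets, with
 * st->state recording which half the cursor is in so that traversal can
 * resume correctly across successive seq_file reads.
 */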

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
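
/* seq_file restarts the walk for every read() chunk; tcp_seek_last_pos()
 * resumes from the cached bucket and offset instead of rescanning from the
 * first socket, which keeps dumping a large /proc/net/tcp roughly linear
 * in the table size rather than quadratic.
 */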

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
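
/* Illustrative /proc/net/tcp row produced by the formats above (the
 * addresses and values here are made up; real output depends on the
 * host's sockets):
 *
 *    0: 0100007F:0CEA 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 ffff880012345678 100 0 0 10 -1
 *
 * i.e. 127.0.0.1:3306 in state 0A (TCP_LISTEN), addresses printed as hex in
 * network byte order, followed by the timer, retransmit, uid, inode and
 * socket fields emitted by get_tcp4_sock().  Each row is padded out to
 * TMPSZ - 1 characters so that all records have the same length.
 */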

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
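
/* GRO pairing: tcp4_gro_receive() verifies (or flushes on) the checksum
 * before segments are merged, and tcp4_gro_complete() rebuilds the
 * pseudo-header checksum for the aggregated skb and marks it
 * SKB_GSO_TCPV4 so it can be re-segmented on transmit if needed.
 */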

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

static int __net_init tcp_sk_init(struct net *net)
{
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}