ipv4: Remove all RTCF_DIRECTSRC handling.
[linux-2.6-block.git] / net / ipv4 / tcp_ipv4.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
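
/* Both knobs above are exposed under /proc/sys/net/ipv4 (as
 * tcp_tw_reuse and tcp_low_latency).  A minimal userspace sketch of
 * flipping tcp_tw_reuse, illustrative only and not part of this file,
 * assuming procfs is mounted and the caller may tune sysctls:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/proc/sys/net/ipv4/tcp_tw_reuse", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		(void)write(fd, "1\n", 2);
 *		close(fd);
 *		return 0;
 *	}
 *
 * With tcp_tw_reuse enabled, tcp_twsk_unique() below may let a new
 * outgoing connection reuse a port pair that is still in TIME-WAIT.
 */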

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

static int tcp_repair_connect(struct sock *sk)
{
	tcp_connect_init(sk);
	tcp_finish_connect(sk, NULL);

	return 0;
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	if (likely(!tp->repair))
		err = tcp_connect(sk);
	else
		err = tcp_repair_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
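
/* tcp_v4_connect() is what ultimately runs when userspace issues
 * connect(2) on an AF_INET stream socket.  A minimal caller sketch,
 * illustrative only (192.0.2.1:80 is a placeholder destination):
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_in dst = { .sin_family = AF_INET,
 *					   .sin_port   = htons(80) };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *		if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */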

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	/* We are not interested in TCP_LISTEN and open_requests
	 * (SYN-ACKs sent out by Linux are always < 576 bytes, so they
	 * should go through unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk) &&
	    type != ICMP_DEST_UNREACH &&
	    code != ICMP_FRAG_NEEDED)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			tp->mtu_info = info;
			if (!sock_owned_by_user(sk))
				tcp_v4_mtu_reduced(sk);
			else
				set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can f.e. if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
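
/* The checksum built above covers the RFC 793 pseudo-header (source
 * address, destination address, zero-padded protocol number, TCP
 * length) followed by the TCP header and payload.  A portable
 * userspace sketch of the same 16-bit one's-complement sum, for
 * illustration only (addresses passed as host-order words, th->check
 * assumed zeroed in the buffer; the kernel itself uses the optimized
 * csum_* helpers):
 *
 *	#include <netinet/in.h>
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	static uint16_t csum_fold(uint32_t sum)
 *	{
 *		while (sum >> 16)
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 *
 *	static uint16_t tcp_csum(uint32_t saddr, uint32_t daddr,
 *				 const uint8_t *seg, size_t len)
 *	{
 *		uint32_t sum = (saddr >> 16) + (saddr & 0xffff) +
 *			       (daddr >> 16) + (daddr & 0xffff) +
 *			       IPPROTO_TCP + (uint32_t)len;
 *		size_t i;
 *
 *		for (i = 0; i + 1 < len; i += 2)
 *			sum += ((uint32_t)seg[i] << 8) | seg[i + 1];
 *		if (len & 1)
 *			sum += (uint32_t)seg[len - 1] << 8;
 *		return csum_fold(sum);
 *	}
 */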

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose any security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. using iif for oif to
	 * make sure we can deliver it
	 */
	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
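
/* Syncookies encode the connection state into the SYN-ACK's initial
 * sequence number, roughly
 *
 *	isn = hash(saddr, daddr, sport, dport, t, secret)
 *	      + client_isn + (t << 24) + mss_index	(mod 2^32)
 *
 * so a flooded listener can later validate the handshake-completing
 * ACK without having kept a request_sock around.  The gating sysctl
 * is tcp_syncookies under /proc/sys/net/ipv4, writable the same way
 * as the tcp_tw_reuse sketch near the top of this file.  (Schematic
 * only; see syncookies.c for the exact layout.)
 */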

/*
 *	Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos, *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
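
/* tcp_v4_parse_md5_keys() is the kernel side of the TCP_MD5SIG socket
 * option.  A userspace sketch installing an RFC 2385 key for one peer,
 * illustrative only (the peer address and key are placeholders, and a
 * libc whose netinet/tcp.h exposes TCP_MD5SIG and struct tcp_md5sig is
 * assumed):
 *
 *	#include <arpa/inet.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int install_md5_key(int fd)
 *	{
 *		struct tcp_md5sig md5;
 *		struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *		memset(&md5, 0, sizeof(md5));
 *		sin->sin_family = AF_INET;
 *		inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *		md5.tcpm_keylen = 6;
 *		memcpy(md5.tcpm_key, "secret", 6);
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
 *				  &md5, sizeof(md5));
 *	}
 */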

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct flowi4 fl4;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb),
			       want_cookie) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
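
/* tcp_v4_conn_request() runs for every SYN that reaches a listening
 * socket; the "accept backlog" it checks is the one set by listen(2),
 * capped by net.core.somaxconn.  A minimal listener sketch,
 * illustrative only (port 8080 and backlog 128 are placeholders):
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int make_listener(void)
 *	{
 *		struct sockaddr_in addr = { .sin_family = AF_INET,
 *					    .sin_port   = htons(8080) };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		if (fd < 0 ||
 *		    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
 *		    listen(fd, 128) < 0)
 *			return -1;
 *		return fd;
 *	}
 */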


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	ireq = inet_rsk(req);
	newinet->inet_daddr = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr = ireq->loc_addr;
	inet_opt = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt = NULL;
	newinet->mc_index = inet_iif(skb);
	newinet->mc_ttl = ip_hdr(skb)->ttl;
	newinet->rcv_tos = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	tcp_clear_xmit_timers(newsk);
	tcp_cleanup_congestion_control(newsk);
	bh_unlock_sock(newsk);
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
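
/* The child socket created above is what accept(2) on the listener
 * eventually hands back to userspace.  Continuing the listener sketch
 * after tcp_v4_conn_request(), illustrative only:
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *
 *	int serve_one(int listen_fd)
 *	{
 *		struct sockaddr_in peer;
 *		socklen_t len = sizeof(peer);
 *
 *		return accept(listen_fd, (struct sockaddr *)&peer, &len);
 *	}
 */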

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}


/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *   o We're expecting an MD5'd packet and there is no MD5 tcp option
	 *   o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

void tcp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct net_device *dev;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + th->doff * 4))
		return;

	dev = skb->dev;
	sk = __inet_lookup_established(net, &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       dev->ifindex);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;
			if (dst)
				dst = dst_check(dst, 0);
			if (dst) {
				struct rtable *rt = (struct rtable *) dst;

				if (rt->rt_iif == dev->ifindex)
					skb_dst_set_noref(skb, dst);
			}
		}
	}
}
1714
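/*
 * Hedged sketch of the call site (simplified from net/ipv4/ip_input.c of
 * this era; names and details may differ): ip_rcv_finish() gives the
 * transport protocol a chance to find the socket, and thus a cached
 * route, before the generic route lookup runs:
 *
 *	if (sysctl_ip_early_demux && !skb_dst(skb)) {
 *		const struct net_protocol *ipprot =
 *			rcu_dereference(inet_protos[ip_hdr(skb)->protocol]);
 *		if (ipprot && ipprot->early_demux)
 *			ipprot->early_demux(skb);
 *	}
 */
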
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked = 0;
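
	/*
	 * Illustrative note (not in the original file): end_seq above
	 * counts SYN and FIN as one sequence number each, so a 100-byte
	 * segment carrying FIN with seq == 1000 gets
	 * end_seq == 1000 + 0 (syn) + 1 (fin) + 100 == 1101.
	 */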

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean up the prequeue; it really must be empty by now. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If a sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	/* If the socket is aborted during a connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

/*
 * Get the next listener socket following cur. If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero, the very first socket in the hash table is
 * returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

2074
2075static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2076{
a8b690f9
TH
2077 struct tcp_iter_state *st = seq->private;
2078 void *rc;
2079
2080 st->bucket = 0;
2081 st->offset = 0;
2082 rc = listening_get_next(seq, NULL);
1da177e4
LT
2083
2084 while (rc && *pos) {
2085 rc = listening_get_next(seq, rc);
2086 --*pos;
2087 }
2088 return rc;
2089}

static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get the first established socket starting from the bucket given in
 * st->bucket. If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
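
/*
 * Hedged sketch (not in the original file) of how the seq_file core
 * drives the iterator above; seq_read() in fs/seq_file.c behaves
 * roughly like:
 *
 *	p = tcp_seq_start(seq, &pos);
 *	while (p) {
 *		tcp4_seq_show(seq, p);		// via seq_ops.show
 *		p = tcp_seq_next(seq, p, &pos);
 *	}
 *	tcp_seq_stop(seq, p);
 *
 * st->last_pos lets tcp_seq_start() resume from the previous bucket
 * instead of rescanning the whole hash table on every read() call.
 */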

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	s->last_pos = 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start = tcp_seq_start;
	afinfo->seq_ops.next  = tcp_seq_next;
	afinfo->seq_ops.stop  = tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * Because we don't lock the socket, we might find a
		 * transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
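
/*
 * Illustrative /proc/net/tcp line as produced above (example values,
 * not from the source): a listener on 0.0.0.0:22 (port 0x0016) in state
 * 0A (TCP_LISTEN) might show up as:
 *
 *    0: 00000000:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 10863 1 ffff8800374fa740 100 0 0 10 -1
 *
 * where the trailing fields are the inode, refcnt, the socket pointer,
 * rto, ato, quick/pingpong, snd_cwnd and ssthresh (-1 while still in
 * initial slow start).
 */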

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
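
/*
 * Hedged sketch (simplified from net/ipv4/af_inet.c of this era; field
 * values are illustrative): tcp_v4_rcv(), tcp_v4_early_demux() and the
 * two GRO hooks above are all wired up through a single net_protocol
 * entry registered for IPPROTO_TCP:
 *
 *	static const struct net_protocol tcp_protocol = {
 *		.early_demux	= tcp_v4_early_demux,
 *		.handler	= tcp_v4_rcv,
 *		.err_handler	= tcp_v4_err,
 *		.gro_receive	= tcp4_gro_receive,
 *		.gro_complete	= tcp4_gro_complete,
 *		.no_policy	= 1,
 *		.netns_ok	= 1,
 *	};
 *
 *	inet_add_protocol(&tcp_protocol, IPPROTO_TCP);
 */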

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
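
/*
 * Hedged sketch (simplified from net/ipv4/af_inet.c; not part of this
 * file): tcp_prot becomes reachable from socket(AF_INET, SOCK_STREAM,
 * IPPROTO_TCP) through an inet_protosw entry along the lines of:
 *
 *	static struct inet_protosw tcp_protosw = {
 *		.type		= SOCK_STREAM,
 *		.protocol	= IPPROTO_TCP,
 *		.prot		= &tcp_prot,
 *		.ops		= &inet_stream_ops,
 *		.no_check	= 0,
 *		.flags		= INET_PROTOSW_PERMANENT | INET_PROTOSW_ICSK,
 *	};
 *
 *	inet_register_protosw(&tcp_protosw);
 */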

static int __net_init tcp_sk_init(struct net *net)
{
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}