/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr,
				const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

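/* Cache the inbound (RX) route on the socket so that the established fast
 * path can later validate it cheaply via rx_dst_cookie instead of doing a
 * full route lookup.
 */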
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	if (rt->rt6i_node)
		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

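/* Handle an ICMPV6_PKT_TOOBIG notification (directly or deferred from
 * tcp_v6_err()): refresh the cached path MTU and, if our current MSS no
 * longer fits, shrink it and retransmit.
 */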
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else
			set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi6 *fl6,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6->daddr = treq->rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	struct flowi6 fl6;

	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

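/* Build and transmit a bare ACK or RST in reply to @skb on behalf of the
 * (possibly absent) socket.  Shared by the RST, time-wait ACK and request
 * sock ACK paths below.
 */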
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for an RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket.  We do not lose security here: the
		 * incoming packet is checked against the md5 hash of the key
		 * we find, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->daddr,
					    ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}

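/* For a listening socket, match an incoming segment against the pending
 * request socks, an already established child, or (with SYN cookies) a
 * cookie-validated connection.
 */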
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (tcp_v6_send_synack(sk, dst, &fl6, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but we
	   do one more thing here: reattach the optmem to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

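/* Main receive entry point for TCP segments handed up from the IPv6 layer. */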
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

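/* Early demultiplexing: look up the established socket while the packet is
 * still in the IPv6 receive path, so its cached RX dst can be reused.
 */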
1702static void tcp_v6_early_demux(struct sk_buff *skb)
1703{
1704 const struct ipv6hdr *hdr;
1705 const struct tcphdr *th;
1706 struct sock *sk;
1707
1708 if (skb->pkt_type != PACKET_HOST)
1709 return;
1710
1711 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1712 return;
1713
1714 hdr = ipv6_hdr(skb);
1715 th = tcp_hdr(skb);
1716
1717 if (th->doff < sizeof(struct tcphdr) / 4)
1718 return;
1719
1720 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1721 &hdr->saddr, th->source,
1722 &hdr->daddr, ntohs(th->dest),
1723 inet6_iif(skb));
1724 if (sk) {
1725 skb->sk = sk;
1726 skb->destructor = sock_edemux;
1727 if (sk->sk_state != TCP_TIME_WAIT) {
1728 struct dst_entry *dst = sk->sk_rx_dst;
1729 struct inet_sock *icsk = inet_sk(sk);
1730 if (dst)
5d299f3d 1731 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
c7109986 1732 if (dst &&
5d299f3d 1733 icsk->rx_dst_ifindex == skb->skb_iif)
c7109986
ED
1734 skb_dst_set_noref(skb, dst);
1735 }
1736 }
1737}
1738
ccb7c410
DM
1739static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1740 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1741 .twsk_unique = tcp_twsk_unique,
1742 .twsk_destructor= tcp_twsk_destructor,
ccb7c410
DM
1743};
1744
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit          = inet6_csk_xmit,
	.send_check          = tcp_v6_send_check,
	.rebuild_header      = inet6_sk_rebuild_header,
	.sk_rx_dst_set       = inet6_sk_rx_dst_set,
	.conn_request        = tcp_v6_conn_request,
	.syn_recv_sock       = tcp_v6_syn_recv_sock,
	.net_header_len      = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt          = ipv6_setsockopt,
	.getsockopt          = ipv6_getsockopt,
	.addr2sockaddr       = inet6_csk_addr2sockaddr,
	.sockaddr_len        = sizeof(struct sockaddr_in6),
	.bind_conflict       = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt   = compat_ipv6_setsockopt,
	.compat_getsockopt   = compat_ipv6_getsockopt,
#endif
};

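/*
 * TCP MD5 signature option (RFC 2385) hooks for native IPv6 sockets:
 * key lookup, per-segment hash calculation and TCP_MD5SIG setsockopt
 * parsing.
 */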
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup    = tcp_v6_md5_lookup,
	.calc_md5_hash = tcp_v6_md5_hash_skb,
	.md5_parse     = tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

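/*
 * Operations switched in when an AF_INET6 socket actually carries IPv4
 * traffic through a v4-mapped address: transmit, checksumming and
 * header sizing use the IPv4 code, while socket options and sockaddr
 * handling stay with the IPv6 API.
 */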
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit        = ip_queue_xmit,
	.send_check        = tcp_v4_send_check,
	.rebuild_header    = inet_sk_rebuild_header,
	.sk_rx_dst_set     = inet_sk_rx_dst_set,
	.conn_request      = tcp_v6_conn_request,
	.syn_recv_sock     = tcp_v6_syn_recv_sock,
	.net_header_len    = sizeof(struct iphdr),
	.setsockopt        = ipv6_setsockopt,
	.getsockopt        = ipv6_getsockopt,
	.addr2sockaddr     = inet6_csk_addr2sockaddr,
	.sockaddr_len      = sizeof(struct sockaddr_in6),
	.bind_conflict     = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup    = tcp_v4_md5_lookup,
	.calc_md5_hash = tcp_v4_md5_hash_skb,
	.md5_parse     = tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

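/*
 * Socket teardown: run the common TCP destructor shared with IPv4 and
 * then release the IPv6-specific socket state.
 */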
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0,  /* open_requests have no inode */
		   0, req);
}

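/*
 * Emit one /proc/net/tcp6 line for a full socket: hex source and
 * destination address/port, state, send/receive queue sizes, the
 * pending timer and its expiry, retransmit and probe counters, owner
 * uid, inode, refcount and a few congestion-control details (RTO,
 * delayed-ACK state, cwnd, ssthresh).
 */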
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active  = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active  = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active  = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active  = 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

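/*
 * Emit one /proc/net/tcp6 line for a TIME-WAIT socket; most columns are
 * printed as zero because the timewait minisock keeps only addresses,
 * ports, the substate and its remaining lifetime.
 */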
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = &tw6->tw_v6_daddr;
	src   = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

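/*
 * seq_file show callback: print the column header for the start token,
 * then dispatch on the iterator state to the listener/established,
 * open-request or TIME-WAIT formatter above.
 */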
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name     = "tcp6",
	.family   = AF_INET6,
	.seq_fops = &tcp6_afinfo_seq_fops,
	.seq_ops  = {
		.show = tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

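/*
 * Protocol definition hooked up below via tcpv6_protosw for
 * SOCK_STREAM/IPPROTO_TCP sockets: most callbacks are the shared TCP
 * implementation, with IPv6-specific connect, init/destroy, backlog
 * receive and hashing plugged in.
 */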
struct proto tcpv6_prot = {
	.name                  = "TCPv6",
	.owner                 = THIS_MODULE,
	.close                 = tcp_close,
	.connect               = tcp_v6_connect,
	.disconnect            = tcp_disconnect,
	.accept                = inet_csk_accept,
	.ioctl                 = tcp_ioctl,
	.init                  = tcp_v6_init_sock,
	.destroy               = tcp_v6_destroy_sock,
	.shutdown              = tcp_shutdown,
	.setsockopt            = tcp_setsockopt,
	.getsockopt            = tcp_getsockopt,
	.recvmsg               = tcp_recvmsg,
	.sendmsg               = tcp_sendmsg,
	.sendpage              = tcp_sendpage,
	.backlog_rcv           = tcp_v6_do_rcv,
	.release_cb            = tcp_release_cb,
	.mtu_reduced           = tcp_v6_mtu_reduced,
	.hash                  = tcp_v6_hash,
	.unhash                = inet_unhash,
	.get_port              = inet_csk_get_port,
	.enter_memory_pressure = tcp_enter_memory_pressure,
	.sockets_allocated     = &tcp_sockets_allocated,
	.memory_allocated      = &tcp_memory_allocated,
	.memory_pressure       = &tcp_memory_pressure,
	.orphan_count          = &tcp_orphan_count,
	.sysctl_wmem           = sysctl_tcp_wmem,
	.sysctl_rmem           = sysctl_tcp_rmem,
	.max_header            = MAX_TCP_HEADER,
	.obj_size              = sizeof(struct tcp6_sock),
	.slab_flags            = SLAB_DESTROY_BY_RCU,
	.twsk_prot             = &tcp6_timewait_sock_ops,
	.rsk_prot              = &tcp6_request_sock_ops,
	.h.hashinfo            = &tcp_hashinfo,
	.no_autobind           = true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt     = compat_tcp_setsockopt,
	.compat_getsockopt     = compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup          = tcp_proto_cgroup,
#endif
};

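/*
 * IPv6 protocol handler for IPPROTO_TCP: receive and ICMPv6 error entry
 * points plus GSO/GRO offload callbacks and the early-demux hook run
 * from the IPv6 input path.
 */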
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux    = tcp_v6_early_demux,
	.handler        = tcp_v6_rcv,
	.err_handler    = tcp_v6_err,
	.gso_send_check = tcp_v6_gso_send_check,
	.gso_segment    = tcp_tso_segment,
	.gro_receive    = tcp6_gro_receive,
	.gro_complete   = tcp6_gro_complete,
	.flags          = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type     = SOCK_STREAM,
	.protocol = IPPROTO_TCP,
	.prot     = &tcpv6_prot,
	.ops      = &inet6_stream_ops,
	.no_check = 0,
	.flags    = INET_PROTOSW_PERMANENT |
		    INET_PROTOSW_ICSK,
};

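/*
 * Per-network-namespace setup: create the namespace's TCP control
 * socket (used elsewhere in this file when transmitting stateless
 * control packets such as resets), destroy it on namespace exit, and
 * purge any remaining IPv6 TIME-WAIT sockets when a batch of
 * namespaces goes away.
 */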
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init       = tcpv6_net_init,
	.exit       = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

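/*
 * Module init/exit: register the IPPROTO_TCP handler, the SOCK_STREAM
 * protosw entry and the pernet operations, unwinding in reverse order
 * on failure so the error labels below undo only what has already been
 * registered.
 */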
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}