tcpv6: convert opt[] -> topt in tcp_v6_send_reset
[linux-2.6-block.git] / net/ipv6/tcp_ipv6.c
1/*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/module.h>
27#include <linux/errno.h>
28#include <linux/types.h>
29#include <linux/socket.h>
30#include <linux/sockios.h>
31#include <linux/net.h>
32#include <linux/jiffies.h>
33#include <linux/in.h>
34#include <linux/in6.h>
35#include <linux/netdevice.h>
36#include <linux/init.h>
37#include <linux/jhash.h>
38#include <linux/ipsec.h>
39#include <linux/times.h>
40
41#include <linux/ipv6.h>
42#include <linux/icmpv6.h>
43#include <linux/random.h>
44
45#include <net/tcp.h>
46#include <net/ndisc.h>
47#include <net/inet6_hashtables.h>
48#include <net/inet6_connection_sock.h>
49#include <net/ipv6.h>
50#include <net/transp_v6.h>
51#include <net/addrconf.h>
52#include <net/ip6_route.h>
53#include <net/ip6_checksum.h>
54#include <net/inet_ecn.h>
55#include <net/protocol.h>
56#include <net/xfrm.h>
57#include <net/snmp.h>
58#include <net/dsfield.h>
59#include <net/timewait_sock.h>
60#include <net/netdma.h>
61#include <net/inet_common.h>
62
63#include <asm/uaccess.h>
64
65#include <linux/proc_fs.h>
66#include <linux/seq_file.h>
67
68#include <linux/crypto.h>
69#include <linux/scatterlist.h>
70
71static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
72static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
73 struct request_sock *req);
74
75static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
76
77static struct inet_connection_sock_af_ops ipv6_mapped;
78static struct inet_connection_sock_af_ops ipv6_specific;
79#ifdef CONFIG_TCP_MD5SIG
80static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
81static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
82#else
83static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
84 struct in6_addr *addr)
85{
86 return NULL;
87}
88#endif
89
90static void tcp_v6_hash(struct sock *sk)
91{
92 if (sk->sk_state != TCP_CLOSE) {
93 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
94 tcp_prot.hash(sk);
95 return;
96 }
97 local_bh_disable();
98 __inet6_hash(sk);
99 local_bh_enable();
100 }
101}
102
103static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
104 struct in6_addr *saddr,
105 struct in6_addr *daddr,
106 __wsum base)
107{
108 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
109}
110
111static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
112{
113 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
114 ipv6_hdr(skb)->saddr.s6_addr32,
115 tcp_hdr(skb)->dest,
116 tcp_hdr(skb)->source);
117}
118
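tcp_v6_check() above wraps csum_ipv6_magic(), which folds the RFC 2460 pseudo-header (source address, destination address, upper-layer length, next header) into the TCP checksum. Below is a minimal userspace sketch of that computation over a flat buffer; it is illustrative only and not part of tcp_ipv6.c, and it assumes a 16-bit-aligned segment.

/* Illustrative sketch of the IPv6 TCP pseudo-header checksum (RFC 2460 8.1). */
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>

static uint16_t tcp6_checksum(const struct in6_addr *saddr,
			      const struct in6_addr *daddr,
			      const void *segment, uint32_t len)
{
	uint64_t sum = 0;
	const uint16_t *p;
	uint32_t i;

	/* Pseudo-header: saddr, daddr, upper-layer length, next header. */
	p = (const uint16_t *)saddr;
	for (i = 0; i < 8; i++)
		sum += p[i];
	p = (const uint16_t *)daddr;
	for (i = 0; i < 8; i++)
		sum += p[i];
	sum += htons(len >> 16) + htons(len & 0xffff);
	sum += htons(IPPROTO_TCP);

	/* TCP header + payload, with the checksum field set to zero. */
	p = segment;
	for (i = 0; i + 1 < len; i += 2)
		sum += *p++;
	if (len & 1) {				/* trailing byte, zero-padded */
		uint16_t last = 0;
		memcpy(&last, (const uint8_t *)segment + len - 1, 1);
		sum += last;
	}

	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	/* The result can be stored into the checksum field as-is. */
	return (uint16_t)~sum;
}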
1ab1457c 119static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
1da177e4
LT
120 int addr_len)
121{
122 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
1ab1457c 123 struct inet_sock *inet = inet_sk(sk);
d83d8461 124 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
125 struct ipv6_pinfo *np = inet6_sk(sk);
126 struct tcp_sock *tp = tcp_sk(sk);
127 struct in6_addr *saddr = NULL, *final_p = NULL, final;
128 struct flowi fl;
129 struct dst_entry *dst;
130 int addr_type;
131 int err;
132
1ab1457c 133 if (addr_len < SIN6_LEN_RFC2133)
1da177e4
LT
134 return -EINVAL;
135
1ab1457c 136 if (usin->sin6_family != AF_INET6)
1da177e4
LT
137 return(-EAFNOSUPPORT);
138
139 memset(&fl, 0, sizeof(fl));
140
141 if (np->sndflow) {
142 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
143 IP6_ECN_flow_init(fl.fl6_flowlabel);
144 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
145 struct ip6_flowlabel *flowlabel;
146 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
147 if (flowlabel == NULL)
148 return -EINVAL;
149 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
150 fl6_sock_release(flowlabel);
151 }
152 }
153
154 /*
1ab1457c
YH
155 * connect() to INADDR_ANY means loopback (BSD'ism).
156 */
157
158 if(ipv6_addr_any(&usin->sin6_addr))
159 usin->sin6_addr.s6_addr[15] = 0x1;
1da177e4
LT
160
161 addr_type = ipv6_addr_type(&usin->sin6_addr);
162
163 if(addr_type & IPV6_ADDR_MULTICAST)
164 return -ENETUNREACH;
165
166 if (addr_type&IPV6_ADDR_LINKLOCAL) {
167 if (addr_len >= sizeof(struct sockaddr_in6) &&
168 usin->sin6_scope_id) {
169 /* If interface is set while binding, indices
170 * must coincide.
171 */
172 if (sk->sk_bound_dev_if &&
173 sk->sk_bound_dev_if != usin->sin6_scope_id)
174 return -EINVAL;
175
176 sk->sk_bound_dev_if = usin->sin6_scope_id;
177 }
178
179 /* Connect to link-local address requires an interface */
180 if (!sk->sk_bound_dev_if)
181 return -EINVAL;
182 }
183
184 if (tp->rx_opt.ts_recent_stamp &&
185 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
186 tp->rx_opt.ts_recent = 0;
187 tp->rx_opt.ts_recent_stamp = 0;
188 tp->write_seq = 0;
189 }
190
191 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
192 np->flow_label = fl.fl6_flowlabel;
193
194 /*
195 * TCP over IPv4
196 */
197
198 if (addr_type == IPV6_ADDR_MAPPED) {
d83d8461 199 u32 exthdrlen = icsk->icsk_ext_hdr_len;
1da177e4
LT
200 struct sockaddr_in sin;
201
202 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
203
204 if (__ipv6_only_sock(sk))
205 return -ENETUNREACH;
206
207 sin.sin_family = AF_INET;
208 sin.sin_port = usin->sin6_port;
209 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
210
d83d8461 211 icsk->icsk_af_ops = &ipv6_mapped;
1da177e4 212 sk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
213#ifdef CONFIG_TCP_MD5SIG
214 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
215#endif
1da177e4
LT
216
217 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
218
219 if (err) {
d83d8461
ACM
220 icsk->icsk_ext_hdr_len = exthdrlen;
221 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 222 sk->sk_backlog_rcv = tcp_v6_do_rcv;
cfb6eeb4
YH
223#ifdef CONFIG_TCP_MD5SIG
224 tp->af_specific = &tcp_sock_ipv6_specific;
225#endif
1da177e4
LT
226 goto failure;
227 } else {
228 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
229 inet->saddr);
230 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
231 inet->rcv_saddr);
232 }
233
234 return err;
235 }
236
237 if (!ipv6_addr_any(&np->rcv_saddr))
238 saddr = &np->rcv_saddr;
239
240 fl.proto = IPPROTO_TCP;
241 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
242 ipv6_addr_copy(&fl.fl6_src,
243 (saddr ? saddr : &np->saddr));
244 fl.oif = sk->sk_bound_dev_if;
245 fl.fl_ip_dport = usin->sin6_port;
246 fl.fl_ip_sport = inet->sport;
247
248 if (np->opt && np->opt->srcrt) {
249 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
250 ipv6_addr_copy(&final, &fl.fl6_dst);
251 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
252 final_p = &final;
253 }
254
beb8d13b
VY
255 security_sk_classify_flow(sk, &fl);
256
1da177e4
LT
257 err = ip6_dst_lookup(sk, &dst, &fl);
258 if (err)
259 goto failure;
260 if (final_p)
261 ipv6_addr_copy(&fl.fl6_dst, final_p);
262
bb72845e 263 if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
14e50e57
DM
264 if (err == -EREMOTE)
265 err = ip6_dst_blackhole(sk, &dst, &fl);
266 if (err < 0)
267 goto failure;
268 }
1da177e4
LT
269
270 if (saddr == NULL) {
271 saddr = &fl.fl6_src;
272 ipv6_addr_copy(&np->rcv_saddr, saddr);
273 }
274
275 /* set the source address */
276 ipv6_addr_copy(&np->saddr, saddr);
277 inet->rcv_saddr = LOOPBACK4_IPV6;
278
f83ef8c0 279 sk->sk_gso_type = SKB_GSO_TCPV6;
8e1ef0a9 280 __ip6_dst_store(sk, dst, NULL, NULL);
1da177e4 281
d83d8461 282 icsk->icsk_ext_hdr_len = 0;
1da177e4 283 if (np->opt)
d83d8461
ACM
284 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
285 np->opt->opt_nflen);
1da177e4
LT
286
287 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
288
289 inet->dport = usin->sin6_port;
290
291 tcp_set_state(sk, TCP_SYN_SENT);
d8313f5c 292 err = inet6_hash_connect(&tcp_death_row, sk);
1da177e4
LT
293 if (err)
294 goto late_failure;
295
296 if (!tp->write_seq)
297 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
298 np->daddr.s6_addr32,
299 inet->sport,
300 inet->dport);
301
302 err = tcp_connect(sk);
303 if (err)
304 goto late_failure;
305
306 return 0;
307
308late_failure:
309 tcp_set_state(sk, TCP_CLOSE);
310 __sk_dst_reset(sk);
311failure:
312 inet->dport = 0;
313 sk->sk_route_caps = 0;
314 return err;
315}
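The IPV6_ADDR_MAPPED branch of tcp_v6_connect() above hands the connection to tcp_v4_connect() and then records the IPv4 endpoints as ::ffff:a.b.c.d addresses. The following userspace sketch shows the same mapping in both directions; the helper names are made up for illustration and this is not kernel code.

/* IPv4-mapped IPv6 address handling, mirroring the connect() path above. */
#include <netinet/in.h>
#include <string.h>

/* ::ffff:a.b.c.d -> a.b.c.d (mirrors the sin.sin_addr extraction above). */
static int mapped_to_v4(const struct in6_addr *v6, struct in_addr *v4)
{
	if (!IN6_IS_ADDR_V4MAPPED(v6))
		return -1;
	memcpy(&v4->s_addr, &v6->s6_addr[12], 4);	/* last 32 bits */
	return 0;
}

/* a.b.c.d -> ::ffff:a.b.c.d (mirrors ipv6_addr_set(..., htonl(0x0000FFFF), addr)). */
static void v4_to_mapped(const struct in_addr *v4, struct in6_addr *v6)
{
	memset(v6, 0, sizeof(*v6));
	v6->s6_addr[10] = 0xff;
	v6->s6_addr[11] = 0xff;
	memcpy(&v6->s6_addr[12], &v4->s_addr, 4);
}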
316
317static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
04ce6909 318 int type, int code, int offset, __be32 info)
1da177e4
LT
319{
320 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
505cbfc5 321 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
1da177e4
LT
322 struct ipv6_pinfo *np;
323 struct sock *sk;
324 int err;
1ab1457c 325 struct tcp_sock *tp;
1da177e4 326 __u32 seq;
ca12a1a4 327 struct net *net = dev_net(skb->dev);
1da177e4 328
ca12a1a4 329 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
d86e0dac 330 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
1da177e4
LT
331
332 if (sk == NULL) {
e41b5368
DL
333 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
334 ICMP6_MIB_INERRORS);
1da177e4
LT
335 return;
336 }
337
338 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 339 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
340 return;
341 }
342
343 bh_lock_sock(sk);
344 if (sock_owned_by_user(sk))
de0744af 345 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
1da177e4
LT
346
347 if (sk->sk_state == TCP_CLOSE)
348 goto out;
349
350 tp = tcp_sk(sk);
1ab1457c 351 seq = ntohl(th->seq);
1da177e4
LT
352 if (sk->sk_state != TCP_LISTEN &&
353 !between(seq, tp->snd_una, tp->snd_nxt)) {
de0744af 354 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
355 goto out;
356 }
357
358 np = inet6_sk(sk);
359
360 if (type == ICMPV6_PKT_TOOBIG) {
361 struct dst_entry *dst = NULL;
362
363 if (sock_owned_by_user(sk))
364 goto out;
365 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
366 goto out;
367
368 /* icmp should have updated the destination cache entry */
369 dst = __sk_dst_check(sk, np->dst_cookie);
370
371 if (dst == NULL) {
372 struct inet_sock *inet = inet_sk(sk);
373 struct flowi fl;
374
375 /* BUGGG_FUTURE: Again, it is not clear how
376 to handle rthdr case. Ignore this complexity
377 for now.
378 */
379 memset(&fl, 0, sizeof(fl));
380 fl.proto = IPPROTO_TCP;
381 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
382 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
383 fl.oif = sk->sk_bound_dev_if;
384 fl.fl_ip_dport = inet->dport;
385 fl.fl_ip_sport = inet->sport;
beb8d13b 386 security_skb_classify_flow(skb, &fl);
1da177e4
LT
387
388 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
389 sk->sk_err_soft = -err;
390 goto out;
391 }
392
393 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
394 sk->sk_err_soft = -err;
395 goto out;
396 }
397
398 } else
399 dst_hold(dst);
400
d83d8461 401 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
1da177e4
LT
402 tcp_sync_mss(sk, dst_mtu(dst));
403 tcp_simple_retransmit(sk);
404 } /* else let the usual retransmit timer handle it */
405 dst_release(dst);
406 goto out;
407 }
408
409 icmpv6_err_convert(type, code, &err);
410
411 /* Might be for a request_sock */
1da177e4 412 switch (sk->sk_state) {
60236fdd 413 struct request_sock *req, **prev;
1da177e4
LT
414 case TCP_LISTEN:
415 if (sock_owned_by_user(sk))
416 goto out;
417
8129765a
ACM
418 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
419 &hdr->saddr, inet6_iif(skb));
1da177e4
LT
420 if (!req)
421 goto out;
422
423 /* ICMPs are not backlogged, hence we cannot get
424 * an established socket here.
425 */
547b792c 426 WARN_ON(req->sk != NULL);
1da177e4 427
2e6599cb 428 if (seq != tcp_rsk(req)->snt_isn) {
de0744af 429 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
430 goto out;
431 }
432
463c84b9 433 inet_csk_reqsk_queue_drop(sk, req, prev);
1da177e4
LT
434 goto out;
435
436 case TCP_SYN_SENT:
437 case TCP_SYN_RECV: /* Cannot happen.
438 It can, if SYNs are crossed. --ANK */
1da177e4 439 if (!sock_owned_by_user(sk)) {
1da177e4
LT
440 sk->sk_err = err;
441 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
442
443 tcp_done(sk);
444 } else
445 sk->sk_err_soft = err;
446 goto out;
447 }
448
449 if (!sock_owned_by_user(sk) && np->recverr) {
450 sk->sk_err = err;
451 sk->sk_error_report(sk);
452 } else
453 sk->sk_err_soft = err;
454
455out:
456 bh_unlock_sock(sk);
457 sock_put(sk);
458}
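The `!between(seq, tp->snd_una, tp->snd_nxt)` test in tcp_v6_err() above relies on modulo-2^32 sequence arithmetic, so ICMP errors quoting stale or future sequence numbers are dropped. A small sketch of the wrap-safe comparisons, assuming 32-bit unsigned sequence numbers (not the kernel's helpers):

/* Wrap-around-safe TCP sequence comparisons. */
#include <stdint.h>

static inline int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;	/* a precedes b modulo 2^32 */
}

/* Is seq inside [low, high] modulo 2^32, even if the window wraps? */
static inline int seq_between(uint32_t seq, uint32_t low, uint32_t high)
{
	/* Shifting everything by 'low' turns the wrapped interval into
	 * an ordinary 0..(high - low) range. */
	return (uint32_t)(seq - low) <= (uint32_t)(high - low);
}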
459
460
fd80eb94 461static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
1da177e4 462{
ca304b61 463 struct inet6_request_sock *treq = inet6_rsk(req);
1da177e4
LT
464 struct ipv6_pinfo *np = inet6_sk(sk);
465 struct sk_buff * skb;
466 struct ipv6_txoptions *opt = NULL;
467 struct in6_addr * final_p = NULL, final;
468 struct flowi fl;
fd80eb94 469 struct dst_entry *dst;
1da177e4
LT
470 int err = -1;
471
472 memset(&fl, 0, sizeof(fl));
473 fl.proto = IPPROTO_TCP;
2e6599cb
ACM
474 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
475 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1da177e4 476 fl.fl6_flowlabel = 0;
2e6599cb
ACM
477 fl.oif = treq->iif;
478 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1da177e4 479 fl.fl_ip_sport = inet_sk(sk)->sport;
4237c75c 480 security_req_classify_flow(req, &fl);
1da177e4 481
fd80eb94
DL
482 opt = np->opt;
483 if (opt && opt->srcrt) {
484 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
485 ipv6_addr_copy(&final, &fl.fl6_dst);
486 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
487 final_p = &final;
1da177e4
LT
488 }
489
fd80eb94
DL
490 err = ip6_dst_lookup(sk, &dst, &fl);
491 if (err)
492 goto done;
493 if (final_p)
494 ipv6_addr_copy(&fl.fl6_dst, final_p);
495 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
496 goto done;
497
1da177e4
LT
498 skb = tcp_make_synack(sk, dst, req);
499 if (skb) {
aa8223c7 500 struct tcphdr *th = tcp_hdr(skb);
1da177e4
LT
501
502 th->check = tcp_v6_check(th, skb->len,
2e6599cb 503 &treq->loc_addr, &treq->rmt_addr,
1da177e4
LT
504 csum_partial((char *)th, skb->len, skb->csum));
505
2e6599cb 506 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1da177e4 507 err = ip6_xmit(sk, skb, &fl, opt, 0);
b9df3cb8 508 err = net_xmit_eval(err);
1da177e4
LT
509 }
510
511done:
1ab1457c 512 if (opt && opt != np->opt)
1da177e4 513 sock_kfree_s(sk, opt, opt->tot_len);
78b91042 514 dst_release(dst);
1da177e4
LT
515 return err;
516}
517
c6aefafb
GG
518static inline void syn_flood_warning(struct sk_buff *skb)
519{
520#ifdef CONFIG_SYN_COOKIES
521 if (sysctl_tcp_syncookies)
522 printk(KERN_INFO
523 "TCPv6: Possible SYN flooding on port %d. "
524 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
525 else
526#endif
527 printk(KERN_INFO
528 "TCPv6: Possible SYN flooding on port %d. "
529 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
530}
531
60236fdd 532static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 533{
ca304b61
ACM
534 if (inet6_rsk(req)->pktopts)
535 kfree_skb(inet6_rsk(req)->pktopts);
1da177e4
LT
536}
537
cfb6eeb4
YH
538#ifdef CONFIG_TCP_MD5SIG
539static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
540 struct in6_addr *addr)
541{
542 struct tcp_sock *tp = tcp_sk(sk);
543 int i;
544
545 BUG_ON(tp == NULL);
546
547 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
548 return NULL;
549
550 for (i = 0; i < tp->md5sig_info->entries6; i++) {
caad295f 551 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
f8ab18d2 552 return &tp->md5sig_info->keys6[i].base;
cfb6eeb4
YH
553 }
554 return NULL;
555}
556
557static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
558 struct sock *addr_sk)
559{
560 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
561}
562
563static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
564 struct request_sock *req)
565{
566 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
567}
568
569static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
570 char *newkey, u8 newkeylen)
571{
572 /* Add key to the list */
b0a713e9 573 struct tcp_md5sig_key *key;
cfb6eeb4
YH
574 struct tcp_sock *tp = tcp_sk(sk);
575 struct tcp6_md5sig_key *keys;
576
b0a713e9 577 key = tcp_v6_md5_do_lookup(sk, peer);
cfb6eeb4
YH
578 if (key) {
579 /* modify existing entry - just update that one */
b0a713e9
MD
580 kfree(key->key);
581 key->key = newkey;
582 key->keylen = newkeylen;
cfb6eeb4
YH
583 } else {
584 /* reallocate new list if current one is full. */
585 if (!tp->md5sig_info) {
586 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
587 if (!tp->md5sig_info) {
588 kfree(newkey);
589 return -ENOMEM;
590 }
3d7dbeac 591 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4 592 }
aacbe8c8
YH
593 if (tcp_alloc_md5sig_pool() == NULL) {
594 kfree(newkey);
595 return -ENOMEM;
596 }
cfb6eeb4
YH
597 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
598 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
599 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
600
601 if (!keys) {
602 tcp_free_md5sig_pool();
603 kfree(newkey);
604 return -ENOMEM;
605 }
606
607 if (tp->md5sig_info->entries6)
608 memmove(keys, tp->md5sig_info->keys6,
609 (sizeof (tp->md5sig_info->keys6[0]) *
610 tp->md5sig_info->entries6));
611
612 kfree(tp->md5sig_info->keys6);
613 tp->md5sig_info->keys6 = keys;
614 tp->md5sig_info->alloced6++;
615 }
616
617 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
618 peer);
f8ab18d2
DM
619 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
620 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
cfb6eeb4
YH
621
622 tp->md5sig_info->entries6++;
623 }
624 return 0;
625}
626
627static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
628 u8 *newkey, __u8 newkeylen)
629{
630 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
631 newkey, newkeylen);
632}
633
634static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
635{
636 struct tcp_sock *tp = tcp_sk(sk);
637 int i;
638
639 for (i = 0; i < tp->md5sig_info->entries6; i++) {
caad295f 640 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
cfb6eeb4 641 /* Free the key */
f8ab18d2 642 kfree(tp->md5sig_info->keys6[i].base.key);
cfb6eeb4
YH
643 tp->md5sig_info->entries6--;
644
645 if (tp->md5sig_info->entries6 == 0) {
646 kfree(tp->md5sig_info->keys6);
647 tp->md5sig_info->keys6 = NULL;
ca983cef 648 tp->md5sig_info->alloced6 = 0;
cfb6eeb4
YH
649 } else {
650 /* shrink the database */
651 if (tp->md5sig_info->entries6 != i)
652 memmove(&tp->md5sig_info->keys6[i],
653 &tp->md5sig_info->keys6[i+1],
654 (tp->md5sig_info->entries6 - i)
655 * sizeof (tp->md5sig_info->keys6[0]));
656 }
77adefdc
YH
657 tcp_free_md5sig_pool();
658 return 0;
cfb6eeb4
YH
659 }
660 }
661 return -ENOENT;
662}
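tcp_v6_md5_do_add() and tcp_v6_md5_do_del() above keep the per-socket keys in a plain array that is reallocated on growth and compacted with memmove() on deletion. The userspace sketch below shows that pattern in isolation; struct key_entry and key_list are stand-ins, not kernel types.

/* Grow-on-add / compact-on-delete pattern used by the MD5 key list above. */
#include <stdlib.h>
#include <string.h>

struct key_entry { unsigned char addr[16]; char *key; };
struct key_list { struct key_entry *v; int entries; int alloced; };

static int key_list_add(struct key_list *l, const struct key_entry *e)
{
	if (l->entries == l->alloced) {
		struct key_entry *nv = malloc((l->entries + 1) * sizeof(*nv));
		if (!nv)
			return -1;
		if (l->entries)
			memmove(nv, l->v, l->entries * sizeof(*nv));
		free(l->v);
		l->v = nv;
		l->alloced++;
	}
	l->v[l->entries++] = *e;
	return 0;
}

static void key_list_del(struct key_list *l, int i)
{
	if (i < --l->entries)		/* close the gap, keep order */
		memmove(&l->v[i], &l->v[i + 1],
			(l->entries - i) * sizeof(l->v[0]));
}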
663
664static void tcp_v6_clear_md5_list (struct sock *sk)
665{
666 struct tcp_sock *tp = tcp_sk(sk);
667 int i;
668
669 if (tp->md5sig_info->entries6) {
670 for (i = 0; i < tp->md5sig_info->entries6; i++)
f8ab18d2 671 kfree(tp->md5sig_info->keys6[i].base.key);
cfb6eeb4
YH
672 tp->md5sig_info->entries6 = 0;
673 tcp_free_md5sig_pool();
674 }
675
676 kfree(tp->md5sig_info->keys6);
677 tp->md5sig_info->keys6 = NULL;
678 tp->md5sig_info->alloced6 = 0;
679
680 if (tp->md5sig_info->entries4) {
681 for (i = 0; i < tp->md5sig_info->entries4; i++)
f8ab18d2 682 kfree(tp->md5sig_info->keys4[i].base.key);
cfb6eeb4
YH
683 tp->md5sig_info->entries4 = 0;
684 tcp_free_md5sig_pool();
685 }
686
687 kfree(tp->md5sig_info->keys4);
688 tp->md5sig_info->keys4 = NULL;
689 tp->md5sig_info->alloced4 = 0;
690}
691
692static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
693 int optlen)
694{
695 struct tcp_md5sig cmd;
696 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
697 u8 *newkey;
698
699 if (optlen < sizeof(cmd))
700 return -EINVAL;
701
702 if (copy_from_user(&cmd, optval, sizeof(cmd)))
703 return -EFAULT;
704
705 if (sin6->sin6_family != AF_INET6)
706 return -EINVAL;
707
708 if (!cmd.tcpm_keylen) {
709 if (!tcp_sk(sk)->md5sig_info)
710 return -ENOENT;
e773e4fa 711 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
cfb6eeb4
YH
712 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
713 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
714 }
715
716 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
717 return -EINVAL;
718
719 if (!tcp_sk(sk)->md5sig_info) {
720 struct tcp_sock *tp = tcp_sk(sk);
721 struct tcp_md5sig_info *p;
722
723 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
724 if (!p)
725 return -ENOMEM;
726
727 tp->md5sig_info = p;
3d7dbeac 728 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4
YH
729 }
730
af879cc7 731 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
732 if (!newkey)
733 return -ENOMEM;
e773e4fa 734 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
cfb6eeb4
YH
735 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
736 newkey, cmd.tcpm_keylen);
737 }
738 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
739}
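tcp_v6_parse_md5_keys() above is the kernel side of the TCP_MD5SIG socket option. The userspace example below installs an RFC 2385 key for an IPv6 peer; it assumes the struct tcp_md5sig layout exported by <linux/tcp.h> (tcpm_addr, tcpm_keylen, tcpm_key) and trims error handling, so treat it as a hedged sketch rather than reference usage.

/* Userspace counterpart of tcp_v6_parse_md5_keys(): install a key for one peer. */
#include <netinet/in.h>
#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG */
#include <string.h>
#include <sys/socket.h>

static int set_tcp_md5_key(int fd, const struct sockaddr_in6 *peer,
			   const void *key, int keylen)
{
	struct tcp_md5sig md5;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;
	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));	/* sockaddr_storage */
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	/* keylen == 0 would delete the key, as in the code above. */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}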
740
49a72dfb
AL
741static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
742 struct in6_addr *daddr,
743 struct in6_addr *saddr, int nbytes)
cfb6eeb4 744{
cfb6eeb4 745 struct tcp6_pseudohdr *bp;
49a72dfb 746 struct scatterlist sg;
8d26d76d 747
cfb6eeb4 748 bp = &hp->md5_blk.ip6;
cfb6eeb4
YH
749 /* 1. TCP pseudo-header (RFC2460) */
750 ipv6_addr_copy(&bp->saddr, saddr);
751 ipv6_addr_copy(&bp->daddr, daddr);
49a72dfb 752 bp->protocol = cpu_to_be32(IPPROTO_TCP);
00b1304c 753 bp->len = cpu_to_be32(nbytes);
cfb6eeb4 754
49a72dfb
AL
755 sg_init_one(&sg, bp, sizeof(*bp));
756 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
757}
c7da57a1 758
49a72dfb
AL
759static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
760 struct in6_addr *daddr, struct in6_addr *saddr,
761 struct tcphdr *th)
762{
763 struct tcp_md5sig_pool *hp;
764 struct hash_desc *desc;
765
766 hp = tcp_get_md5sig_pool();
767 if (!hp)
768 goto clear_hash_noput;
769 desc = &hp->md5_desc;
770
771 if (crypto_hash_init(desc))
772 goto clear_hash;
773 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
774 goto clear_hash;
775 if (tcp_md5_hash_header(hp, th))
776 goto clear_hash;
777 if (tcp_md5_hash_key(hp, key))
778 goto clear_hash;
779 if (crypto_hash_final(desc, md5_hash))
cfb6eeb4 780 goto clear_hash;
cfb6eeb4 781
cfb6eeb4 782 tcp_put_md5sig_pool();
cfb6eeb4 783 return 0;
49a72dfb 784
cfb6eeb4
YH
785clear_hash:
786 tcp_put_md5sig_pool();
787clear_hash_noput:
788 memset(md5_hash, 0, 16);
49a72dfb 789 return 1;
cfb6eeb4
YH
790}
791
49a72dfb
AL
792static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
793 struct sock *sk, struct request_sock *req,
794 struct sk_buff *skb)
cfb6eeb4
YH
795{
796 struct in6_addr *saddr, *daddr;
49a72dfb
AL
797 struct tcp_md5sig_pool *hp;
798 struct hash_desc *desc;
799 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4
YH
800
801 if (sk) {
802 saddr = &inet6_sk(sk)->saddr;
803 daddr = &inet6_sk(sk)->daddr;
49a72dfb 804 } else if (req) {
cfb6eeb4
YH
805 saddr = &inet6_rsk(req)->loc_addr;
806 daddr = &inet6_rsk(req)->rmt_addr;
49a72dfb
AL
807 } else {
808 struct ipv6hdr *ip6h = ipv6_hdr(skb);
809 saddr = &ip6h->saddr;
810 daddr = &ip6h->daddr;
cfb6eeb4 811 }
49a72dfb
AL
812
813 hp = tcp_get_md5sig_pool();
814 if (!hp)
815 goto clear_hash_noput;
816 desc = &hp->md5_desc;
817
818 if (crypto_hash_init(desc))
819 goto clear_hash;
820
821 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
822 goto clear_hash;
823 if (tcp_md5_hash_header(hp, th))
824 goto clear_hash;
825 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
826 goto clear_hash;
827 if (tcp_md5_hash_key(hp, key))
828 goto clear_hash;
829 if (crypto_hash_final(desc, md5_hash))
830 goto clear_hash;
831
832 tcp_put_md5sig_pool();
833 return 0;
834
835clear_hash:
836 tcp_put_md5sig_pool();
837clear_hash_noput:
838 memset(md5_hash, 0, 16);
839 return 1;
cfb6eeb4
YH
840}
841
842static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
843{
844 __u8 *hash_location = NULL;
845 struct tcp_md5sig_key *hash_expected;
0660e03f 846 struct ipv6hdr *ip6h = ipv6_hdr(skb);
aa8223c7 847 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 848 int genhash;
cfb6eeb4
YH
849 u8 newhash[16];
850
851 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 852 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 853
785957d3
DM
854 /* We've parsed the options - do we have a hash? */
855 if (!hash_expected && !hash_location)
856 return 0;
857
858 if (hash_expected && !hash_location) {
859 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
cfb6eeb4
YH
860 return 1;
861 }
862
785957d3
DM
863 if (!hash_expected && hash_location) {
864 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
cfb6eeb4
YH
865 return 1;
866 }
867
868 /* check the signature */
49a72dfb
AL
869 genhash = tcp_v6_md5_hash_skb(newhash,
870 hash_expected,
871 NULL, NULL, skb);
872
cfb6eeb4
YH
873 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
874 if (net_ratelimit()) {
875 printk(KERN_INFO "MD5 Hash %s for "
876 "(" NIP6_FMT ", %u)->"
877 "(" NIP6_FMT ", %u)\n",
878 genhash ? "failed" : "mismatch",
879 NIP6(ip6h->saddr), ntohs(th->source),
880 NIP6(ip6h->daddr), ntohs(th->dest));
881 }
882 return 1;
883 }
884 return 0;
885}
886#endif
887
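tcp_v6_inbound_md5_hash() above locates the incoming digest with tcp_parse_md5sig_option(). A minimal sketch of that option walk (kind 19, length 18 per RFC 2385), assuming the option area is already linear in memory; this is not the kernel's parser.

/* Scan TCP options for the MD5 signature: kind 19, length 18, 16-byte digest. */
#include <stddef.h>
#include <stdint.h>

#define TCPOPT_EOL	0
#define TCPOPT_NOP	1
#define TCPOPT_MD5SIG	19
#define TCPOLEN_MD5SIG	18

static const uint8_t *find_md5sig(const uint8_t *opts, size_t optlen)
{
	while (optlen > 0) {
		uint8_t kind = opts[0], len;

		if (kind == TCPOPT_EOL)
			break;
		if (kind == TCPOPT_NOP) {
			opts++; optlen--;
			continue;
		}
		if (optlen < 2 || (len = opts[1]) < 2 || len > optlen)
			break;			/* malformed option list */
		if (kind == TCPOPT_MD5SIG && len == TCPOLEN_MD5SIG)
			return opts + 2;	/* 16-byte digest follows */
		opts += len;
		optlen -= len;
	}
	return NULL;
}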
c6aefafb 888struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 889 .family = AF_INET6,
2e6599cb 890 .obj_size = sizeof(struct tcp6_request_sock),
1da177e4 891 .rtx_syn_ack = tcp_v6_send_synack,
60236fdd
ACM
892 .send_ack = tcp_v6_reqsk_send_ack,
893 .destructor = tcp_v6_reqsk_destructor,
1da177e4
LT
894 .send_reset = tcp_v6_send_reset
895};
896
cfb6eeb4 897#ifdef CONFIG_TCP_MD5SIG
b6332e6c 898static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
cfb6eeb4 899 .md5_lookup = tcp_v6_reqsk_md5_lookup,
cfb6eeb4 900};
b6332e6c 901#endif
cfb6eeb4 902
6d6ee43e
ACM
903static struct timewait_sock_ops tcp6_timewait_sock_ops = {
904 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
905 .twsk_unique = tcp_twsk_unique,
cfb6eeb4 906 .twsk_destructor= tcp_twsk_destructor,
6d6ee43e
ACM
907};
908
8292a17a 909static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
1da177e4
LT
910{
911 struct ipv6_pinfo *np = inet6_sk(sk);
aa8223c7 912 struct tcphdr *th = tcp_hdr(skb);
1da177e4 913
84fa7933 914 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1da177e4 915 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
663ead3b 916 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 917 skb->csum_offset = offsetof(struct tcphdr, check);
1da177e4 918 } else {
1ab1457c
YH
919 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
920 csum_partial((char *)th, th->doff<<2,
1da177e4
LT
921 skb->csum));
922 }
923}
924
a430a43d
HX
925static int tcp_v6_gso_send_check(struct sk_buff *skb)
926{
927 struct ipv6hdr *ipv6h;
928 struct tcphdr *th;
929
930 if (!pskb_may_pull(skb, sizeof(*th)))
931 return -EINVAL;
932
0660e03f 933 ipv6h = ipv6_hdr(skb);
aa8223c7 934 th = tcp_hdr(skb);
a430a43d
HX
935
936 th->check = 0;
937 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
938 IPPROTO_TCP, 0);
663ead3b 939 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 940 skb->csum_offset = offsetof(struct tcphdr, check);
84fa7933 941 skb->ip_summed = CHECKSUM_PARTIAL;
a430a43d
HX
942 return 0;
943}
1da177e4 944
945static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
946{
947 struct tcphdr *th = tcp_hdr(skb), *t1;
948 struct sk_buff *buff;
949 struct flowi fl;
950 struct net *net = dev_net(skb->dst->dev);
951 struct sock *ctl_sk = net->ipv6.tcp_sk;
952 unsigned int tot_len = sizeof(struct tcphdr);
953 __be32 *topt;
954#ifdef CONFIG_TCP_MD5SIG
955 struct tcp_md5sig_key *key;
956#endif
957
958 if (th->rst)
959 return;
960
961 if (!ipv6_unicast_destination(skb))
962 return;
963
964#ifdef CONFIG_TCP_MD5SIG
965 if (sk)
966 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
967 else
968 key = NULL;
969
970 if (key)
971 tot_len += TCPOLEN_MD5SIG_ALIGNED;
972#endif
973
974 /*
975 * We need to grab some memory, and put together an RST,
976 * and then put it into the queue to be sent.
977 */
978
979 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
980 GFP_ATOMIC);
981 if (buff == NULL)
982 return;
983
984 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
985
986 t1 = (struct tcphdr *) skb_push(buff, tot_len);
987
988 /* Swap the send and the receive. */
989 memset(t1, 0, sizeof(*t1));
990 t1->dest = th->source;
991 t1->source = th->dest;
992 t1->doff = tot_len / 4;
993 t1->rst = 1;
994
995 if (th->ack) {
996 t1->seq = th->ack_seq;
997 } else {
998 t1->ack = 1;
999 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
1000 + skb->len - (th->doff<<2));
1001 }
1002
1003 topt = (__be32 *)(t1 + 1);
1004
1005#ifdef CONFIG_TCP_MD5SIG
1006 if (key) {
1007 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1008 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1009 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
1010 &ipv6_hdr(skb)->saddr,
1011 &ipv6_hdr(skb)->daddr, t1);
1012 }
1013#endif
1014
1015 buff->csum = csum_partial((char *)t1, tot_len, 0);
1016
1017 memset(&fl, 0, sizeof(fl));
1018 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1019 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1020
1021 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1022 tot_len, IPPROTO_TCP,
1023 buff->csum);
1024
1025 fl.proto = IPPROTO_TCP;
1026 fl.oif = inet6_iif(skb);
1027 fl.fl_ip_dport = t1->dest;
1028 fl.fl_ip_sport = t1->source;
1029 security_skb_classify_flow(skb, &fl);
1030
1031 /* Pass a socket to ip6_dst_lookup even though this is an RST;
1032 * the underlying function uses it to retrieve the network
1033 * namespace.
1034 */
1035 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1036 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1037 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1038 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1039 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1040 return;
1041 }
1042 }
1043
1044 kfree_skb(buff);
1045}
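The seq/ack selection in tcp_v6_send_reset() above follows RFC 793 reset generation: echo the peer's ACK as our sequence number when one is present, otherwise send an ACK covering everything the offending segment occupied (SYN and FIN each consume one sequence number). A host-order sketch of that rule; pick_rst_numbers and its fields are illustrative names, not kernel code.

/* RFC 793 reset sequence/ack selection, as in tcp_v6_send_reset() above. */
#include <stdint.h>

struct rst_numbers { uint32_t seq; uint32_t ack_seq; int ack; };

static struct rst_numbers pick_rst_numbers(int in_ack, uint32_t in_ack_seq,
					   uint32_t in_seq, int in_syn,
					   int in_fin, uint32_t payload_len)
{
	struct rst_numbers r = { 0, 0, 0 };

	if (in_ack) {
		r.seq = in_ack_seq;		/* RST takes the peer's ACK */
	} else {
		r.ack = 1;			/* ACK the segment we refuse */
		r.ack_seq = in_seq + in_syn + in_fin + payload_len;
	}
	return r;
}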
1046
1047static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1048 struct tcp_md5sig_key *key)
1049{
1050 struct tcphdr *th = tcp_hdr(skb), *t1;
1051 struct sk_buff *buff;
1052 struct flowi fl;
1053 struct net *net = dev_net(skb->dst->dev);
1054 struct sock *ctl_sk = net->ipv6.tcp_sk;
1055 unsigned int tot_len = sizeof(struct tcphdr);
1056 __be32 *topt;
1057
1058 if (ts)
1059 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1060#ifdef CONFIG_TCP_MD5SIG
1061 if (key)
1062 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1063#endif
1064
1065 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1066 GFP_ATOMIC);
1067 if (buff == NULL)
1068 return;
1069
1070 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1071
1072 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1073
1074 /* Swap the send and the receive. */
1075 memset(t1, 0, sizeof(*t1));
1076 t1->dest = th->source;
1077 t1->source = th->dest;
1078 t1->doff = tot_len / 4;
1079 t1->seq = htonl(seq);
1080 t1->ack_seq = htonl(ack);
1081 t1->ack = 1;
1082 t1->window = htons(win);
1083
1084 topt = (__be32 *)(t1 + 1);
1085
1086 if (ts) {
1087 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1088 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1089 *topt++ = htonl(tcp_time_stamp);
1090 *topt++ = htonl(ts);
1091 }
1092
1093#ifdef CONFIG_TCP_MD5SIG
1094 if (key) {
1095 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1096 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1097 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
1098 &ipv6_hdr(skb)->saddr,
1099 &ipv6_hdr(skb)->daddr, t1);
1100 }
1101#endif
1102
1103 buff->csum = csum_partial((char *)t1, tot_len, 0);
1104
1105 memset(&fl, 0, sizeof(fl));
1106 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1107 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1108
1109 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1110 tot_len, IPPROTO_TCP,
1111 buff->csum);
1112
1113 fl.proto = IPPROTO_TCP;
1114 fl.oif = inet6_iif(skb);
1115 fl.fl_ip_dport = t1->dest;
1116 fl.fl_ip_sport = t1->source;
1117 security_skb_classify_flow(skb, &fl);
1118
1119 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1120 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1121 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
1122 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1123 return;
1124 }
1125 }
1126
1127 kfree_skb(buff);
1128}
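Both tcp_v6_send_reset() and tcp_v6_send_ack() above write options as aligned 32-bit words through `topt`, padding with NOPs so each option starts on a word boundary. The sketch below shows the packing for the timestamp option; the MD5 word (NOP, NOP, kind 19, length 18) is built the same way. Illustrative only, not kernel code.

/* How the 32-bit option words written through 'topt' above are packed. */
#include <arpa/inet.h>
#include <stdint.h>

#define TCPOPT_NOP		1
#define TCPOPT_TIMESTAMP	8
#define TCPOLEN_TIMESTAMP	10

static uint32_t *write_tsopt(uint32_t *topt, uint32_t tsval, uint32_t tsecr)
{
	/* 0x0101080a: NOP, NOP, kind 8, length 10 — 12 option bytes in total. */
	*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
	*topt++ = htonl(tsval);
	*topt++ = htonl(tsecr);
	return topt;
}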
1129
1130static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1131{
8feaf0c0 1132 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 1133 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 1134
9501f972 1135 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 1136 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
9501f972 1137 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1da177e4 1138
8feaf0c0 1139 inet_twsk_put(tw);
1da177e4
LT
1140}
1141
6edafaaf
GJ
1142static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1143 struct request_sock *req)
1da177e4 1144{
9501f972 1145 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
6edafaaf 1146 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
1da177e4
LT
1147}
1148
1149
1150static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1151{
60236fdd 1152 struct request_sock *req, **prev;
aa8223c7 1153 const struct tcphdr *th = tcp_hdr(skb);
1da177e4
LT
1154 struct sock *nsk;
1155
1156 /* Find possible connection requests. */
8129765a 1157 req = inet6_csk_search_req(sk, &prev, th->source,
0660e03f
ACM
1158 &ipv6_hdr(skb)->saddr,
1159 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1da177e4
LT
1160 if (req)
1161 return tcp_check_req(sk, skb, req, prev);
1162
3b1e0a65 1163 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
d86e0dac
PE
1164 &ipv6_hdr(skb)->saddr, th->source,
1165 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1da177e4
LT
1166
1167 if (nsk) {
1168 if (nsk->sk_state != TCP_TIME_WAIT) {
1169 bh_lock_sock(nsk);
1170 return nsk;
1171 }
9469c7b4 1172 inet_twsk_put(inet_twsk(nsk));
1da177e4
LT
1173 return NULL;
1174 }
1175
c6aefafb 1176#ifdef CONFIG_SYN_COOKIES
1da177e4 1177 if (!th->rst && !th->syn && th->ack)
c6aefafb 1178 sk = cookie_v6_check(sk, skb);
1da177e4
LT
1179#endif
1180 return sk;
1181}
1182
1da177e4
LT
1183/* FIXME: this is substantially similar to the ipv4 code.
1184 * Can some kind of merge be done? -- erics
1185 */
1186static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1187{
ca304b61 1188 struct inet6_request_sock *treq;
1da177e4
LT
1189 struct ipv6_pinfo *np = inet6_sk(sk);
1190 struct tcp_options_received tmp_opt;
1191 struct tcp_sock *tp = tcp_sk(sk);
60236fdd 1192 struct request_sock *req = NULL;
1da177e4 1193 __u32 isn = TCP_SKB_CB(skb)->when;
c6aefafb
GG
1194#ifdef CONFIG_SYN_COOKIES
1195 int want_cookie = 0;
1196#else
1197#define want_cookie 0
1198#endif
1da177e4
LT
1199
1200 if (skb->protocol == htons(ETH_P_IP))
1201 return tcp_v4_conn_request(sk, skb);
1202
1203 if (!ipv6_unicast_destination(skb))
1ab1457c 1204 goto drop;
1da177e4 1205
463c84b9 1206 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1da177e4 1207 if (net_ratelimit())
c6aefafb
GG
1208 syn_flood_warning(skb);
1209#ifdef CONFIG_SYN_COOKIES
1210 if (sysctl_tcp_syncookies)
1211 want_cookie = 1;
1212 else
1213#endif
1ab1457c 1214 goto drop;
1da177e4
LT
1215 }
1216
463c84b9 1217 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1da177e4
LT
1218 goto drop;
1219
ca304b61 1220 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1da177e4
LT
1221 if (req == NULL)
1222 goto drop;
1223
cfb6eeb4
YH
1224#ifdef CONFIG_TCP_MD5SIG
1225 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1226#endif
1227
1da177e4
LT
1228 tcp_clear_options(&tmp_opt);
1229 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1230 tmp_opt.user_mss = tp->rx_opt.user_mss;
1231
1232 tcp_parse_options(skb, &tmp_opt, 0);
1233
4dfc2817 1234 if (want_cookie && !tmp_opt.saw_tstamp)
c6aefafb 1235 tcp_clear_options(&tmp_opt);
c6aefafb 1236
1da177e4
LT
1237 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1238 tcp_openreq_init(req, &tmp_opt, skb);
1239
ca304b61 1240 treq = inet6_rsk(req);
0660e03f
ACM
1241 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1242 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
c6aefafb
GG
1243 if (!want_cookie)
1244 TCP_ECN_create_request(req, tcp_hdr(skb));
1245
1246 if (want_cookie) {
1247 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
4dfc2817 1248 req->cookie_ts = tmp_opt.tstamp_ok;
c6aefafb
GG
1249 } else if (!isn) {
1250 if (ipv6_opt_accepted(sk, skb) ||
1251 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1252 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1253 atomic_inc(&skb->users);
1254 treq->pktopts = skb;
1255 }
1256 treq->iif = sk->sk_bound_dev_if;
1da177e4 1257
c6aefafb
GG
1258 /* So that link locals have meaning */
1259 if (!sk->sk_bound_dev_if &&
1260 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1261 treq->iif = inet6_iif(skb);
1da177e4 1262
a94f723d 1263 isn = tcp_v6_init_sequence(skb);
c6aefafb 1264 }
1da177e4 1265
2e6599cb 1266 tcp_rsk(req)->snt_isn = isn;
1da177e4 1267
4237c75c
VY
1268 security_inet_conn_request(sk, skb, req);
1269
fd80eb94 1270 if (tcp_v6_send_synack(sk, req))
1da177e4
LT
1271 goto drop;
1272
c6aefafb
GG
1273 if (!want_cookie) {
1274 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1275 return 0;
1276 }
1da177e4
LT
1277
1278drop:
1279 if (req)
60236fdd 1280 reqsk_free(req);
1da177e4 1281
1da177e4
LT
1282 return 0; /* don't send reset */
1283}
1284
1285static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
60236fdd 1286 struct request_sock *req,
1da177e4
LT
1287 struct dst_entry *dst)
1288{
78d15e82 1289 struct inet6_request_sock *treq;
1da177e4
LT
1290 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1291 struct tcp6_sock *newtcp6sk;
1292 struct inet_sock *newinet;
1293 struct tcp_sock *newtp;
1294 struct sock *newsk;
1295 struct ipv6_txoptions *opt;
cfb6eeb4
YH
1296#ifdef CONFIG_TCP_MD5SIG
1297 struct tcp_md5sig_key *key;
1298#endif
1da177e4
LT
1299
1300 if (skb->protocol == htons(ETH_P_IP)) {
1301 /*
1302 * v6 mapped
1303 */
1304
1305 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1306
1ab1457c 1307 if (newsk == NULL)
1da177e4
LT
1308 return NULL;
1309
1310 newtcp6sk = (struct tcp6_sock *)newsk;
1311 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1312
1313 newinet = inet_sk(newsk);
1314 newnp = inet6_sk(newsk);
1315 newtp = tcp_sk(newsk);
1316
1317 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1318
1319 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1320 newinet->daddr);
1321
1322 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1323 newinet->saddr);
1324
1325 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1326
8292a17a 1327 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1328 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1329#ifdef CONFIG_TCP_MD5SIG
1330 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1331#endif
1332
1da177e4
LT
1333 newnp->pktoptions = NULL;
1334 newnp->opt = NULL;
505cbfc5 1335 newnp->mcast_oif = inet6_iif(skb);
0660e03f 1336 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1da177e4 1337
e6848976
ACM
1338 /*
1339 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1340 * here, tcp_create_openreq_child now does this for us, see the comment in
1341 * that function for the gory details. -acme
1da177e4 1342 */
1da177e4
LT
1343
1344 /* This is a tricky place. Until this moment IPv4 tcp
8292a17a 1345 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1346 Sync it now.
1347 */
d83d8461 1348 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1349
1350 return newsk;
1351 }
1352
78d15e82 1353 treq = inet6_rsk(req);
1da177e4
LT
1354 opt = np->opt;
1355
1356 if (sk_acceptq_is_full(sk))
1357 goto out_overflow;
1358
1da177e4
LT
1359 if (dst == NULL) {
1360 struct in6_addr *final_p = NULL, final;
1361 struct flowi fl;
1362
1363 memset(&fl, 0, sizeof(fl));
1364 fl.proto = IPPROTO_TCP;
2e6599cb 1365 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1da177e4
LT
1366 if (opt && opt->srcrt) {
1367 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1368 ipv6_addr_copy(&final, &fl.fl6_dst);
1369 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1370 final_p = &final;
1371 }
2e6599cb 1372 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1da177e4 1373 fl.oif = sk->sk_bound_dev_if;
2e6599cb 1374 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1da177e4 1375 fl.fl_ip_sport = inet_sk(sk)->sport;
4237c75c 1376 security_req_classify_flow(req, &fl);
1da177e4
LT
1377
1378 if (ip6_dst_lookup(sk, &dst, &fl))
1379 goto out;
1380
1381 if (final_p)
1382 ipv6_addr_copy(&fl.fl6_dst, final_p);
1383
1384 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1385 goto out;
1ab1457c 1386 }
1da177e4
LT
1387
1388 newsk = tcp_create_openreq_child(sk, req, skb);
1389 if (newsk == NULL)
1390 goto out;
1391
e6848976
ACM
1392 /*
1393 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1394 * count here, tcp_create_openreq_child now does this for us, see the
1395 * comment in that function for the gory details. -acme
1396 */
1da177e4 1397
59eed279 1398 newsk->sk_gso_type = SKB_GSO_TCPV6;
8e1ef0a9 1399 __ip6_dst_store(newsk, dst, NULL, NULL);
1da177e4
LT
1400
1401 newtcp6sk = (struct tcp6_sock *)newsk;
1402 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1403
1404 newtp = tcp_sk(newsk);
1405 newinet = inet_sk(newsk);
1406 newnp = inet6_sk(newsk);
1407
1408 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1409
2e6599cb
ACM
1410 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1411 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1412 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1413 newsk->sk_bound_dev_if = treq->iif;
1da177e4 1414
1ab1457c 1415 /* Now IPv6 options...
1da177e4
LT
1416
1417 First: no IPv4 options.
1418 */
1419 newinet->opt = NULL;
d35690be 1420 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1421
1422 /* Clone RX bits */
1423 newnp->rxopt.all = np->rxopt.all;
1424
1425 /* Clone pktoptions received with SYN */
1426 newnp->pktoptions = NULL;
2e6599cb
ACM
1427 if (treq->pktopts != NULL) {
1428 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1429 kfree_skb(treq->pktopts);
1430 treq->pktopts = NULL;
1da177e4
LT
1431 if (newnp->pktoptions)
1432 skb_set_owner_r(newnp->pktoptions, newsk);
1433 }
1434 newnp->opt = NULL;
505cbfc5 1435 newnp->mcast_oif = inet6_iif(skb);
0660e03f 1436 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1da177e4
LT
1437
1438 /* Clone native IPv6 options from listening socket (if any)
1439
1440 Yes, keeping reference count would be much more clever,
1441 but we do one more thing here: reattach optmem
1442 to newsk.
1443 */
1444 if (opt) {
1445 newnp->opt = ipv6_dup_options(newsk, opt);
1446 if (opt != np->opt)
1447 sock_kfree_s(sk, opt, opt->tot_len);
1448 }
1449
d83d8461 1450 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1da177e4 1451 if (newnp->opt)
d83d8461
ACM
1452 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1453 newnp->opt->opt_flen);
1da177e4 1454
5d424d5a 1455 tcp_mtup_init(newsk);
1da177e4
LT
1456 tcp_sync_mss(newsk, dst_mtu(dst));
1457 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1458 tcp_initialize_rcv_mss(newsk);
1459
1460 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1461
cfb6eeb4
YH
1462#ifdef CONFIG_TCP_MD5SIG
1463 /* Copy over the MD5 key from the original socket */
1464 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1465 /* We're using one, so create a matching key
1466 * on the newsk structure. If we fail to get
1467 * memory, then we end up not copying the key
1468 * across. Shucks.
1469 */
af879cc7
ACM
1470 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1471 if (newkey != NULL)
cfb6eeb4
YH
1472 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1473 newkey, key->keylen);
cfb6eeb4
YH
1474 }
1475#endif
1476
ab1e0a13 1477 __inet6_hash(newsk);
e56d8b8a 1478 __inet_inherit_port(sk, newsk);
1da177e4
LT
1479
1480 return newsk;
1481
1482out_overflow:
de0744af 1483 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1da177e4 1484out:
de0744af 1485 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1da177e4
LT
1486 if (opt && opt != np->opt)
1487 sock_kfree_s(sk, opt, opt->tot_len);
1488 dst_release(dst);
1489 return NULL;
1490}
1491
b51655b9 1492static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1da177e4 1493{
84fa7933 1494 if (skb->ip_summed == CHECKSUM_COMPLETE) {
aa8223c7 1495 if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
0660e03f 1496 &ipv6_hdr(skb)->daddr, skb->csum)) {
fb286bb2 1497 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 1498 return 0;
fb286bb2 1499 }
1da177e4 1500 }
fb286bb2 1501
aa8223c7 1502 skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
0660e03f
ACM
1503 &ipv6_hdr(skb)->saddr,
1504 &ipv6_hdr(skb)->daddr, 0));
fb286bb2 1505
1da177e4 1506 if (skb->len <= 76) {
fb286bb2 1507 return __skb_checksum_complete(skb);
1da177e4
LT
1508 }
1509 return 0;
1510}
1511
1512/* The socket must have its spinlock held when we get
1513 * here.
1514 *
1515 * We have a potential double-lock case here, so even when
1516 * doing backlog processing we use the BH locking scheme.
1517 * This is because we cannot sleep with the original spinlock
1518 * held.
1519 */
1520static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1521{
1522 struct ipv6_pinfo *np = inet6_sk(sk);
1523 struct tcp_sock *tp;
1524 struct sk_buff *opt_skb = NULL;
1525
1526 /* Imagine: socket is IPv6. IPv4 packet arrives,
1527 goes to IPv4 receive handler and backlogged.
1528 From backlog it always goes here. Kerboom...
1529 Fortunately, tcp_rcv_established and rcv_established
1530 handle them correctly, but it is not case with
1531 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1532 */
1533
1534 if (skb->protocol == htons(ETH_P_IP))
1535 return tcp_v4_do_rcv(sk, skb);
1536
cfb6eeb4
YH
1537#ifdef CONFIG_TCP_MD5SIG
1538 if (tcp_v6_inbound_md5_hash (sk, skb))
1539 goto discard;
1540#endif
1541
fda9ef5d 1542 if (sk_filter(sk, skb))
1da177e4
LT
1543 goto discard;
1544
1545 /*
1546 * socket locking is here for SMP purposes as backlog rcv
1547 * is currently called with bh processing disabled.
1548 */
1549
1550 /* Do Stevens' IPV6_PKTOPTIONS.
1551
1552 Yes, guys, it is the only place in our code, where we
1553 may make it not affecting IPv4.
1554 The rest of code is protocol independent,
1555 and I do not like idea to uglify IPv4.
1556
1557 Actually, all the idea behind IPV6_PKTOPTIONS
1558 looks not very well thought out. For now we latch
1559 options, received in the last packet, enqueued
1560 by tcp. Feel free to propose better solution.
1ab1457c 1561 --ANK (980728)
1da177e4
LT
1562 */
1563 if (np->rxopt.all)
1564 opt_skb = skb_clone(skb, GFP_ATOMIC);
1565
1566 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1567 TCP_CHECK_TIMER(sk);
aa8223c7 1568 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1da177e4
LT
1569 goto reset;
1570 TCP_CHECK_TIMER(sk);
1571 if (opt_skb)
1572 goto ipv6_pktoptions;
1573 return 0;
1574 }
1575
ab6a5bb6 1576 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1da177e4
LT
1577 goto csum_err;
1578
1ab1457c 1579 if (sk->sk_state == TCP_LISTEN) {
1da177e4
LT
1580 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1581 if (!nsk)
1582 goto discard;
1583
1584 /*
1585 * Queue it on the new socket if the new socket is active,
1586 * otherwise we just shortcircuit this and continue with
1587 * the new socket..
1588 */
1ab1457c 1589 if(nsk != sk) {
1da177e4
LT
1590 if (tcp_child_process(sk, nsk, skb))
1591 goto reset;
1592 if (opt_skb)
1593 __kfree_skb(opt_skb);
1594 return 0;
1595 }
1596 }
1597
1598 TCP_CHECK_TIMER(sk);
aa8223c7 1599 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1da177e4
LT
1600 goto reset;
1601 TCP_CHECK_TIMER(sk);
1602 if (opt_skb)
1603 goto ipv6_pktoptions;
1604 return 0;
1605
1606reset:
cfb6eeb4 1607 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1608discard:
1609 if (opt_skb)
1610 __kfree_skb(opt_skb);
1611 kfree_skb(skb);
1612 return 0;
1613csum_err:
63231bdd 1614 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1615 goto discard;
1616
1617
1618ipv6_pktoptions:
1619 /* Do you ask, what is it?
1620
1621 1. skb was enqueued by tcp.
1622 2. skb is added to tail of read queue, rather than out of order.
1623 3. socket is not in passive state.
1624 4. Finally, it really contains options, which user wants to receive.
1625 */
1626 tp = tcp_sk(sk);
1627 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1628 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1629 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
505cbfc5 1630 np->mcast_oif = inet6_iif(opt_skb);
333fad53 1631 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1632 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1da177e4
LT
1633 if (ipv6_opt_accepted(sk, opt_skb)) {
1634 skb_set_owner_r(opt_skb, sk);
1635 opt_skb = xchg(&np->pktoptions, opt_skb);
1636 } else {
1637 __kfree_skb(opt_skb);
1638 opt_skb = xchg(&np->pktoptions, NULL);
1639 }
1640 }
1641
1642 if (opt_skb)
1643 kfree_skb(opt_skb);
1644 return 0;
1645}
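The ipv6_pktoptions path above latches the most recent option-bearing skb with xchg(), so the reader always swaps out a consistent pointer and frees whatever it displaced. A hedged userspace analogue using the GCC/Clang atomic builtin; struct pktopts is an opaque stand-in.

/* Userspace analogue of the opt_skb = xchg(&np->pktoptions, ...) latch above. */
struct pktopts;		/* opaque stand-in for the queued sk_buff */

static struct pktopts *latch_pktopts(struct pktopts **slot,
				     struct pktopts *newest)
{
	/* Publish the newest buffer, return whatever was latched before
	 * (possibly NULL) so the caller can free it, as in the code above. */
	return __atomic_exchange_n(slot, newest, __ATOMIC_ACQ_REL);
}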
1646
e5bbef20 1647static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1648{
1ab1457c 1649 struct tcphdr *th;
1da177e4
LT
1650 struct sock *sk;
1651 int ret;
a86b1e30 1652 struct net *net = dev_net(skb->dev);
1da177e4
LT
1653
1654 if (skb->pkt_type != PACKET_HOST)
1655 goto discard_it;
1656
1657 /*
1658 * Count it even if it's bad.
1659 */
63231bdd 1660 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1661
1662 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1663 goto discard_it;
1664
aa8223c7 1665 th = tcp_hdr(skb);
1da177e4
LT
1666
1667 if (th->doff < sizeof(struct tcphdr)/4)
1668 goto bad_packet;
1669 if (!pskb_may_pull(skb, th->doff*4))
1670 goto discard_it;
1671
60476372 1672 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1da177e4
LT
1673 goto bad_packet;
1674
aa8223c7 1675 th = tcp_hdr(skb);
1da177e4
LT
1676 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1677 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1678 skb->len - th->doff*4);
1679 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1680 TCP_SKB_CB(skb)->when = 0;
0660e03f 1681 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1da177e4
LT
1682 TCP_SKB_CB(skb)->sacked = 0;
1683
9a1f27c4 1684 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1685 if (!sk)
1686 goto no_tcp_socket;
1687
1688process:
1689 if (sk->sk_state == TCP_TIME_WAIT)
1690 goto do_time_wait;
1691
1692 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1693 goto discard_and_relse;
1694
fda9ef5d 1695 if (sk_filter(sk, skb))
1da177e4
LT
1696 goto discard_and_relse;
1697
1698 skb->dev = NULL;
1699
293b9c42 1700 bh_lock_sock_nested(sk);
1da177e4
LT
1701 ret = 0;
1702 if (!sock_owned_by_user(sk)) {
1a2449a8 1703#ifdef CONFIG_NET_DMA
1ab1457c 1704 struct tcp_sock *tp = tcp_sk(sk);
b4caea8a
DM
1705 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1706 tp->ucopy.dma_chan = get_softnet_dma();
1ab1457c
YH
1707 if (tp->ucopy.dma_chan)
1708 ret = tcp_v6_do_rcv(sk, skb);
1709 else
1a2449a8
CL
1710#endif
1711 {
1712 if (!tcp_prequeue(sk, skb))
1713 ret = tcp_v6_do_rcv(sk, skb);
1714 }
1da177e4
LT
1715 } else
1716 sk_add_backlog(sk, skb);
1717 bh_unlock_sock(sk);
1718
1719 sock_put(sk);
1720 return ret ? -1 : 0;
1721
1722no_tcp_socket:
1723 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1724 goto discard_it;
1725
1726 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1727bad_packet:
63231bdd 1728 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1729 } else {
cfb6eeb4 1730 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1731 }
1732
1733discard_it:
1734
1735 /*
1736 * Discard frame
1737 */
1738
1739 kfree_skb(skb);
1740 return 0;
1741
1742discard_and_relse:
1743 sock_put(sk);
1744 goto discard_it;
1745
1746do_time_wait:
1747 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1748 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1749 goto discard_it;
1750 }
1751
1752 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
63231bdd 1753 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1754 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1755 goto discard_it;
1756 }
1757
9469c7b4 1758 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1759 case TCP_TW_SYN:
1760 {
1761 struct sock *sk2;
1762
c346dca1 1763 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
0660e03f 1764 &ipv6_hdr(skb)->daddr,
505cbfc5 1765 ntohs(th->dest), inet6_iif(skb));
1da177e4 1766 if (sk2 != NULL) {
295ff7ed
ACM
1767 struct inet_timewait_sock *tw = inet_twsk(sk);
1768 inet_twsk_deschedule(tw, &tcp_death_row);
1769 inet_twsk_put(tw);
1da177e4
LT
1770 sk = sk2;
1771 goto process;
1772 }
1773 /* Fall through to ACK */
1774 }
1775 case TCP_TW_ACK:
1776 tcp_v6_timewait_ack(sk, skb);
1777 break;
1778 case TCP_TW_RST:
1779 goto no_tcp_socket;
1780 case TCP_TW_SUCCESS:;
1781 }
1782 goto discard_it;
1783}
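tcp_v6_rcv() above validates the header in two steps: pskb_may_pull() for the fixed 20-byte header, then a re-check against th->doff before the options are touched. A userspace sketch of the same bounds checks on a segment that is already linear in memory; not kernel code.

/* Sketch of the data-offset sanity checks performed in tcp_v6_rcv() above. */
#include <netinet/tcp.h>
#include <stddef.h>

static const struct tcphdr *parse_tcp_header(const void *buf, size_t len)
{
	const struct tcphdr *th = buf;
	size_t hdrlen;

	if (len < sizeof(*th))
		return NULL;			/* truncated fixed header */
	hdrlen = (size_t)th->doff * 4;
	if (hdrlen < sizeof(*th) || hdrlen > len)
		return NULL;			/* bogus data offset */
	return th;				/* options occupy th + 1 .. hdrlen */
}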
1784
1da177e4
LT
1785static int tcp_v6_remember_stamp(struct sock *sk)
1786{
1787 /* Alas, not yet... */
1788 return 0;
1789}
1790
8292a17a 1791static struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1792 .queue_xmit = inet6_csk_xmit,
1793 .send_check = tcp_v6_send_check,
1794 .rebuild_header = inet6_sk_rebuild_header,
1795 .conn_request = tcp_v6_conn_request,
1796 .syn_recv_sock = tcp_v6_syn_recv_sock,
1797 .remember_stamp = tcp_v6_remember_stamp,
1798 .net_header_len = sizeof(struct ipv6hdr),
1799 .setsockopt = ipv6_setsockopt,
1800 .getsockopt = ipv6_getsockopt,
1801 .addr2sockaddr = inet6_csk_addr2sockaddr,
1802 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1803 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1804#ifdef CONFIG_COMPAT
543d9cfe
ACM
1805 .compat_setsockopt = compat_ipv6_setsockopt,
1806 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1807#endif
1da177e4
LT
1808};
1809
cfb6eeb4 1810#ifdef CONFIG_TCP_MD5SIG
a928630a 1811static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1812 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1813 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4
YH
1814 .md5_add = tcp_v6_md5_add_func,
1815 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1816};
a928630a 1817#endif
cfb6eeb4 1818
1da177e4
LT
/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

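/* The same MD5 signature hooks, but hashing over IPv4 pseudo-headers for
 * peers reached through v4-mapped addresses. */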
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

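	/*
	 * Start out with the fully IPv6-specific operations; if the socket
	 * later connects to an IPv4-mapped address, tcp_v6_connect() swaps
	 * in the ipv6_mapped (and MD5 mapped) tables instead.
	 */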
	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

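/*
 * Tear-down is the IPv4 destructor plus IPv6-only state (and, with
 * CONFIG_TCP_MD5SIG, any per-peer MD5 keys attached to the socket).
 */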
static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */
		   0, req);
}

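/*
 * One line per established/listening socket in /proc/net/tcp6, in the
 * same column layout as /proc/net/tcp: addresses and ports are printed
 * in hex, with the four 32-bit words of each IPv6 address back to back.
 * The column header itself is emitted in tcp6_seq_show() below.
 */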
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

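/* TIME-WAIT sockets carry only the address pair and a countdown, so most
 * of the remaining row fields are printed as constants here. */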
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = &tw6->tw_v6_daddr;
	src   = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

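/*
 * Dispatch on the generic TCP seq_file iterator state: listening and
 * established sockets, embryonic open requests, and TIME-WAIT entries
 * each use their own formatter above.
 */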
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 " sl "
			 "local_address "
			 "remote_address "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 " uid timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

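/* Register/unregister the /proc/net/tcp6 entry for one network namespace. */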
int tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

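/*
 * The transport-level protocol description handed to the socket layer.
 * A userspace socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP) ends up being
 * serviced by the callbacks below; most are shared with IPv4 TCP, with
 * the v6-specific connect/init/hash/backlog hooks plugged in.
 */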
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

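/*
 * Hook into the IPv6 input path for protocol number IPPROTO_TCP,
 * including the GSO callbacks used when segmentation is deferred
 * towards the driver boundary.
 */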
static struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.gso_send_check	= tcp_v6_gso_send_check,
	.gso_segment	= tcp_tso_segment,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.capability	= -1,
	.no_check	= 0,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};

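/*
 * Per-network-namespace setup: each namespace gets a control socket for
 * sending resets and ACKs on behalf of no particular socket, and on
 * teardown its remaining TIME-WAIT entries are purged from the shared
 * hash table.
 */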
static int tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init = tcpv6_net_init,
	.exit = tcpv6_net_exit,
};

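/*
 * Module init: register the inet6 protocol handler first, then the
 * protosw that exposes SOCK_STREAM/IPPROTO_TCP through the INET6 API,
 * and finally the per-namespace hooks.  Failures unwind in reverse
 * order of registration.
 */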
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}