[DCCP]: Use reqsk_free in dccp_v4_conn_request
[linux-block.git] / net / ipv6 / tcp_ipv6.c
CommitLineData
1da177e4
LT
1/*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on:
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
14 *
15 * Fixes:
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28#include <linux/module.h>
29#include <linux/config.h>
30#include <linux/errno.h>
31#include <linux/types.h>
32#include <linux/socket.h>
33#include <linux/sockios.h>
34#include <linux/net.h>
35#include <linux/jiffies.h>
36#include <linux/in.h>
37#include <linux/in6.h>
38#include <linux/netdevice.h>
39#include <linux/init.h>
40#include <linux/jhash.h>
41#include <linux/ipsec.h>
42#include <linux/times.h>
43
44#include <linux/ipv6.h>
45#include <linux/icmpv6.h>
46#include <linux/random.h>
47
48#include <net/tcp.h>
49#include <net/ndisc.h>
5324a040 50#include <net/inet6_hashtables.h>
8129765a 51#include <net/inet6_connection_sock.h>
1da177e4
LT
52#include <net/ipv6.h>
53#include <net/transp_v6.h>
54#include <net/addrconf.h>
55#include <net/ip6_route.h>
56#include <net/ip6_checksum.h>
57#include <net/inet_ecn.h>
58#include <net/protocol.h>
59#include <net/xfrm.h>
60#include <net/addrconf.h>
61#include <net/snmp.h>
62#include <net/dsfield.h>
63
64#include <asm/uaccess.h>
65
66#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
69static void tcp_v6_send_reset(struct sk_buff *skb);
60236fdd 70static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
8292a17a 71static void tcp_v6_send_check(struct sock *sk, int len,
1da177e4
LT
72 struct sk_buff *skb);
73
74static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
1da177e4 75
8292a17a
ACM
76static struct inet_connection_sock_af_ops ipv6_mapped;
77static struct inet_connection_sock_af_ops ipv6_specific;
1da177e4 78
1da177e4
LT
79static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
80{
971af18b
ACM
81 return inet_csk_get_port(&tcp_hashinfo, sk, snum,
82 inet6_csk_bind_conflict);
1da177e4
LT
83}
84
1da177e4
LT
85static void tcp_v6_hash(struct sock *sk)
86{
87 if (sk->sk_state != TCP_CLOSE) {
8292a17a 88 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
1da177e4
LT
89 tcp_prot.hash(sk);
90 return;
91 }
92 local_bh_disable();
90b19d31 93 __inet6_hash(&tcp_hashinfo, sk);
1da177e4
LT
94 local_bh_enable();
95 }
96}
97
1da177e4
LT
98static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
99 struct in6_addr *saddr,
100 struct in6_addr *daddr,
101 unsigned long base)
102{
103 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
104}
105
106static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
107{
108 if (skb->protocol == htons(ETH_P_IPV6)) {
109 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
110 skb->nh.ipv6h->saddr.s6_addr32,
111 skb->h.th->dest,
112 skb->h.th->source);
113 } else {
114 return secure_tcp_sequence_number(skb->nh.iph->daddr,
115 skb->nh.iph->saddr,
116 skb->h.th->dest,
117 skb->h.th->source);
118 }
119}
120
505cbfc5 121static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
8feaf0c0 122 struct inet_timewait_sock **twp)
1da177e4
LT
123{
124 struct inet_sock *inet = inet_sk(sk);
505cbfc5
ACM
125 const struct ipv6_pinfo *np = inet6_sk(sk);
126 const struct in6_addr *daddr = &np->rcv_saddr;
127 const struct in6_addr *saddr = &np->daddr;
128 const int dif = sk->sk_bound_dev_if;
8feaf0c0 129 const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
81c3d547
ED
130 unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport);
131 struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
1da177e4 132 struct sock *sk2;
8feaf0c0
ACM
133 const struct hlist_node *node;
134 struct inet_timewait_sock *tw;
1da177e4 135
81c3d547 136 prefetch(head->chain.first);
1da177e4
LT
137 write_lock(&head->lock);
138
139 /* Check TIME-WAIT sockets first. */
6e04e021 140 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
0fa1a53e 141 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2);
8feaf0c0
ACM
142
143 tw = inet_twsk(sk2);
1da177e4
LT
144
145 if(*((__u32 *)&(tw->tw_dport)) == ports &&
146 sk2->sk_family == PF_INET6 &&
0fa1a53e
ACM
147 ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) &&
148 ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) &&
1da177e4 149 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
8feaf0c0 150 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
1da177e4
LT
151 struct tcp_sock *tp = tcp_sk(sk);
152
8feaf0c0
ACM
153 if (tcptw->tw_ts_recent_stamp &&
154 (!twp ||
155 (sysctl_tcp_tw_reuse &&
156 xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
1da177e4 157 /* See comment in tcp_ipv4.c */
8feaf0c0 158 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
1da177e4
LT
159 if (!tp->write_seq)
160 tp->write_seq = 1;
8feaf0c0
ACM
161 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
162 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
1da177e4
LT
163 sock_hold(sk2);
164 goto unique;
165 } else
166 goto not_unique;
167 }
168 }
169 tw = NULL;
170
171 /* And established part... */
172 sk_for_each(sk2, node, &head->chain) {
81c3d547 173 if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif))
1da177e4
LT
174 goto not_unique;
175 }
176
177unique:
178 BUG_TRAP(sk_unhashed(sk));
179 __sk_add_node(sk, &head->chain);
81c3d547 180 sk->sk_hash = hash;
1da177e4
LT
181 sock_prot_inc_use(sk->sk_prot);
182 write_unlock(&head->lock);
183
184 if (twp) {
185 *twp = tw;
186 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
187 } else if (tw) {
188 /* Silly. Should hash-dance instead... */
295ff7ed 189 inet_twsk_deschedule(tw, &tcp_death_row);
1da177e4
LT
190 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
191
8feaf0c0 192 inet_twsk_put(tw);
1da177e4
LT
193 }
194 return 0;
195
196not_unique:
197 write_unlock(&head->lock);
198 return -EADDRNOTAVAIL;
199}
200
201static inline u32 tcpv6_port_offset(const struct sock *sk)
202{
203 const struct inet_sock *inet = inet_sk(sk);
204 const struct ipv6_pinfo *np = inet6_sk(sk);
205
206 return secure_tcpv6_port_ephemeral(np->rcv_saddr.s6_addr32,
207 np->daddr.s6_addr32,
208 inet->dport);
209}
210
211static int tcp_v6_hash_connect(struct sock *sk)
212{
213 unsigned short snum = inet_sk(sk)->num;
0f7ff927
ACM
214 struct inet_bind_hashbucket *head;
215 struct inet_bind_bucket *tb;
1da177e4
LT
216 int ret;
217
218 if (!snum) {
219 int low = sysctl_local_port_range[0];
220 int high = sysctl_local_port_range[1];
221 int range = high - low;
222 int i;
223 int port;
224 static u32 hint;
225 u32 offset = hint + tcpv6_port_offset(sk);
226 struct hlist_node *node;
8feaf0c0 227 struct inet_timewait_sock *tw = NULL;
1da177e4
LT
228
229 local_bh_disable();
230 for (i = 1; i <= range; i++) {
231 port = low + (i + offset) % range;
6e04e021 232 head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
1da177e4
LT
233 spin_lock(&head->lock);
234
235 /* Does not bother with rcv_saddr checks,
236 * because the established check is already
237 * unique enough.
238 */
0f7ff927 239 inet_bind_bucket_for_each(tb, node, &head->chain) {
1da177e4
LT
240 if (tb->port == port) {
241 BUG_TRAP(!hlist_empty(&tb->owners));
242 if (tb->fastreuse >= 0)
243 goto next_port;
244 if (!__tcp_v6_check_established(sk,
245 port,
246 &tw))
247 goto ok;
248 goto next_port;
249 }
250 }
251
6e04e021 252 tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
1da177e4
LT
253 if (!tb) {
254 spin_unlock(&head->lock);
255 break;
256 }
257 tb->fastreuse = -1;
258 goto ok;
259
260 next_port:
261 spin_unlock(&head->lock);
262 }
263 local_bh_enable();
264
265 return -EADDRNOTAVAIL;
266
267ok:
268 hint += i;
269
270 /* Head lock still held and bh's disabled */
2d8c4ce5 271 inet_bind_hash(sk, tb, port);
1da177e4
LT
272 if (sk_unhashed(sk)) {
273 inet_sk(sk)->sport = htons(port);
90b19d31 274 __inet6_hash(&tcp_hashinfo, sk);
1da177e4
LT
275 }
276 spin_unlock(&head->lock);
277
278 if (tw) {
295ff7ed 279 inet_twsk_deschedule(tw, &tcp_death_row);
8feaf0c0 280 inet_twsk_put(tw);
1da177e4
LT
281 }
282
283 ret = 0;
284 goto out;
285 }
286
6e04e021 287 head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
463c84b9 288 tb = inet_csk(sk)->icsk_bind_hash;
1da177e4
LT
289 spin_lock_bh(&head->lock);
290
291 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
90b19d31 292 __inet6_hash(&tcp_hashinfo, sk);
1da177e4
LT
293 spin_unlock_bh(&head->lock);
294 return 0;
295 } else {
296 spin_unlock(&head->lock);
297 /* No definite answer... Walk to established hash table */
298 ret = __tcp_v6_check_established(sk, snum, NULL);
299out:
300 local_bh_enable();
301 return ret;
302 }
303}
304
1da177e4
LT
305static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
306 int addr_len)
307{
308 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
309 struct inet_sock *inet = inet_sk(sk);
310 struct ipv6_pinfo *np = inet6_sk(sk);
311 struct tcp_sock *tp = tcp_sk(sk);
312 struct in6_addr *saddr = NULL, *final_p = NULL, final;
313 struct flowi fl;
314 struct dst_entry *dst;
315 int addr_type;
316 int err;
317
318 if (addr_len < SIN6_LEN_RFC2133)
319 return -EINVAL;
320
321 if (usin->sin6_family != AF_INET6)
322 return(-EAFNOSUPPORT);
323
324 memset(&fl, 0, sizeof(fl));
325
326 if (np->sndflow) {
327 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
328 IP6_ECN_flow_init(fl.fl6_flowlabel);
329 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
330 struct ip6_flowlabel *flowlabel;
331 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
332 if (flowlabel == NULL)
333 return -EINVAL;
334 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
335 fl6_sock_release(flowlabel);
336 }
337 }
338
339 /*
340 * connect() to INADDR_ANY means loopback (BSD'ism).
341 */
342
343 if(ipv6_addr_any(&usin->sin6_addr))
344 usin->sin6_addr.s6_addr[15] = 0x1;
345
346 addr_type = ipv6_addr_type(&usin->sin6_addr);
347
348 if(addr_type & IPV6_ADDR_MULTICAST)
349 return -ENETUNREACH;
350
351 if (addr_type&IPV6_ADDR_LINKLOCAL) {
352 if (addr_len >= sizeof(struct sockaddr_in6) &&
353 usin->sin6_scope_id) {
354 /* If interface is set while binding, indices
355 * must coincide.
356 */
357 if (sk->sk_bound_dev_if &&
358 sk->sk_bound_dev_if != usin->sin6_scope_id)
359 return -EINVAL;
360
361 sk->sk_bound_dev_if = usin->sin6_scope_id;
362 }
363
364 /* Connect to link-local address requires an interface */
365 if (!sk->sk_bound_dev_if)
366 return -EINVAL;
367 }
368
369 if (tp->rx_opt.ts_recent_stamp &&
370 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
371 tp->rx_opt.ts_recent = 0;
372 tp->rx_opt.ts_recent_stamp = 0;
373 tp->write_seq = 0;
374 }
375
376 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
377 np->flow_label = fl.fl6_flowlabel;
378
379 /*
380 * TCP over IPv4
381 */
382
383 if (addr_type == IPV6_ADDR_MAPPED) {
384 u32 exthdrlen = tp->ext_header_len;
385 struct sockaddr_in sin;
386
387 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
388
389 if (__ipv6_only_sock(sk))
390 return -ENETUNREACH;
391
392 sin.sin_family = AF_INET;
393 sin.sin_port = usin->sin6_port;
394 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
395
8292a17a 396 inet_csk(sk)->icsk_af_ops = &ipv6_mapped;
1da177e4
LT
397 sk->sk_backlog_rcv = tcp_v4_do_rcv;
398
399 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
400
401 if (err) {
402 tp->ext_header_len = exthdrlen;
8292a17a 403 inet_csk(sk)->icsk_af_ops = &ipv6_specific;
1da177e4
LT
404 sk->sk_backlog_rcv = tcp_v6_do_rcv;
405 goto failure;
406 } else {
407 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
408 inet->saddr);
409 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
410 inet->rcv_saddr);
411 }
412
413 return err;
414 }
415
416 if (!ipv6_addr_any(&np->rcv_saddr))
417 saddr = &np->rcv_saddr;
418
419 fl.proto = IPPROTO_TCP;
420 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
421 ipv6_addr_copy(&fl.fl6_src,
422 (saddr ? saddr : &np->saddr));
423 fl.oif = sk->sk_bound_dev_if;
424 fl.fl_ip_dport = usin->sin6_port;
425 fl.fl_ip_sport = inet->sport;
426
427 if (np->opt && np->opt->srcrt) {
428 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
429 ipv6_addr_copy(&final, &fl.fl6_dst);
430 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
431 final_p = &final;
432 }
433
434 err = ip6_dst_lookup(sk, &dst, &fl);
435 if (err)
436 goto failure;
437 if (final_p)
438 ipv6_addr_copy(&fl.fl6_dst, final_p);
439
e104411b 440 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1da177e4 441 goto failure;
1da177e4
LT
442
443 if (saddr == NULL) {
444 saddr = &fl.fl6_src;
445 ipv6_addr_copy(&np->rcv_saddr, saddr);
446 }
447
448 /* set the source address */
449 ipv6_addr_copy(&np->saddr, saddr);
450 inet->rcv_saddr = LOOPBACK4_IPV6;
451
452 ip6_dst_store(sk, dst, NULL);
453 sk->sk_route_caps = dst->dev->features &
454 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
455
456 tp->ext_header_len = 0;
457 if (np->opt)
458 tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
459
460 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
461
462 inet->dport = usin->sin6_port;
463
464 tcp_set_state(sk, TCP_SYN_SENT);
465 err = tcp_v6_hash_connect(sk);
466 if (err)
467 goto late_failure;
468
469 if (!tp->write_seq)
470 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
471 np->daddr.s6_addr32,
472 inet->sport,
473 inet->dport);
474
475 err = tcp_connect(sk);
476 if (err)
477 goto late_failure;
478
479 return 0;
480
481late_failure:
482 tcp_set_state(sk, TCP_CLOSE);
483 __sk_dst_reset(sk);
484failure:
485 inet->dport = 0;
486 sk->sk_route_caps = 0;
487 return err;
488}
489
490static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
491 int type, int code, int offset, __u32 info)
492{
493 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
505cbfc5 494 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
1da177e4
LT
495 struct ipv6_pinfo *np;
496 struct sock *sk;
497 int err;
498 struct tcp_sock *tp;
499 __u32 seq;
500
505cbfc5
ACM
501 sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
502 th->source, skb->dev->ifindex);
1da177e4
LT
503
504 if (sk == NULL) {
505 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
506 return;
507 }
508
509 if (sk->sk_state == TCP_TIME_WAIT) {
8feaf0c0 510 inet_twsk_put((struct inet_timewait_sock *)sk);
1da177e4
LT
511 return;
512 }
513
514 bh_lock_sock(sk);
515 if (sock_owned_by_user(sk))
516 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
517
518 if (sk->sk_state == TCP_CLOSE)
519 goto out;
520
521 tp = tcp_sk(sk);
522 seq = ntohl(th->seq);
523 if (sk->sk_state != TCP_LISTEN &&
524 !between(seq, tp->snd_una, tp->snd_nxt)) {
525 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
526 goto out;
527 }
528
529 np = inet6_sk(sk);
530
531 if (type == ICMPV6_PKT_TOOBIG) {
532 struct dst_entry *dst = NULL;
533
534 if (sock_owned_by_user(sk))
535 goto out;
536 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
537 goto out;
538
539 /* icmp should have updated the destination cache entry */
540 dst = __sk_dst_check(sk, np->dst_cookie);
541
542 if (dst == NULL) {
543 struct inet_sock *inet = inet_sk(sk);
544 struct flowi fl;
545
546 /* BUGGG_FUTURE: Again, it is not clear how
547 to handle rthdr case. Ignore this complexity
548 for now.
549 */
550 memset(&fl, 0, sizeof(fl));
551 fl.proto = IPPROTO_TCP;
552 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
553 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
554 fl.oif = sk->sk_bound_dev_if;
555 fl.fl_ip_dport = inet->dport;
556 fl.fl_ip_sport = inet->sport;
557
558 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
559 sk->sk_err_soft = -err;
560 goto out;
561 }
562
563 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
564 sk->sk_err_soft = -err;
565 goto out;
566 }
567
568 } else
569 dst_hold(dst);
570
571 if (tp->pmtu_cookie > dst_mtu(dst)) {
572 tcp_sync_mss(sk, dst_mtu(dst));
573 tcp_simple_retransmit(sk);
574 } /* else let the usual retransmit timer handle it */
575 dst_release(dst);
576 goto out;
577 }
578
579 icmpv6_err_convert(type, code, &err);
580
60236fdd 581 /* Might be for an request_sock */
1da177e4 582 switch (sk->sk_state) {
60236fdd 583 struct request_sock *req, **prev;
1da177e4
LT
584 case TCP_LISTEN:
585 if (sock_owned_by_user(sk))
586 goto out;
587
8129765a
ACM
588 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
589 &hdr->saddr, inet6_iif(skb));
1da177e4
LT
590 if (!req)
591 goto out;
592
593 /* ICMPs are not backlogged, hence we cannot get
594 * an established socket here.
595 */
596 BUG_TRAP(req->sk == NULL);
597
2e6599cb 598 if (seq != tcp_rsk(req)->snt_isn) {
1da177e4
LT
599 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
600 goto out;
601 }
602
463c84b9 603 inet_csk_reqsk_queue_drop(sk, req, prev);
1da177e4
LT
604 goto out;
605
606 case TCP_SYN_SENT:
607 case TCP_SYN_RECV: /* Cannot happen.
608 It can, it SYNs are crossed. --ANK */
609 if (!sock_owned_by_user(sk)) {
610 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
611 sk->sk_err = err;
612 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
613
614 tcp_done(sk);
615 } else
616 sk->sk_err_soft = err;
617 goto out;
618 }
619
620 if (!sock_owned_by_user(sk) && np->recverr) {
621 sk->sk_err = err;
622 sk->sk_error_report(sk);
623 } else
624 sk->sk_err_soft = err;
625
626out:
627 bh_unlock_sock(sk);
628 sock_put(sk);
629}
630
631
60236fdd 632static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
1da177e4
LT
633 struct dst_entry *dst)
634{
ca304b61 635 struct inet6_request_sock *treq = inet6_rsk(req);
1da177e4
LT
636 struct ipv6_pinfo *np = inet6_sk(sk);
637 struct sk_buff * skb;
638 struct ipv6_txoptions *opt = NULL;
639 struct in6_addr * final_p = NULL, final;
640 struct flowi fl;
641 int err = -1;
642
643 memset(&fl, 0, sizeof(fl));
644 fl.proto = IPPROTO_TCP;
2e6599cb
ACM
645 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
646 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1da177e4 647 fl.fl6_flowlabel = 0;
2e6599cb
ACM
648 fl.oif = treq->iif;
649 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1da177e4
LT
650 fl.fl_ip_sport = inet_sk(sk)->sport;
651
652 if (dst == NULL) {
653 opt = np->opt;
654 if (opt == NULL &&
333fad53 655 np->rxopt.bits.osrcrt == 2 &&
2e6599cb
ACM
656 treq->pktopts) {
657 struct sk_buff *pktopts = treq->pktopts;
1da177e4
LT
658 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
659 if (rxopt->srcrt)
660 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
661 }
662
663 if (opt && opt->srcrt) {
664 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
665 ipv6_addr_copy(&final, &fl.fl6_dst);
666 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
667 final_p = &final;
668 }
669
670 err = ip6_dst_lookup(sk, &dst, &fl);
671 if (err)
672 goto done;
673 if (final_p)
674 ipv6_addr_copy(&fl.fl6_dst, final_p);
675 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
676 goto done;
677 }
678
679 skb = tcp_make_synack(sk, dst, req);
680 if (skb) {
681 struct tcphdr *th = skb->h.th;
682
683 th->check = tcp_v6_check(th, skb->len,
2e6599cb 684 &treq->loc_addr, &treq->rmt_addr,
1da177e4
LT
685 csum_partial((char *)th, skb->len, skb->csum));
686
2e6599cb 687 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1da177e4
LT
688 err = ip6_xmit(sk, skb, &fl, opt, 0);
689 if (err == NET_XMIT_CN)
690 err = 0;
691 }
692
693done:
1da177e4
LT
694 if (opt && opt != np->opt)
695 sock_kfree_s(sk, opt, opt->tot_len);
696 return err;
697}
698
60236fdd 699static void tcp_v6_reqsk_destructor(struct request_sock *req)
1da177e4 700{
ca304b61
ACM
701 if (inet6_rsk(req)->pktopts)
702 kfree_skb(inet6_rsk(req)->pktopts);
1da177e4
LT
703}
704
60236fdd 705static struct request_sock_ops tcp6_request_sock_ops = {
1da177e4 706 .family = AF_INET6,
2e6599cb 707 .obj_size = sizeof(struct tcp6_request_sock),
1da177e4 708 .rtx_syn_ack = tcp_v6_send_synack,
60236fdd
ACM
709 .send_ack = tcp_v6_reqsk_send_ack,
710 .destructor = tcp_v6_reqsk_destructor,
1da177e4
LT
711 .send_reset = tcp_v6_send_reset
712};
713
8292a17a 714static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
1da177e4
LT
715{
716 struct ipv6_pinfo *np = inet6_sk(sk);
8292a17a 717 struct tcphdr *th = skb->h.th;
1da177e4
LT
718
719 if (skb->ip_summed == CHECKSUM_HW) {
720 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
721 skb->csum = offsetof(struct tcphdr, check);
722 } else {
723 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
724 csum_partial((char *)th, th->doff<<2,
725 skb->csum));
726 }
727}
728
729
730static void tcp_v6_send_reset(struct sk_buff *skb)
731{
732 struct tcphdr *th = skb->h.th, *t1;
733 struct sk_buff *buff;
734 struct flowi fl;
735
736 if (th->rst)
737 return;
738
739 if (!ipv6_unicast_destination(skb))
740 return;
741
742 /*
743 * We need to grab some memory, and put together an RST,
744 * and then put it into the queue to be sent.
745 */
746
747 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
748 GFP_ATOMIC);
749 if (buff == NULL)
750 return;
751
752 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
753
754 t1 = (struct tcphdr *) skb_push(buff,sizeof(struct tcphdr));
755
756 /* Swap the send and the receive. */
757 memset(t1, 0, sizeof(*t1));
758 t1->dest = th->source;
759 t1->source = th->dest;
760 t1->doff = sizeof(*t1)/4;
761 t1->rst = 1;
762
763 if(th->ack) {
764 t1->seq = th->ack_seq;
765 } else {
766 t1->ack = 1;
767 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
768 + skb->len - (th->doff<<2));
769 }
770
771 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
772
773 memset(&fl, 0, sizeof(fl));
774 ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
775 ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
776
777 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
778 sizeof(*t1), IPPROTO_TCP,
779 buff->csum);
780
781 fl.proto = IPPROTO_TCP;
505cbfc5 782 fl.oif = inet6_iif(skb);
1da177e4
LT
783 fl.fl_ip_dport = t1->dest;
784 fl.fl_ip_sport = t1->source;
785
786 /* sk = NULL, but it is safe for now. RST socket required. */
787 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
788
ecc51b6d
ACM
789 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
790 ip6_xmit(NULL, buff, &fl, NULL, 0);
791 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
792 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1da177e4 793 return;
ecc51b6d 794 }
1da177e4
LT
795 }
796
797 kfree_skb(buff);
798}
799
800static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
801{
802 struct tcphdr *th = skb->h.th, *t1;
803 struct sk_buff *buff;
804 struct flowi fl;
805 int tot_len = sizeof(struct tcphdr);
806
807 if (ts)
808 tot_len += 3*4;
809
810 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
811 GFP_ATOMIC);
812 if (buff == NULL)
813 return;
814
815 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
816
817 t1 = (struct tcphdr *) skb_push(buff,tot_len);
818
819 /* Swap the send and the receive. */
820 memset(t1, 0, sizeof(*t1));
821 t1->dest = th->source;
822 t1->source = th->dest;
823 t1->doff = tot_len/4;
824 t1->seq = htonl(seq);
825 t1->ack_seq = htonl(ack);
826 t1->ack = 1;
827 t1->window = htons(win);
828
829 if (ts) {
830 u32 *ptr = (u32*)(t1 + 1);
831 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
832 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
833 *ptr++ = htonl(tcp_time_stamp);
834 *ptr = htonl(ts);
835 }
836
837 buff->csum = csum_partial((char *)t1, tot_len, 0);
838
839 memset(&fl, 0, sizeof(fl));
840 ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
841 ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);
842
843 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
844 tot_len, IPPROTO_TCP,
845 buff->csum);
846
847 fl.proto = IPPROTO_TCP;
505cbfc5 848 fl.oif = inet6_iif(skb);
1da177e4
LT
849 fl.fl_ip_dport = t1->dest;
850 fl.fl_ip_sport = t1->source;
851
852 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
ecc51b6d
ACM
853 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
854 ip6_xmit(NULL, buff, &fl, NULL, 0);
855 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1da177e4 856 return;
ecc51b6d 857 }
1da177e4
LT
858 }
859
860 kfree_skb(buff);
861}
862
863static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
864{
8feaf0c0
ACM
865 struct inet_timewait_sock *tw = inet_twsk(sk);
866 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 867
8feaf0c0
ACM
868 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
869 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
870 tcptw->tw_ts_recent);
1da177e4 871
8feaf0c0 872 inet_twsk_put(tw);
1da177e4
LT
873}
874
60236fdd 875static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1da177e4 876{
2e6599cb 877 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
1da177e4
LT
878}
879
880
881static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
882{
60236fdd 883 struct request_sock *req, **prev;
505cbfc5 884 const struct tcphdr *th = skb->h.th;
1da177e4
LT
885 struct sock *nsk;
886
887 /* Find possible connection requests. */
8129765a
ACM
888 req = inet6_csk_search_req(sk, &prev, th->source,
889 &skb->nh.ipv6h->saddr,
890 &skb->nh.ipv6h->daddr, inet6_iif(skb));
1da177e4
LT
891 if (req)
892 return tcp_check_req(sk, skb, req, prev);
893
505cbfc5
ACM
894 nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
895 th->source, &skb->nh.ipv6h->daddr,
896 ntohs(th->dest), inet6_iif(skb));
1da177e4
LT
897
898 if (nsk) {
899 if (nsk->sk_state != TCP_TIME_WAIT) {
900 bh_lock_sock(nsk);
901 return nsk;
902 }
8feaf0c0 903 inet_twsk_put((struct inet_timewait_sock *)nsk);
1da177e4
LT
904 return NULL;
905 }
906
907#if 0 /*def CONFIG_SYN_COOKIES*/
908 if (!th->rst && !th->syn && th->ack)
909 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
910#endif
911 return sk;
912}
913
1da177e4
LT
914/* FIXME: this is substantially similar to the ipv4 code.
915 * Can some kind of merge be done? -- erics
916 */
917static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
918{
ca304b61 919 struct inet6_request_sock *treq;
1da177e4
LT
920 struct ipv6_pinfo *np = inet6_sk(sk);
921 struct tcp_options_received tmp_opt;
922 struct tcp_sock *tp = tcp_sk(sk);
60236fdd 923 struct request_sock *req = NULL;
1da177e4
LT
924 __u32 isn = TCP_SKB_CB(skb)->when;
925
926 if (skb->protocol == htons(ETH_P_IP))
927 return tcp_v4_conn_request(sk, skb);
928
929 if (!ipv6_unicast_destination(skb))
930 goto drop;
931
932 /*
933 * There are no SYN attacks on IPv6, yet...
934 */
463c84b9 935 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1da177e4
LT
936 if (net_ratelimit())
937 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
938 goto drop;
939 }
940
463c84b9 941 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1da177e4
LT
942 goto drop;
943
ca304b61 944 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1da177e4
LT
945 if (req == NULL)
946 goto drop;
947
948 tcp_clear_options(&tmp_opt);
949 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
950 tmp_opt.user_mss = tp->rx_opt.user_mss;
951
952 tcp_parse_options(skb, &tmp_opt, 0);
953
954 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
955 tcp_openreq_init(req, &tmp_opt, skb);
956
ca304b61 957 treq = inet6_rsk(req);
2e6599cb
ACM
958 ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
959 ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
1da177e4 960 TCP_ECN_create_request(req, skb->h.th);
2e6599cb 961 treq->pktopts = NULL;
1da177e4 962 if (ipv6_opt_accepted(sk, skb) ||
333fad53
YH
963 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
964 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1da177e4 965 atomic_inc(&skb->users);
2e6599cb 966 treq->pktopts = skb;
1da177e4 967 }
2e6599cb 968 treq->iif = sk->sk_bound_dev_if;
1da177e4
LT
969
970 /* So that link locals have meaning */
971 if (!sk->sk_bound_dev_if &&
2e6599cb 972 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
505cbfc5 973 treq->iif = inet6_iif(skb);
1da177e4
LT
974
975 if (isn == 0)
976 isn = tcp_v6_init_sequence(sk,skb);
977
2e6599cb 978 tcp_rsk(req)->snt_isn = isn;
1da177e4
LT
979
980 if (tcp_v6_send_synack(sk, req, NULL))
981 goto drop;
982
8129765a 983 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1da177e4
LT
984 return 0;
985
986drop:
987 if (req)
60236fdd 988 reqsk_free(req);
1da177e4
LT
989
990 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
991 return 0; /* don't send reset */
992}
993
994static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
60236fdd 995 struct request_sock *req,
1da177e4
LT
996 struct dst_entry *dst)
997{
ca304b61 998 struct inet6_request_sock *treq = inet6_rsk(req);
1da177e4
LT
999 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1000 struct tcp6_sock *newtcp6sk;
1001 struct inet_sock *newinet;
1002 struct tcp_sock *newtp;
1003 struct sock *newsk;
1004 struct ipv6_txoptions *opt;
1005
1006 if (skb->protocol == htons(ETH_P_IP)) {
1007 /*
1008 * v6 mapped
1009 */
1010
1011 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1012
1013 if (newsk == NULL)
1014 return NULL;
1015
1016 newtcp6sk = (struct tcp6_sock *)newsk;
1017 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1018
1019 newinet = inet_sk(newsk);
1020 newnp = inet6_sk(newsk);
1021 newtp = tcp_sk(newsk);
1022
1023 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1024
1025 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1026 newinet->daddr);
1027
1028 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1029 newinet->saddr);
1030
1031 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1032
8292a17a 1033 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4
LT
1034 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1035 newnp->pktoptions = NULL;
1036 newnp->opt = NULL;
505cbfc5 1037 newnp->mcast_oif = inet6_iif(skb);
1da177e4
LT
1038 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
1039
e6848976
ACM
1040 /*
1041 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1042 * here, tcp_create_openreq_child now does this for us, see the comment in
1043 * that function for the gory details. -acme
1da177e4 1044 */
1da177e4
LT
1045
1046 /* It is tricky place. Until this moment IPv4 tcp
8292a17a 1047 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1048 Sync it now.
1049 */
1050 tcp_sync_mss(newsk, newtp->pmtu_cookie);
1051
1052 return newsk;
1053 }
1054
1055 opt = np->opt;
1056
1057 if (sk_acceptq_is_full(sk))
1058 goto out_overflow;
1059
333fad53 1060 if (np->rxopt.bits.osrcrt == 2 &&
2e6599cb
ACM
1061 opt == NULL && treq->pktopts) {
1062 struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
1da177e4 1063 if (rxopt->srcrt)
2e6599cb 1064 opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
1da177e4
LT
1065 }
1066
1067 if (dst == NULL) {
1068 struct in6_addr *final_p = NULL, final;
1069 struct flowi fl;
1070
1071 memset(&fl, 0, sizeof(fl));
1072 fl.proto = IPPROTO_TCP;
2e6599cb 1073 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1da177e4
LT
1074 if (opt && opt->srcrt) {
1075 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1076 ipv6_addr_copy(&final, &fl.fl6_dst);
1077 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1078 final_p = &final;
1079 }
2e6599cb 1080 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1da177e4 1081 fl.oif = sk->sk_bound_dev_if;
2e6599cb 1082 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1da177e4
LT
1083 fl.fl_ip_sport = inet_sk(sk)->sport;
1084
1085 if (ip6_dst_lookup(sk, &dst, &fl))
1086 goto out;
1087
1088 if (final_p)
1089 ipv6_addr_copy(&fl.fl6_dst, final_p);
1090
1091 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1092 goto out;
1093 }
1094
1095 newsk = tcp_create_openreq_child(sk, req, skb);
1096 if (newsk == NULL)
1097 goto out;
1098
e6848976
ACM
1099 /*
1100 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1101 * count here, tcp_create_openreq_child now does this for us, see the
1102 * comment in that function for the gory details. -acme
1103 */
1da177e4
LT
1104
1105 ip6_dst_store(newsk, dst, NULL);
1106 newsk->sk_route_caps = dst->dev->features &
1107 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1108
1109 newtcp6sk = (struct tcp6_sock *)newsk;
1110 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1111
1112 newtp = tcp_sk(newsk);
1113 newinet = inet_sk(newsk);
1114 newnp = inet6_sk(newsk);
1115
1116 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1117
2e6599cb
ACM
1118 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1119 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1120 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1121 newsk->sk_bound_dev_if = treq->iif;
1da177e4
LT
1122
1123 /* Now IPv6 options...
1124
1125 First: no IPv4 options.
1126 */
1127 newinet->opt = NULL;
1128
1129 /* Clone RX bits */
1130 newnp->rxopt.all = np->rxopt.all;
1131
1132 /* Clone pktoptions received with SYN */
1133 newnp->pktoptions = NULL;
2e6599cb
ACM
1134 if (treq->pktopts != NULL) {
1135 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1136 kfree_skb(treq->pktopts);
1137 treq->pktopts = NULL;
1da177e4
LT
1138 if (newnp->pktoptions)
1139 skb_set_owner_r(newnp->pktoptions, newsk);
1140 }
1141 newnp->opt = NULL;
505cbfc5 1142 newnp->mcast_oif = inet6_iif(skb);
1da177e4
LT
1143 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
1144
1145 /* Clone native IPv6 options from listening socket (if any)
1146
1147 Yes, keeping reference count would be much more clever,
1148 but we make one more one thing there: reattach optmem
1149 to newsk.
1150 */
1151 if (opt) {
1152 newnp->opt = ipv6_dup_options(newsk, opt);
1153 if (opt != np->opt)
1154 sock_kfree_s(sk, opt, opt->tot_len);
1155 }
1156
1157 newtp->ext_header_len = 0;
1158 if (newnp->opt)
1159 newtp->ext_header_len = newnp->opt->opt_nflen +
1160 newnp->opt->opt_flen;
1161
1162 tcp_sync_mss(newsk, dst_mtu(dst));
1163 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1164 tcp_initialize_rcv_mss(newsk);
1165
1166 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1167
90b19d31 1168 __inet6_hash(&tcp_hashinfo, newsk);
2d8c4ce5 1169 inet_inherit_port(&tcp_hashinfo, sk, newsk);
1da177e4
LT
1170
1171 return newsk;
1172
1173out_overflow:
1174 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1175out:
1176 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1177 if (opt && opt != np->opt)
1178 sock_kfree_s(sk, opt, opt->tot_len);
1179 dst_release(dst);
1180 return NULL;
1181}
1182
1183static int tcp_v6_checksum_init(struct sk_buff *skb)
1184{
1185 if (skb->ip_summed == CHECKSUM_HW) {
1da177e4 1186 if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
fb286bb2
HX
1187 &skb->nh.ipv6h->daddr,skb->csum)) {
1188 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 1189 return 0;
fb286bb2 1190 }
1da177e4 1191 }
fb286bb2
HX
1192
1193 skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1194 &skb->nh.ipv6h->daddr, 0);
1195
1da177e4 1196 if (skb->len <= 76) {
fb286bb2 1197 return __skb_checksum_complete(skb);
1da177e4
LT
1198 }
1199 return 0;
1200}
1201
1202/* The socket must have it's spinlock held when we get
1203 * here.
1204 *
1205 * We have a potential double-lock case here, so even when
1206 * doing backlog processing we use the BH locking scheme.
1207 * This is because we cannot sleep with the original spinlock
1208 * held.
1209 */
1210static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1211{
1212 struct ipv6_pinfo *np = inet6_sk(sk);
1213 struct tcp_sock *tp;
1214 struct sk_buff *opt_skb = NULL;
1215
1216 /* Imagine: socket is IPv6. IPv4 packet arrives,
1217 goes to IPv4 receive handler and backlogged.
1218 From backlog it always goes here. Kerboom...
1219 Fortunately, tcp_rcv_established and rcv_established
1220 handle them correctly, but it is not case with
1221 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1222 */
1223
1224 if (skb->protocol == htons(ETH_P_IP))
1225 return tcp_v4_do_rcv(sk, skb);
1226
1227 if (sk_filter(sk, skb, 0))
1228 goto discard;
1229
1230 /*
1231 * socket locking is here for SMP purposes as backlog rcv
1232 * is currently called with bh processing disabled.
1233 */
1234
1235 /* Do Stevens' IPV6_PKTOPTIONS.
1236
1237 Yes, guys, it is the only place in our code, where we
1238 may make it not affecting IPv4.
1239 The rest of code is protocol independent,
1240 and I do not like idea to uglify IPv4.
1241
1242 Actually, all the idea behind IPV6_PKTOPTIONS
1243 looks not very well thought. For now we latch
1244 options, received in the last packet, enqueued
1245 by tcp. Feel free to propose better solution.
1246 --ANK (980728)
1247 */
1248 if (np->rxopt.all)
1249 opt_skb = skb_clone(skb, GFP_ATOMIC);
1250
1251 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1252 TCP_CHECK_TIMER(sk);
1253 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1254 goto reset;
1255 TCP_CHECK_TIMER(sk);
1256 if (opt_skb)
1257 goto ipv6_pktoptions;
1258 return 0;
1259 }
1260
1261 if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
1262 goto csum_err;
1263
1264 if (sk->sk_state == TCP_LISTEN) {
1265 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1266 if (!nsk)
1267 goto discard;
1268
1269 /*
1270 * Queue it on the new socket if the new socket is active,
1271 * otherwise we just shortcircuit this and continue with
1272 * the new socket..
1273 */
1274 if(nsk != sk) {
1275 if (tcp_child_process(sk, nsk, skb))
1276 goto reset;
1277 if (opt_skb)
1278 __kfree_skb(opt_skb);
1279 return 0;
1280 }
1281 }
1282
1283 TCP_CHECK_TIMER(sk);
1284 if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1285 goto reset;
1286 TCP_CHECK_TIMER(sk);
1287 if (opt_skb)
1288 goto ipv6_pktoptions;
1289 return 0;
1290
1291reset:
1292 tcp_v6_send_reset(skb);
1293discard:
1294 if (opt_skb)
1295 __kfree_skb(opt_skb);
1296 kfree_skb(skb);
1297 return 0;
1298csum_err:
1299 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1300 goto discard;
1301
1302
1303ipv6_pktoptions:
1304 /* Do you ask, what is it?
1305
1306 1. skb was enqueued by tcp.
1307 2. skb is added to tail of read queue, rather than out of order.
1308 3. socket is not in passive state.
1309 4. Finally, it really contains options, which user wants to receive.
1310 */
1311 tp = tcp_sk(sk);
1312 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1313 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1314 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
505cbfc5 1315 np->mcast_oif = inet6_iif(opt_skb);
333fad53 1316 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1da177e4
LT
1317 np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
1318 if (ipv6_opt_accepted(sk, opt_skb)) {
1319 skb_set_owner_r(opt_skb, sk);
1320 opt_skb = xchg(&np->pktoptions, opt_skb);
1321 } else {
1322 __kfree_skb(opt_skb);
1323 opt_skb = xchg(&np->pktoptions, NULL);
1324 }
1325 }
1326
1327 if (opt_skb)
1328 kfree_skb(opt_skb);
1329 return 0;
1330}
1331
1332static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
1333{
1334 struct sk_buff *skb = *pskb;
1335 struct tcphdr *th;
1336 struct sock *sk;
1337 int ret;
1338
1339 if (skb->pkt_type != PACKET_HOST)
1340 goto discard_it;
1341
1342 /*
1343 * Count it even if it's bad.
1344 */
1345 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1346
1347 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1348 goto discard_it;
1349
1350 th = skb->h.th;
1351
1352 if (th->doff < sizeof(struct tcphdr)/4)
1353 goto bad_packet;
1354 if (!pskb_may_pull(skb, th->doff*4))
1355 goto discard_it;
1356
1357 if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
fb286bb2 1358 tcp_v6_checksum_init(skb)))
1da177e4
LT
1359 goto bad_packet;
1360
1361 th = skb->h.th;
1362 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1363 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1364 skb->len - th->doff*4);
1365 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1366 TCP_SKB_CB(skb)->when = 0;
1367 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
1368 TCP_SKB_CB(skb)->sacked = 0;
1369
505cbfc5
ACM
1370 sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
1371 &skb->nh.ipv6h->daddr, ntohs(th->dest),
1372 inet6_iif(skb));
1da177e4
LT
1373
1374 if (!sk)
1375 goto no_tcp_socket;
1376
1377process:
1378 if (sk->sk_state == TCP_TIME_WAIT)
1379 goto do_time_wait;
1380
1381 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1382 goto discard_and_relse;
1383
1384 if (sk_filter(sk, skb, 0))
1385 goto discard_and_relse;
1386
1387 skb->dev = NULL;
1388
1389 bh_lock_sock(sk);
1390 ret = 0;
1391 if (!sock_owned_by_user(sk)) {
1392 if (!tcp_prequeue(sk, skb))
1393 ret = tcp_v6_do_rcv(sk, skb);
1394 } else
1395 sk_add_backlog(sk, skb);
1396 bh_unlock_sock(sk);
1397
1398 sock_put(sk);
1399 return ret ? -1 : 0;
1400
1401no_tcp_socket:
1402 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1403 goto discard_it;
1404
1405 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1406bad_packet:
1407 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1408 } else {
1409 tcp_v6_send_reset(skb);
1410 }
1411
1412discard_it:
1413
1414 /*
1415 * Discard frame
1416 */
1417
1418 kfree_skb(skb);
1419 return 0;
1420
1421discard_and_relse:
1422 sock_put(sk);
1423 goto discard_it;
1424
1425do_time_wait:
1426 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
8feaf0c0 1427 inet_twsk_put((struct inet_timewait_sock *)sk);
1da177e4
LT
1428 goto discard_it;
1429 }
1430
1431 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1432 TCP_INC_STATS_BH(TCP_MIB_INERRS);
8feaf0c0 1433 inet_twsk_put((struct inet_timewait_sock *)sk);
1da177e4
LT
1434 goto discard_it;
1435 }
1436
8feaf0c0
ACM
1437 switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
1438 skb, th)) {
1da177e4
LT
1439 case TCP_TW_SYN:
1440 {
1441 struct sock *sk2;
1442
505cbfc5
ACM
1443 sk2 = inet6_lookup_listener(&tcp_hashinfo,
1444 &skb->nh.ipv6h->daddr,
1445 ntohs(th->dest), inet6_iif(skb));
1da177e4 1446 if (sk2 != NULL) {
295ff7ed
ACM
1447 struct inet_timewait_sock *tw = inet_twsk(sk);
1448 inet_twsk_deschedule(tw, &tcp_death_row);
1449 inet_twsk_put(tw);
1da177e4
LT
1450 sk = sk2;
1451 goto process;
1452 }
1453 /* Fall through to ACK */
1454 }
1455 case TCP_TW_ACK:
1456 tcp_v6_timewait_ack(sk, skb);
1457 break;
1458 case TCP_TW_RST:
1459 goto no_tcp_socket;
1460 case TCP_TW_SUCCESS:;
1461 }
1462 goto discard_it;
1463}
1464
1da177e4
LT
/* Timestamp caching for IPv6 peers is not implemented yet. */
static int tcp_v6_remember_stamp(struct sock *sk)
{
	return 0;
}
1470
8292a17a 1471static struct inet_connection_sock_af_ops ipv6_specific = {
b9750ce1 1472 .queue_xmit = inet6_csk_xmit,
1da177e4 1473 .send_check = tcp_v6_send_check,
b9750ce1 1474 .rebuild_header = inet6_sk_rebuild_header,
1da177e4
LT
1475 .conn_request = tcp_v6_conn_request,
1476 .syn_recv_sock = tcp_v6_syn_recv_sock,
1477 .remember_stamp = tcp_v6_remember_stamp,
1478 .net_header_len = sizeof(struct ipv6hdr),
1479
1480 .setsockopt = ipv6_setsockopt,
1481 .getsockopt = ipv6_getsockopt,
b9750ce1 1482 .addr2sockaddr = inet6_csk_addr2sockaddr,
1da177e4
LT
1483 .sockaddr_len = sizeof(struct sockaddr_in6)
1484};
1485
1486/*
1487 * TCP over IPv4 via INET6 API
1488 */
1489
8292a17a 1490static struct inet_connection_sock_af_ops ipv6_mapped = {
1da177e4
LT
1491 .queue_xmit = ip_queue_xmit,
1492 .send_check = tcp_v4_send_check,
32519f11 1493 .rebuild_header = inet_sk_rebuild_header,
1da177e4
LT
1494 .conn_request = tcp_v6_conn_request,
1495 .syn_recv_sock = tcp_v6_syn_recv_sock,
1496 .remember_stamp = tcp_v4_remember_stamp,
1497 .net_header_len = sizeof(struct iphdr),
1498
1499 .setsockopt = ipv6_setsockopt,
1500 .getsockopt = ipv6_getsockopt,
b9750ce1 1501 .addr2sockaddr = inet6_csk_addr2sockaddr,
1da177e4
LT
1502 .sockaddr_len = sizeof(struct sockaddr_in6)
1503};
1504
1505
1506
1507/* NOTE: A lot of things set to zero explicitly by call to
1508 * sk_alloc() so need not be done here.
1509 */
1510static int tcp_v6_init_sock(struct sock *sk)
1511{
6687e988 1512 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1513 struct tcp_sock *tp = tcp_sk(sk);
1514
1515 skb_queue_head_init(&tp->out_of_order_queue);
1516 tcp_init_xmit_timers(sk);
1517 tcp_prequeue_init(tp);
1518
6687e988 1519 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1520 tp->mdev = TCP_TIMEOUT_INIT;
1521
1522 /* So many TCP implementations out there (incorrectly) count the
1523 * initial SYN frame in their delayed-ACK and congestion control
1524 * algorithms that we must have the following bandaid to talk
1525 * efficiently to them. -DaveM
1526 */
1527 tp->snd_cwnd = 2;
1528
1529 /* See draft-stevens-tcpca-spec-01 for discussion of the
1530 * initialization of these values.
1531 */
1532 tp->snd_ssthresh = 0x7fffffff;
1533 tp->snd_cwnd_clamp = ~0;
c1b4a7e6 1534 tp->mss_cache = 536;
1da177e4
LT
1535
1536 tp->reordering = sysctl_tcp_reordering;
1537
1538 sk->sk_state = TCP_CLOSE;
1539
8292a17a 1540 icsk->icsk_af_ops = &ipv6_specific;
6687e988 1541 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1da177e4
LT
1542 sk->sk_write_space = sk_stream_write_space;
1543 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1544
1545 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1546 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1547
1548 atomic_inc(&tcp_sockets_allocated);
1549
1550 return 0;
1551}
1552
/* Tear down TCP state, then the generic IPv6 socket state. */
static int tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
1558
1559/* Proc filesystem TCPv6 sock list dumping. */
1560static void get_openreq6(struct seq_file *seq,
60236fdd 1561 struct sock *sk, struct request_sock *req, int i, int uid)
1da177e4 1562{
1da177e4 1563 int ttd = req->expires - jiffies;
ca304b61
ACM
1564 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1565 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1da177e4
LT
1566
1567 if (ttd < 0)
1568 ttd = 0;
1569
1da177e4
LT
1570 seq_printf(seq,
1571 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1572 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1573 i,
1574 src->s6_addr32[0], src->s6_addr32[1],
1575 src->s6_addr32[2], src->s6_addr32[3],
1576 ntohs(inet_sk(sk)->sport),
1577 dest->s6_addr32[0], dest->s6_addr32[1],
1578 dest->s6_addr32[2], dest->s6_addr32[3],
2e6599cb 1579 ntohs(inet_rsk(req)->rmt_port),
1da177e4
LT
1580 TCP_SYN_RECV,
1581 0,0, /* could print option size, but that is af dependent. */
1582 1, /* timers active (only the expire timer) */
1583 jiffies_to_clock_t(ttd),
1584 req->retrans,
1585 uid,
1586 0, /* non standard timer */
1587 0, /* open_requests have no inode */
1588 0, req);
1589}
1590
1591static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1592{
1593 struct in6_addr *dest, *src;
1594 __u16 destp, srcp;
1595 int timer_active;
1596 unsigned long timer_expires;
1597 struct inet_sock *inet = inet_sk(sp);
1598 struct tcp_sock *tp = tcp_sk(sp);
463c84b9 1599 const struct inet_connection_sock *icsk = inet_csk(sp);
1da177e4
LT
1600 struct ipv6_pinfo *np = inet6_sk(sp);
1601
1602 dest = &np->daddr;
1603 src = &np->rcv_saddr;
1604 destp = ntohs(inet->dport);
1605 srcp = ntohs(inet->sport);
463c84b9
ACM
1606
1607 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 1608 timer_active = 1;
463c84b9
ACM
1609 timer_expires = icsk->icsk_timeout;
1610 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 1611 timer_active = 4;
463c84b9 1612 timer_expires = icsk->icsk_timeout;
1da177e4
LT
1613 } else if (timer_pending(&sp->sk_timer)) {
1614 timer_active = 2;
1615 timer_expires = sp->sk_timer.expires;
1616 } else {
1617 timer_active = 0;
1618 timer_expires = jiffies;
1619 }
1620
1621 seq_printf(seq,
1622 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1623 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
1624 i,
1625 src->s6_addr32[0], src->s6_addr32[1],
1626 src->s6_addr32[2], src->s6_addr32[3], srcp,
1627 dest->s6_addr32[0], dest->s6_addr32[1],
1628 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1629 sp->sk_state,
1630 tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
1631 timer_active,
1632 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 1633 icsk->icsk_retransmits,
1da177e4 1634 sock_i_uid(sp),
6687e988 1635 icsk->icsk_probes_out,
1da177e4
LT
1636 sock_i_ino(sp),
1637 atomic_read(&sp->sk_refcnt), sp,
463c84b9
ACM
1638 icsk->icsk_rto,
1639 icsk->icsk_ack.ato,
1640 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
1da177e4
LT
1641 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
1642 );
1643}
1644
1645static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 1646 struct inet_timewait_sock *tw, int i)
1da177e4
LT
1647{
1648 struct in6_addr *dest, *src;
1649 __u16 destp, srcp;
0fa1a53e 1650 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1da177e4
LT
1651 int ttd = tw->tw_ttd - jiffies;
1652
1653 if (ttd < 0)
1654 ttd = 0;
1655
0fa1a53e
ACM
1656 dest = &tw6->tw_v6_daddr;
1657 src = &tw6->tw_v6_rcv_saddr;
1da177e4
LT
1658 destp = ntohs(tw->tw_dport);
1659 srcp = ntohs(tw->tw_sport);
1660
1661 seq_printf(seq,
1662 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1663 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1664 i,
1665 src->s6_addr32[0], src->s6_addr32[1],
1666 src->s6_addr32[2], src->s6_addr32[3], srcp,
1667 dest->s6_addr32[0], dest->s6_addr32[1],
1668 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1669 tw->tw_substate, 0, 0,
1670 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1671 atomic_read(&tw->tw_refcnt), tw);
1672}
1673
1674#ifdef CONFIG_PROC_FS
1675static int tcp6_seq_show(struct seq_file *seq, void *v)
1676{
1677 struct tcp_iter_state *st;
1678
1679 if (v == SEQ_START_TOKEN) {
1680 seq_puts(seq,
1681 " sl "
1682 "local_address "
1683 "remote_address "
1684 "st tx_queue rx_queue tr tm->when retrnsmt"
1685 " uid timeout inode\n");
1686 goto out;
1687 }
1688 st = seq->private;
1689
1690 switch (st->state) {
1691 case TCP_SEQ_STATE_LISTENING:
1692 case TCP_SEQ_STATE_ESTABLISHED:
1693 get_tcp6_sock(seq, v, st->num);
1694 break;
1695 case TCP_SEQ_STATE_OPENREQ:
1696 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1697 break;
1698 case TCP_SEQ_STATE_TIME_WAIT:
1699 get_timewait6_sock(seq, v, st->num);
1700 break;
1701 }
1702out:
1703 return 0;
1704}
1705
1706static struct file_operations tcp6_seq_fops;
1707static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1708 .owner = THIS_MODULE,
1709 .name = "tcp6",
1710 .family = AF_INET6,
1711 .seq_show = tcp6_seq_show,
1712 .seq_fops = &tcp6_seq_fops,
1713};
1714
1715int __init tcp6_proc_init(void)
1716{
1717 return tcp_proc_register(&tcp6_seq_afinfo);
1718}
1719
1720void tcp6_proc_exit(void)
1721{
1722 tcp_proc_unregister(&tcp6_seq_afinfo);
1723}
1724#endif
1725
1726struct proto tcpv6_prot = {
1727 .name = "TCPv6",
1728 .owner = THIS_MODULE,
1729 .close = tcp_close,
1730 .connect = tcp_v6_connect,
1731 .disconnect = tcp_disconnect,
463c84b9 1732 .accept = inet_csk_accept,
1da177e4
LT
1733 .ioctl = tcp_ioctl,
1734 .init = tcp_v6_init_sock,
1735 .destroy = tcp_v6_destroy_sock,
1736 .shutdown = tcp_shutdown,
1737 .setsockopt = tcp_setsockopt,
1738 .getsockopt = tcp_getsockopt,
1739 .sendmsg = tcp_sendmsg,
1740 .recvmsg = tcp_recvmsg,
1741 .backlog_rcv = tcp_v6_do_rcv,
1742 .hash = tcp_v6_hash,
1743 .unhash = tcp_unhash,
1744 .get_port = tcp_v6_get_port,
1745 .enter_memory_pressure = tcp_enter_memory_pressure,
1746 .sockets_allocated = &tcp_sockets_allocated,
1747 .memory_allocated = &tcp_memory_allocated,
1748 .memory_pressure = &tcp_memory_pressure,
0a5578cf 1749 .orphan_count = &tcp_orphan_count,
1da177e4
LT
1750 .sysctl_mem = sysctl_tcp_mem,
1751 .sysctl_wmem = sysctl_tcp_wmem,
1752 .sysctl_rmem = sysctl_tcp_rmem,
1753 .max_header = MAX_TCP_HEADER,
1754 .obj_size = sizeof(struct tcp6_sock),
8feaf0c0 1755 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
60236fdd 1756 .rsk_prot = &tcp6_request_sock_ops,
1da177e4
LT
1757};
1758
1759static struct inet6_protocol tcpv6_protocol = {
1760 .handler = tcp_v6_rcv,
1761 .err_handler = tcp_v6_err,
1762 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1763};
1764
1da177e4
LT
1765static struct inet_protosw tcpv6_protosw = {
1766 .type = SOCK_STREAM,
1767 .protocol = IPPROTO_TCP,
1768 .prot = &tcpv6_prot,
1769 .ops = &inet6_stream_ops,
1770 .capability = -1,
1771 .no_check = 0,
1772 .flags = INET_PROTOSW_PERMANENT,
1773};
1774
1775void __init tcpv6_init(void)
1776{
1777 /* register inet6 protocol */
1778 if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
1779 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
1780 inet6_register_protosw(&tcpv6_protosw);
1781}