/*
 *      TCP over IPv6
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      Based on:
 *      linux/net/ipv4/tcp.c
 *      linux/net/ipv4/tcp_input.c
 *      linux/net/ipv4/tcp_output.c
 *
 *      Fixes:
 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void tcp_v6_send_check(struct sock *sk, int len,
                              struct sk_buff *skb);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
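/*
 * With MD5 signature support compiled out, provide a stub lookup that
 * always fails, so callers do not need their own #ifdefs.
 */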
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
                                                   struct in6_addr *addr)
{
        return NULL;
}
#endif

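/*
 * Place the socket in the TCP lookup tables.  Sockets that fell back to
 * IPv4 (v4-mapped, icsk_af_ops == &ipv6_mapped) are hashed by the IPv4
 * code instead.
 */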
static void tcp_v6_hash(struct sock *sk)
{
        if (sk->sk_state != TCP_CLOSE) {
                if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
                        tcp_prot.hash(sk);
                        return;
                }
                local_bh_disable();
                __inet6_hash(sk);
                local_bh_enable();
        }
}

static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
                                       struct in6_addr *saddr,
                                       struct in6_addr *daddr,
                                       __wsum base)
{
        return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
        return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
                                            ipv6_hdr(skb)->saddr.s6_addr32,
                                            tcp_hdr(skb)->dest,
                                            tcp_hdr(skb)->source);
}

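/*
 * Open an active connection.  Handles flow labels and link-local scope
 * ids, and hands v4-mapped destinations off to tcp_v4_connect().
 */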
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                          int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p = NULL, final;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
        int err;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return(-EAFNOSUPPORT);

        memset(&fl, 0, sizeof(fl));

        if (np->sndflow) {
                fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl.fl6_flowlabel);
                if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                        fl6_sock_release(flowlabel);
                }
        }

        /*
         * connect() to INADDR_ANY means loopback (BSD'ism).
         */

        if(ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 0x1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if(addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type&IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connect to link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        if (tp->rx_opt.ts_recent_stamp &&
            !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
                tp->rx_opt.ts_recent = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq = 0;
        }

        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
        np->flow_label = fl.fl6_flowlabel;

        /*
         * TCP over IPv4
         */

        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                icsk->icsk_af_ops = &ipv6_mapped;
                sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &ipv6_specific;
                        sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                        tp->af_specific = &tcp_sock_ipv6_specific;
#endif
                        goto failure;
                } else {
                        ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->saddr);
                        ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->rcv_saddr);
                }

                return err;
        }

        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;

        fl.proto = IPPROTO_TCP;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src,
                       (saddr ? saddr : &np->saddr));
        fl.oif = sk->sk_bound_dev_if;
        fl.fl_ip_dport = usin->sin6_port;
        fl.fl_ip_sport = inet->sport;

        if (np->opt && np->opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        security_sk_classify_flow(sk, &fl);

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto failure;
        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
                if (err == -EREMOTE)
                        err = ip6_dst_blackhole(sk, &dst, &fl);
                if (err < 0)
                        goto failure;
        }

        if (saddr == NULL) {
                saddr = &fl.fl6_src;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }

        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
        inet->rcv_saddr = LOOPBACK4_IPV6;

        sk->sk_gso_type = SKB_GSO_TCPV6;
        __ip6_dst_store(sk, dst, NULL, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (np->opt)
                icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
                                          np->opt->opt_nflen);

        tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

        inet->dport = usin->sin6_port;

        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet6_hash_connect(&tcp_death_row, sk);
        if (err)
                goto late_failure;

        if (!tp->write_seq)
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             np->daddr.s6_addr32,
                                                             inet->sport,
                                                             inet->dport);

        err = tcp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        tcp_set_state(sk, TCP_CLOSE);
        __sk_dst_reset(sk);
failure:
        inet->dport = 0;
        sk->sk_route_caps = 0;
        return err;
}

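/*
 * ICMPv6 error handler for TCP.  Adjusts the path MTU on PKT_TOOBIG and
 * propagates other errors to the owning socket, dropping the matching
 * request_sock for embryonic connections.
 */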
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                       int type, int code, int offset, __be32 info)
{
        struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
        const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        struct tcp_sock *tp;
        __u32 seq;

        sk = inet6_lookup(dev_net(skb->dev), &tcp_hashinfo, &hdr->daddr,
                          th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

        if (sk == NULL) {
                ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
                return;
        }

        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                        goto out;

                /* icmp should have updated the destination cache entry */
                dst = __sk_dst_check(sk, np->dst_cookie);

                if (dst == NULL) {
                        struct inet_sock *inet = inet_sk(sk);
                        struct flowi fl;

                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle rthdr case. Ignore this complexity
                           for now.
                         */
                        memset(&fl, 0, sizeof(fl));
                        fl.proto = IPPROTO_TCP;
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
                        fl.fl_ip_dport = inet->dport;
                        fl.fl_ip_sport = inet->sport;
                        security_skb_classify_flow(skb, &fl);

                        if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                } else
                        dst_hold(dst);

                if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                        tcp_sync_mss(sk, dst_mtu(dst));
                        tcp_simple_retransmit(sk);
                } /* else let the usual retransmit timer handle it */
                dst_release(dst);
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        /* Might be for a request_sock */
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
                                           &hdr->saddr, inet6_iif(skb));
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                 * an established socket here.
                 */
                BUG_TRAP(req->sk == NULL);

                if (seq != tcp_rsk(req)->snt_isn) {
                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen.
                               It can, if SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
                        sk->sk_error_report(sk);        /* Wake people up to see the error (see connect in sock.c) */

                        tcp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}


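/*
 * Build and transmit a SYN|ACK for the given connection request,
 * honouring any routing header configured on the listening socket.
 */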
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
{
        struct inet6_request_sock *treq = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff * skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr * final_p = NULL, final;
        struct flowi fl;
        struct dst_entry *dst;
        int err = -1;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_TCP;
        ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
        ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = treq->iif;
        fl.fl_ip_dport = inet_rsk(req)->rmt_port;
        fl.fl_ip_sport = inet_sk(sk)->sport;
        security_req_classify_flow(req, &fl);

        opt = np->opt;
        if (opt && opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto done;
        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);
        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                goto done;

        skb = tcp_make_synack(sk, dst, req);
        if (skb) {
                struct tcphdr *th = tcp_hdr(skb);

                th->check = tcp_v6_check(th, skb->len,
                                         &treq->loc_addr, &treq->rmt_addr,
                                         csum_partial((char *)th, skb->len, skb->csum));

                ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
                err = ip6_xmit(sk, skb, &fl, opt, 0);
                err = net_xmit_eval(err);
        }

done:
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return err;
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
        if (sysctl_tcp_syncookies)
                printk(KERN_INFO
                       "TCPv6: Possible SYN flooding on port %d. "
                       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
        else
#endif
                printk(KERN_INFO
                       "TCPv6: Possible SYN flooding on port %d. "
                       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
        if (inet6_rsk(req)->pktopts)
                kfree_skb(inet6_rsk(req)->pktopts);
}

cfb6eeb4
YH
537#ifdef CONFIG_TCP_MD5SIG
538static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
539 struct in6_addr *addr)
540{
541 struct tcp_sock *tp = tcp_sk(sk);
542 int i;
543
544 BUG_ON(tp == NULL);
545
546 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
547 return NULL;
548
549 for (i = 0; i < tp->md5sig_info->entries6; i++) {
caad295f 550 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
f8ab18d2 551 return &tp->md5sig_info->keys6[i].base;
cfb6eeb4
YH
552 }
553 return NULL;
554}
555
556static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
557 struct sock *addr_sk)
558{
559 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
560}
561
562static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
563 struct request_sock *req)
564{
565 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
566}
567
568static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
569 char *newkey, u8 newkeylen)
570{
571 /* Add key to the list */
b0a713e9 572 struct tcp_md5sig_key *key;
cfb6eeb4
YH
573 struct tcp_sock *tp = tcp_sk(sk);
574 struct tcp6_md5sig_key *keys;
575
b0a713e9 576 key = tcp_v6_md5_do_lookup(sk, peer);
cfb6eeb4
YH
577 if (key) {
578 /* modify existing entry - just update that one */
b0a713e9
MD
579 kfree(key->key);
580 key->key = newkey;
581 key->keylen = newkeylen;
cfb6eeb4
YH
582 } else {
583 /* reallocate new list if current one is full. */
584 if (!tp->md5sig_info) {
585 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
586 if (!tp->md5sig_info) {
587 kfree(newkey);
588 return -ENOMEM;
589 }
3d7dbeac 590 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4 591 }
aacbe8c8
YH
592 if (tcp_alloc_md5sig_pool() == NULL) {
593 kfree(newkey);
594 return -ENOMEM;
595 }
cfb6eeb4
YH
596 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
597 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
598 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
599
600 if (!keys) {
601 tcp_free_md5sig_pool();
602 kfree(newkey);
603 return -ENOMEM;
604 }
605
606 if (tp->md5sig_info->entries6)
607 memmove(keys, tp->md5sig_info->keys6,
608 (sizeof (tp->md5sig_info->keys6[0]) *
609 tp->md5sig_info->entries6));
610
611 kfree(tp->md5sig_info->keys6);
612 tp->md5sig_info->keys6 = keys;
613 tp->md5sig_info->alloced6++;
614 }
615
616 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
617 peer);
f8ab18d2
DM
618 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
619 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
cfb6eeb4
YH
620
621 tp->md5sig_info->entries6++;
622 }
623 return 0;
624}
625
626static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
627 u8 *newkey, __u8 newkeylen)
628{
629 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
630 newkey, newkeylen);
631}
632
633static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
634{
635 struct tcp_sock *tp = tcp_sk(sk);
636 int i;
637
638 for (i = 0; i < tp->md5sig_info->entries6; i++) {
caad295f 639 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
cfb6eeb4 640 /* Free the key */
f8ab18d2 641 kfree(tp->md5sig_info->keys6[i].base.key);
cfb6eeb4
YH
642 tp->md5sig_info->entries6--;
643
644 if (tp->md5sig_info->entries6 == 0) {
645 kfree(tp->md5sig_info->keys6);
646 tp->md5sig_info->keys6 = NULL;
ca983cef 647 tp->md5sig_info->alloced6 = 0;
cfb6eeb4
YH
648 } else {
649 /* shrink the database */
650 if (tp->md5sig_info->entries6 != i)
651 memmove(&tp->md5sig_info->keys6[i],
652 &tp->md5sig_info->keys6[i+1],
653 (tp->md5sig_info->entries6 - i)
654 * sizeof (tp->md5sig_info->keys6[0]));
655 }
77adefdc
YH
656 tcp_free_md5sig_pool();
657 return 0;
cfb6eeb4
YH
658 }
659 }
660 return -ENOENT;
661}
662
663static void tcp_v6_clear_md5_list (struct sock *sk)
664{
665 struct tcp_sock *tp = tcp_sk(sk);
666 int i;
667
668 if (tp->md5sig_info->entries6) {
669 for (i = 0; i < tp->md5sig_info->entries6; i++)
f8ab18d2 670 kfree(tp->md5sig_info->keys6[i].base.key);
cfb6eeb4
YH
671 tp->md5sig_info->entries6 = 0;
672 tcp_free_md5sig_pool();
673 }
674
675 kfree(tp->md5sig_info->keys6);
676 tp->md5sig_info->keys6 = NULL;
677 tp->md5sig_info->alloced6 = 0;
678
679 if (tp->md5sig_info->entries4) {
680 for (i = 0; i < tp->md5sig_info->entries4; i++)
f8ab18d2 681 kfree(tp->md5sig_info->keys4[i].base.key);
cfb6eeb4
YH
682 tp->md5sig_info->entries4 = 0;
683 tcp_free_md5sig_pool();
684 }
685
686 kfree(tp->md5sig_info->keys4);
687 tp->md5sig_info->keys4 = NULL;
688 tp->md5sig_info->alloced4 = 0;
689}
690
691static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
692 int optlen)
693{
694 struct tcp_md5sig cmd;
695 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
696 u8 *newkey;
697
698 if (optlen < sizeof(cmd))
699 return -EINVAL;
700
701 if (copy_from_user(&cmd, optval, sizeof(cmd)))
702 return -EFAULT;
703
704 if (sin6->sin6_family != AF_INET6)
705 return -EINVAL;
706
707 if (!cmd.tcpm_keylen) {
708 if (!tcp_sk(sk)->md5sig_info)
709 return -ENOENT;
e773e4fa 710 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
cfb6eeb4
YH
711 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
712 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
713 }
714
715 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
716 return -EINVAL;
717
718 if (!tcp_sk(sk)->md5sig_info) {
719 struct tcp_sock *tp = tcp_sk(sk);
720 struct tcp_md5sig_info *p;
721
722 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
723 if (!p)
724 return -ENOMEM;
725
726 tp->md5sig_info = p;
3d7dbeac 727 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
cfb6eeb4
YH
728 }
729
af879cc7 730 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
cfb6eeb4
YH
731 if (!newkey)
732 return -ENOMEM;
e773e4fa 733 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
cfb6eeb4
YH
734 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
735 newkey, cmd.tcpm_keylen);
736 }
737 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
738}
739
740static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
741 struct in6_addr *saddr,
742 struct in6_addr *daddr,
076fb722 743 struct tcphdr *th, unsigned int tcplen)
cfb6eeb4 744{
cfb6eeb4
YH
745 struct tcp_md5sig_pool *hp;
746 struct tcp6_pseudohdr *bp;
cfb6eeb4 747 int err;
cfb6eeb4
YH
748
749 hp = tcp_get_md5sig_pool();
750 if (!hp) {
0dc47877 751 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
cfb6eeb4
YH
752 goto clear_hash_noput;
753 }
8d26d76d 754
cfb6eeb4 755 bp = &hp->md5_blk.ip6;
cfb6eeb4
YH
756
757 /* 1. TCP pseudo-header (RFC2460) */
758 ipv6_addr_copy(&bp->saddr, saddr);
759 ipv6_addr_copy(&bp->daddr, daddr);
760 bp->len = htonl(tcplen);
076fb722 761 bp->protocol = htonl(IPPROTO_TCP);
cfb6eeb4 762
8d26d76d
YH
763 err = tcp_calc_md5_hash(md5_hash, key, sizeof(*bp),
764 th, tcplen, hp);
c7da57a1 765
8d26d76d 766 if (err)
cfb6eeb4 767 goto clear_hash;
cfb6eeb4 768
8d26d76d 769 /* Free up the crypto pool */
cfb6eeb4 770 tcp_put_md5sig_pool();
cfb6eeb4
YH
771out:
772 return 0;
773clear_hash:
774 tcp_put_md5sig_pool();
775clear_hash_noput:
776 memset(md5_hash, 0, 16);
777 goto out;
778}
779
780static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
781 struct sock *sk,
782 struct dst_entry *dst,
783 struct request_sock *req,
076fb722 784 struct tcphdr *th, unsigned int tcplen)
cfb6eeb4
YH
785{
786 struct in6_addr *saddr, *daddr;
787
788 if (sk) {
789 saddr = &inet6_sk(sk)->saddr;
790 daddr = &inet6_sk(sk)->daddr;
791 } else {
792 saddr = &inet6_rsk(req)->loc_addr;
793 daddr = &inet6_rsk(req)->rmt_addr;
794 }
795 return tcp_v6_do_calc_md5_hash(md5_hash, key,
796 saddr, daddr,
076fb722 797 th, tcplen);
cfb6eeb4
YH
798}
799
800static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
801{
802 __u8 *hash_location = NULL;
803 struct tcp_md5sig_key *hash_expected;
0660e03f 804 struct ipv6hdr *ip6h = ipv6_hdr(skb);
aa8223c7 805 struct tcphdr *th = tcp_hdr(skb);
cfb6eeb4 806 int genhash;
cfb6eeb4
YH
807 u8 newhash[16];
808
809 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
7d5d5525 810 hash_location = tcp_parse_md5sig_option(th);
cfb6eeb4 811
cfb6eeb4
YH
812 /* do we have a hash as expected? */
813 if (!hash_expected) {
814 if (!hash_location)
815 return 0;
816 if (net_ratelimit()) {
817 printk(KERN_INFO "MD5 Hash NOT expected but found "
818 "(" NIP6_FMT ", %u)->"
819 "(" NIP6_FMT ", %u)\n",
820 NIP6(ip6h->saddr), ntohs(th->source),
821 NIP6(ip6h->daddr), ntohs(th->dest));
822 }
823 return 1;
824 }
825
826 if (!hash_location) {
827 if (net_ratelimit()) {
828 printk(KERN_INFO "MD5 Hash expected but NOT found "
829 "(" NIP6_FMT ", %u)->"
830 "(" NIP6_FMT ", %u)\n",
831 NIP6(ip6h->saddr), ntohs(th->source),
832 NIP6(ip6h->daddr), ntohs(th->dest));
833 }
834 return 1;
835 }
836
837 /* check the signature */
838 genhash = tcp_v6_do_calc_md5_hash(newhash,
839 hash_expected,
840 &ip6h->saddr, &ip6h->daddr,
076fb722 841 th, skb->len);
cfb6eeb4
YH
842 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
843 if (net_ratelimit()) {
844 printk(KERN_INFO "MD5 Hash %s for "
845 "(" NIP6_FMT ", %u)->"
846 "(" NIP6_FMT ", %u)\n",
847 genhash ? "failed" : "mismatch",
848 NIP6(ip6h->saddr), ntohs(th->source),
849 NIP6(ip6h->daddr), ntohs(th->dest));
850 }
851 return 1;
852 }
853 return 0;
854}
855#endif
856
c6aefafb 857struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
1da177e4 858 .family = AF_INET6,
2e6599cb 859 .obj_size = sizeof(struct tcp6_request_sock),
1da177e4 860 .rtx_syn_ack = tcp_v6_send_synack,
60236fdd
ACM
861 .send_ack = tcp_v6_reqsk_send_ack,
862 .destructor = tcp_v6_reqsk_destructor,
1da177e4
LT
863 .send_reset = tcp_v6_send_reset
864};
865
cfb6eeb4 866#ifdef CONFIG_TCP_MD5SIG
b6332e6c 867static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
cfb6eeb4 868 .md5_lookup = tcp_v6_reqsk_md5_lookup,
cfb6eeb4 869};
b6332e6c 870#endif
cfb6eeb4 871
6d6ee43e
ACM
872static struct timewait_sock_ops tcp6_timewait_sock_ops = {
873 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
874 .twsk_unique = tcp_twsk_unique,
cfb6eeb4 875 .twsk_destructor= tcp_twsk_destructor,
6d6ee43e
ACM
876};
877
8292a17a 878static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
1da177e4
LT
879{
880 struct ipv6_pinfo *np = inet6_sk(sk);
aa8223c7 881 struct tcphdr *th = tcp_hdr(skb);
1da177e4 882
84fa7933 883 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1da177e4 884 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
663ead3b 885 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 886 skb->csum_offset = offsetof(struct tcphdr, check);
1da177e4 887 } else {
1ab1457c
YH
888 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
889 csum_partial((char *)th, th->doff<<2,
1da177e4
LT
890 skb->csum));
891 }
892}
893
a430a43d
HX
894static int tcp_v6_gso_send_check(struct sk_buff *skb)
895{
896 struct ipv6hdr *ipv6h;
897 struct tcphdr *th;
898
899 if (!pskb_may_pull(skb, sizeof(*th)))
900 return -EINVAL;
901
0660e03f 902 ipv6h = ipv6_hdr(skb);
aa8223c7 903 th = tcp_hdr(skb);
a430a43d
HX
904
905 th->check = 0;
906 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
907 IPPROTO_TCP, 0);
663ead3b 908 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 909 skb->csum_offset = offsetof(struct tcphdr, check);
84fa7933 910 skb->ip_summed = CHECKSUM_PARTIAL;
a430a43d
HX
911 return 0;
912}
1da177e4 913
cfb6eeb4 914static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1da177e4 915{
aa8223c7 916 struct tcphdr *th = tcp_hdr(skb), *t1;
1da177e4
LT
917 struct sk_buff *buff;
918 struct flowi fl;
c346dca1 919 struct net *net = dev_net(skb->dst->dev);
e5047992 920 struct sock *ctl_sk = net->ipv6.tcp_sk;
9cb5734e 921 unsigned int tot_len = sizeof(*th);
cfb6eeb4
YH
922#ifdef CONFIG_TCP_MD5SIG
923 struct tcp_md5sig_key *key;
924#endif
1da177e4
LT
925
926 if (th->rst)
927 return;
928
929 if (!ipv6_unicast_destination(skb))
1ab1457c 930 return;
1da177e4 931
cfb6eeb4
YH
932#ifdef CONFIG_TCP_MD5SIG
933 if (sk)
0660e03f 934 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
cfb6eeb4
YH
935 else
936 key = NULL;
937
938 if (key)
939 tot_len += TCPOLEN_MD5SIG_ALIGNED;
940#endif
941
1da177e4
LT
942 /*
943 * We need to grab some memory, and put together an RST,
944 * and then put it into the queue to be sent.
945 */
946
cfb6eeb4 947 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1da177e4 948 GFP_ATOMIC);
1ab1457c
YH
949 if (buff == NULL)
950 return;
1da177e4 951
cfb6eeb4 952 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1da177e4 953
cfb6eeb4 954 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1da177e4
LT
955
956 /* Swap the send and the receive. */
957 memset(t1, 0, sizeof(*t1));
958 t1->dest = th->source;
959 t1->source = th->dest;
cfb6eeb4 960 t1->doff = tot_len / 4;
1da177e4 961 t1->rst = 1;
1ab1457c 962
1da177e4 963 if(th->ack) {
1ab1457c 964 t1->seq = th->ack_seq;
1da177e4
LT
965 } else {
966 t1->ack = 1;
967 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
968 + skb->len - (th->doff<<2));
969 }
970
cfb6eeb4
YH
971#ifdef CONFIG_TCP_MD5SIG
972 if (key) {
8e5200f5 973 __be32 *opt = (__be32*)(t1 + 1);
cfb6eeb4
YH
974 opt[0] = htonl((TCPOPT_NOP << 24) |
975 (TCPOPT_NOP << 16) |
976 (TCPOPT_MD5SIG << 8) |
977 TCPOLEN_MD5SIG);
0660e03f
ACM
978 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
979 &ipv6_hdr(skb)->daddr,
980 &ipv6_hdr(skb)->saddr,
076fb722 981 t1, tot_len);
cfb6eeb4
YH
982 }
983#endif
984
1da177e4
LT
985 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
986
987 memset(&fl, 0, sizeof(fl));
0660e03f
ACM
988 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
989 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1da177e4
LT
990
991 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
992 sizeof(*t1), IPPROTO_TCP,
993 buff->csum);
994
995 fl.proto = IPPROTO_TCP;
505cbfc5 996 fl.oif = inet6_iif(skb);
1da177e4
LT
997 fl.fl_ip_dport = t1->dest;
998 fl.fl_ip_sport = t1->source;
beb8d13b 999 security_skb_classify_flow(skb, &fl);
1da177e4 1000
c20121ae
DL
1001 /* Pass a socket to ip6_dst_lookup either it is for RST
1002 * Underlying function will use this to retrieve the network
1003 * namespace
1004 */
e5047992 1005 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
1da177e4 1006
ecc51b6d 1007 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
e5047992 1008 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
ecc51b6d
ACM
1009 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1010 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1da177e4 1011 return;
ecc51b6d 1012 }
1da177e4
LT
1013 }
1014
1015 kfree_skb(buff);
1016}
1017
9501f972
YH
1018static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1019 struct tcp_md5sig_key *key)
1da177e4 1020{
aa8223c7 1021 struct tcphdr *th = tcp_hdr(skb), *t1;
1da177e4
LT
1022 struct sk_buff *buff;
1023 struct flowi fl;
c346dca1 1024 struct net *net = dev_net(skb->dev);
e5047992 1025 struct sock *ctl_sk = net->ipv6.tcp_sk;
9cb5734e 1026 unsigned int tot_len = sizeof(struct tcphdr);
e69a4adc 1027 __be32 *topt;
1da177e4
LT
1028
1029 if (ts)
4244f8a9 1030 tot_len += TCPOLEN_TSTAMP_ALIGNED;
cfb6eeb4
YH
1031#ifdef CONFIG_TCP_MD5SIG
1032 if (key)
1033 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1034#endif
1da177e4
LT
1035
1036 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1037 GFP_ATOMIC);
1038 if (buff == NULL)
1039 return;
1040
1041 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1042
1043 t1 = (struct tcphdr *) skb_push(buff,tot_len);
1044
1045 /* Swap the send and the receive. */
1046 memset(t1, 0, sizeof(*t1));
1047 t1->dest = th->source;
1048 t1->source = th->dest;
1049 t1->doff = tot_len/4;
1050 t1->seq = htonl(seq);
1051 t1->ack_seq = htonl(ack);
1052 t1->ack = 1;
1053 t1->window = htons(win);
cfb6eeb4 1054
e69a4adc 1055 topt = (__be32 *)(t1 + 1);
1ab1457c 1056
1da177e4 1057 if (ts) {
cfb6eeb4
YH
1058 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1059 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1060 *topt++ = htonl(tcp_time_stamp);
1061 *topt = htonl(ts);
1da177e4
LT
1062 }
1063
cfb6eeb4
YH
1064#ifdef CONFIG_TCP_MD5SIG
1065 if (key) {
1066 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1067 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
0660e03f
ACM
1068 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1069 &ipv6_hdr(skb)->daddr,
1070 &ipv6_hdr(skb)->saddr,
076fb722 1071 t1, tot_len);
cfb6eeb4
YH
1072 }
1073#endif
1074
1da177e4
LT
1075 buff->csum = csum_partial((char *)t1, tot_len, 0);
1076
1077 memset(&fl, 0, sizeof(fl));
0660e03f
ACM
1078 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1079 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1da177e4
LT
1080
1081 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1082 tot_len, IPPROTO_TCP,
1083 buff->csum);
1084
1085 fl.proto = IPPROTO_TCP;
505cbfc5 1086 fl.oif = inet6_iif(skb);
1da177e4
LT
1087 fl.fl_ip_dport = t1->dest;
1088 fl.fl_ip_sport = t1->source;
beb8d13b 1089 security_skb_classify_flow(skb, &fl);
1da177e4 1090
e5047992 1091 if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
ecc51b6d 1092 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
e5047992 1093 ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
ecc51b6d 1094 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1da177e4 1095 return;
ecc51b6d 1096 }
1da177e4
LT
1097 }
1098
1099 kfree_skb(buff);
1100}
1101
1102static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1103{
8feaf0c0 1104 struct inet_timewait_sock *tw = inet_twsk(sk);
cfb6eeb4 1105 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1da177e4 1106
9501f972 1107 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
8feaf0c0 1108 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
9501f972 1109 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1da177e4 1110
8feaf0c0 1111 inet_twsk_put(tw);
1da177e4
LT
1112}
1113
60236fdd 1114static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1da177e4 1115{
9501f972
YH
1116 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1117 tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr));
1da177e4
LT
1118}
1119
1120
1121static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1122{
60236fdd 1123 struct request_sock *req, **prev;
aa8223c7 1124 const struct tcphdr *th = tcp_hdr(skb);
1da177e4
LT
1125 struct sock *nsk;
1126
1127 /* Find possible connection requests. */
8129765a 1128 req = inet6_csk_search_req(sk, &prev, th->source,
0660e03f
ACM
1129 &ipv6_hdr(skb)->saddr,
1130 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1da177e4
LT
1131 if (req)
1132 return tcp_check_req(sk, skb, req, prev);
1133
3b1e0a65 1134 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
d86e0dac
PE
1135 &ipv6_hdr(skb)->saddr, th->source,
1136 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1da177e4
LT
1137
1138 if (nsk) {
1139 if (nsk->sk_state != TCP_TIME_WAIT) {
1140 bh_lock_sock(nsk);
1141 return nsk;
1142 }
9469c7b4 1143 inet_twsk_put(inet_twsk(nsk));
1da177e4
LT
1144 return NULL;
1145 }
1146
c6aefafb 1147#ifdef CONFIG_SYN_COOKIES
1da177e4 1148 if (!th->rst && !th->syn && th->ack)
c6aefafb 1149 sk = cookie_v6_check(sk, skb);
1da177e4
LT
1150#endif
1151 return sk;
1152}
1153
1da177e4
LT
1154/* FIXME: this is substantially similar to the ipv4 code.
1155 * Can some kind of merge be done? -- erics
1156 */
1157static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1158{
ca304b61 1159 struct inet6_request_sock *treq;
1da177e4
LT
1160 struct ipv6_pinfo *np = inet6_sk(sk);
1161 struct tcp_options_received tmp_opt;
1162 struct tcp_sock *tp = tcp_sk(sk);
60236fdd 1163 struct request_sock *req = NULL;
1da177e4 1164 __u32 isn = TCP_SKB_CB(skb)->when;
c6aefafb
GG
1165#ifdef CONFIG_SYN_COOKIES
1166 int want_cookie = 0;
1167#else
1168#define want_cookie 0
1169#endif
1da177e4
LT
1170
1171 if (skb->protocol == htons(ETH_P_IP))
1172 return tcp_v4_conn_request(sk, skb);
1173
1174 if (!ipv6_unicast_destination(skb))
1ab1457c 1175 goto drop;
1da177e4 1176
463c84b9 1177 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1da177e4 1178 if (net_ratelimit())
c6aefafb
GG
1179 syn_flood_warning(skb);
1180#ifdef CONFIG_SYN_COOKIES
1181 if (sysctl_tcp_syncookies)
1182 want_cookie = 1;
1183 else
1184#endif
1ab1457c 1185 goto drop;
1da177e4
LT
1186 }
1187
463c84b9 1188 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1da177e4
LT
1189 goto drop;
1190
ca304b61 1191 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1da177e4
LT
1192 if (req == NULL)
1193 goto drop;
1194
cfb6eeb4
YH
1195#ifdef CONFIG_TCP_MD5SIG
1196 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1197#endif
1198
1da177e4
LT
1199 tcp_clear_options(&tmp_opt);
1200 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1201 tmp_opt.user_mss = tp->rx_opt.user_mss;
1202
1203 tcp_parse_options(skb, &tmp_opt, 0);
1204
4dfc2817 1205 if (want_cookie && !tmp_opt.saw_tstamp)
c6aefafb 1206 tcp_clear_options(&tmp_opt);
c6aefafb 1207
1da177e4
LT
1208 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1209 tcp_openreq_init(req, &tmp_opt, skb);
1210
ca304b61 1211 treq = inet6_rsk(req);
0660e03f
ACM
1212 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1213 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
c6aefafb
GG
1214 if (!want_cookie)
1215 TCP_ECN_create_request(req, tcp_hdr(skb));
1216
1217 if (want_cookie) {
1218 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
4dfc2817 1219 req->cookie_ts = tmp_opt.tstamp_ok;
c6aefafb
GG
1220 } else if (!isn) {
1221 if (ipv6_opt_accepted(sk, skb) ||
1222 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1223 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1224 atomic_inc(&skb->users);
1225 treq->pktopts = skb;
1226 }
1227 treq->iif = sk->sk_bound_dev_if;
1da177e4 1228
c6aefafb
GG
1229 /* So that link locals have meaning */
1230 if (!sk->sk_bound_dev_if &&
1231 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1232 treq->iif = inet6_iif(skb);
1da177e4 1233
a94f723d 1234 isn = tcp_v6_init_sequence(skb);
c6aefafb 1235 }
1da177e4 1236
2e6599cb 1237 tcp_rsk(req)->snt_isn = isn;
1da177e4 1238
4237c75c
VY
1239 security_inet_conn_request(sk, skb, req);
1240
fd80eb94 1241 if (tcp_v6_send_synack(sk, req))
1da177e4
LT
1242 goto drop;
1243
c6aefafb
GG
1244 if (!want_cookie) {
1245 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1246 return 0;
1247 }
1da177e4
LT
1248
1249drop:
1250 if (req)
60236fdd 1251 reqsk_free(req);
1da177e4 1252
1da177e4
LT
1253 return 0; /* don't send reset */
1254}
1255
1256static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
60236fdd 1257 struct request_sock *req,
1da177e4
LT
1258 struct dst_entry *dst)
1259{
ca304b61 1260 struct inet6_request_sock *treq = inet6_rsk(req);
1da177e4
LT
1261 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1262 struct tcp6_sock *newtcp6sk;
1263 struct inet_sock *newinet;
1264 struct tcp_sock *newtp;
1265 struct sock *newsk;
1266 struct ipv6_txoptions *opt;
cfb6eeb4
YH
1267#ifdef CONFIG_TCP_MD5SIG
1268 struct tcp_md5sig_key *key;
1269#endif
1da177e4
LT
1270
1271 if (skb->protocol == htons(ETH_P_IP)) {
1272 /*
1273 * v6 mapped
1274 */
1275
1276 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1277
1ab1457c 1278 if (newsk == NULL)
1da177e4
LT
1279 return NULL;
1280
1281 newtcp6sk = (struct tcp6_sock *)newsk;
1282 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1283
1284 newinet = inet_sk(newsk);
1285 newnp = inet6_sk(newsk);
1286 newtp = tcp_sk(newsk);
1287
1288 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1289
1290 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1291 newinet->daddr);
1292
1293 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1294 newinet->saddr);
1295
1296 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1297
8292a17a 1298 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1da177e4 1299 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
cfb6eeb4
YH
1300#ifdef CONFIG_TCP_MD5SIG
1301 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1302#endif
1303
1da177e4
LT
1304 newnp->pktoptions = NULL;
1305 newnp->opt = NULL;
505cbfc5 1306 newnp->mcast_oif = inet6_iif(skb);
0660e03f 1307 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1da177e4 1308
e6848976
ACM
1309 /*
1310 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1311 * here, tcp_create_openreq_child now does this for us, see the comment in
1312 * that function for the gory details. -acme
1da177e4 1313 */
1da177e4
LT
1314
1315 /* It is tricky place. Until this moment IPv4 tcp
8292a17a 1316 worked with IPv6 icsk.icsk_af_ops.
1da177e4
LT
1317 Sync it now.
1318 */
d83d8461 1319 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1da177e4
LT
1320
1321 return newsk;
1322 }
1323
1324 opt = np->opt;
1325
1326 if (sk_acceptq_is_full(sk))
1327 goto out_overflow;
1328
1da177e4
LT
1329 if (dst == NULL) {
1330 struct in6_addr *final_p = NULL, final;
1331 struct flowi fl;
1332
1333 memset(&fl, 0, sizeof(fl));
1334 fl.proto = IPPROTO_TCP;
2e6599cb 1335 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1da177e4
LT
1336 if (opt && opt->srcrt) {
1337 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1338 ipv6_addr_copy(&final, &fl.fl6_dst);
1339 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1340 final_p = &final;
1341 }
2e6599cb 1342 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1da177e4 1343 fl.oif = sk->sk_bound_dev_if;
2e6599cb 1344 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1da177e4 1345 fl.fl_ip_sport = inet_sk(sk)->sport;
4237c75c 1346 security_req_classify_flow(req, &fl);
1da177e4
LT
1347
1348 if (ip6_dst_lookup(sk, &dst, &fl))
1349 goto out;
1350
1351 if (final_p)
1352 ipv6_addr_copy(&fl.fl6_dst, final_p);
1353
1354 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1355 goto out;
1ab1457c 1356 }
1da177e4
LT
1357
1358 newsk = tcp_create_openreq_child(sk, req, skb);
1359 if (newsk == NULL)
1360 goto out;
1361
e6848976
ACM
1362 /*
1363 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1364 * count here, tcp_create_openreq_child now does this for us, see the
1365 * comment in that function for the gory details. -acme
1366 */
1da177e4 1367
59eed279 1368 newsk->sk_gso_type = SKB_GSO_TCPV6;
8e1ef0a9 1369 __ip6_dst_store(newsk, dst, NULL, NULL);
1da177e4
LT
1370
1371 newtcp6sk = (struct tcp6_sock *)newsk;
1372 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1373
1374 newtp = tcp_sk(newsk);
1375 newinet = inet_sk(newsk);
1376 newnp = inet6_sk(newsk);
1377
1378 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1379
2e6599cb
ACM
1380 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1381 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1382 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1383 newsk->sk_bound_dev_if = treq->iif;
1da177e4 1384
1ab1457c 1385 /* Now IPv6 options...
1da177e4
LT
1386
1387 First: no IPv4 options.
1388 */
1389 newinet->opt = NULL;
d35690be 1390 newnp->ipv6_fl_list = NULL;
1da177e4
LT
1391
1392 /* Clone RX bits */
1393 newnp->rxopt.all = np->rxopt.all;
1394
1395 /* Clone pktoptions received with SYN */
1396 newnp->pktoptions = NULL;
2e6599cb
ACM
1397 if (treq->pktopts != NULL) {
1398 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1399 kfree_skb(treq->pktopts);
1400 treq->pktopts = NULL;
1da177e4
LT
1401 if (newnp->pktoptions)
1402 skb_set_owner_r(newnp->pktoptions, newsk);
1403 }
1404 newnp->opt = NULL;
505cbfc5 1405 newnp->mcast_oif = inet6_iif(skb);
0660e03f 1406 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1da177e4
LT
1407
1408 /* Clone native IPv6 options from listening socket (if any)
1409
1410 Yes, keeping reference count would be much more clever,
1411 but we make one more one thing there: reattach optmem
1412 to newsk.
1413 */
1414 if (opt) {
1415 newnp->opt = ipv6_dup_options(newsk, opt);
1416 if (opt != np->opt)
1417 sock_kfree_s(sk, opt, opt->tot_len);
1418 }
1419
d83d8461 1420 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1da177e4 1421 if (newnp->opt)
d83d8461
ACM
1422 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1423 newnp->opt->opt_flen);
1da177e4 1424
5d424d5a 1425 tcp_mtup_init(newsk);
1da177e4
LT
1426 tcp_sync_mss(newsk, dst_mtu(dst));
1427 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1428 tcp_initialize_rcv_mss(newsk);
1429
1430 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1431
cfb6eeb4
YH
1432#ifdef CONFIG_TCP_MD5SIG
1433 /* Copy over the MD5 key from the original socket */
1434 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1435 /* We're using one, so create a matching key
1436 * on the newsk structure. If we fail to get
1437 * memory, then we end up not copying the key
1438 * across. Shucks.
1439 */
af879cc7
ACM
1440 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1441 if (newkey != NULL)
cfb6eeb4
YH
1442 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1443 newkey, key->keylen);
cfb6eeb4
YH
1444 }
1445#endif
1446
ab1e0a13 1447 __inet6_hash(newsk);
e56d8b8a 1448 __inet_inherit_port(sk, newsk);
1da177e4
LT
1449
1450 return newsk;
1451
1452out_overflow:
1453 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1454out:
1455 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1456 if (opt && opt != np->opt)
1457 sock_kfree_s(sk, opt, opt->tot_len);
1458 dst_release(dst);
1459 return NULL;
1460}
1461
b51655b9 1462static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1da177e4 1463{
84fa7933 1464 if (skb->ip_summed == CHECKSUM_COMPLETE) {
aa8223c7 1465 if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
0660e03f 1466 &ipv6_hdr(skb)->daddr, skb->csum)) {
fb286bb2 1467 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 1468 return 0;
fb286bb2 1469 }
1da177e4 1470 }
fb286bb2 1471
aa8223c7 1472 skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
0660e03f
ACM
1473 &ipv6_hdr(skb)->saddr,
1474 &ipv6_hdr(skb)->daddr, 0));
fb286bb2 1475
1da177e4 1476 if (skb->len <= 76) {
fb286bb2 1477 return __skb_checksum_complete(skb);
1da177e4
LT
1478 }
1479 return 0;
1480}
1481
1482/* The socket must have it's spinlock held when we get
1483 * here.
1484 *
1485 * We have a potential double-lock case here, so even when
1486 * doing backlog processing we use the BH locking scheme.
1487 * This is because we cannot sleep with the original spinlock
1488 * held.
1489 */
1490static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1491{
1492 struct ipv6_pinfo *np = inet6_sk(sk);
1493 struct tcp_sock *tp;
1494 struct sk_buff *opt_skb = NULL;
1495
1496 /* Imagine: socket is IPv6. IPv4 packet arrives,
1497 goes to IPv4 receive handler and backlogged.
1498 From backlog it always goes here. Kerboom...
1499 Fortunately, tcp_rcv_established and rcv_established
1500 handle them correctly, but it is not case with
1501 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1502 */
1503
1504 if (skb->protocol == htons(ETH_P_IP))
1505 return tcp_v4_do_rcv(sk, skb);
1506
cfb6eeb4
YH
1507#ifdef CONFIG_TCP_MD5SIG
1508 if (tcp_v6_inbound_md5_hash (sk, skb))
1509 goto discard;
1510#endif
1511
fda9ef5d 1512 if (sk_filter(sk, skb))
1da177e4
LT
1513 goto discard;
1514
1515 /*
1516 * socket locking is here for SMP purposes as backlog rcv
1517 * is currently called with bh processing disabled.
1518 */
1519
1520 /* Do Stevens' IPV6_PKTOPTIONS.
1521
1522 Yes, guys, it is the only place in our code, where we
1523 may make it not affecting IPv4.
1524 The rest of code is protocol independent,
1525 and I do not like idea to uglify IPv4.
1526
1527 Actually, all the idea behind IPV6_PKTOPTIONS
1528 looks not very well thought. For now we latch
1529 options, received in the last packet, enqueued
1530 by tcp. Feel free to propose better solution.
1ab1457c 1531 --ANK (980728)
1da177e4
LT
1532 */
1533 if (np->rxopt.all)
1534 opt_skb = skb_clone(skb, GFP_ATOMIC);
1535
1536 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1537 TCP_CHECK_TIMER(sk);
aa8223c7 1538 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1da177e4
LT
1539 goto reset;
1540 TCP_CHECK_TIMER(sk);
1541 if (opt_skb)
1542 goto ipv6_pktoptions;
1543 return 0;
1544 }
1545
ab6a5bb6 1546 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1da177e4
LT
1547 goto csum_err;
1548
1ab1457c 1549 if (sk->sk_state == TCP_LISTEN) {
1da177e4
LT
1550 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1551 if (!nsk)
1552 goto discard;
1553
1554 /*
1555 * Queue it on the new socket if the new socket is active,
1556 * otherwise we just shortcircuit this and continue with
1557 * the new socket..
1558 */
1ab1457c 1559 if(nsk != sk) {
1da177e4
LT
1560 if (tcp_child_process(sk, nsk, skb))
1561 goto reset;
1562 if (opt_skb)
1563 __kfree_skb(opt_skb);
1564 return 0;
1565 }
1566 }
1567
1568 TCP_CHECK_TIMER(sk);
aa8223c7 1569 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1da177e4
LT
1570 goto reset;
1571 TCP_CHECK_TIMER(sk);
1572 if (opt_skb)
1573 goto ipv6_pktoptions;
1574 return 0;
1575
1576reset:
cfb6eeb4 1577 tcp_v6_send_reset(sk, skb);
1da177e4
LT
1578discard:
1579 if (opt_skb)
1580 __kfree_skb(opt_skb);
1581 kfree_skb(skb);
1582 return 0;
1583csum_err:
1584 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1585 goto discard;
1586
1587
1588ipv6_pktoptions:
1589 /* Do you ask, what is it?
1590
1591 1. skb was enqueued by tcp.
1592 2. skb is added to tail of read queue, rather than out of order.
1593 3. socket is not in passive state.
1594 4. Finally, it really contains options, which user wants to receive.
1595 */
1596 tp = tcp_sk(sk);
1597 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1598 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
333fad53 1599 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
505cbfc5 1600 np->mcast_oif = inet6_iif(opt_skb);
333fad53 1601 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
0660e03f 1602 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1da177e4
LT
1603 if (ipv6_opt_accepted(sk, opt_skb)) {
1604 skb_set_owner_r(opt_skb, sk);
1605 opt_skb = xchg(&np->pktoptions, opt_skb);
1606 } else {
1607 __kfree_skb(opt_skb);
1608 opt_skb = xchg(&np->pktoptions, NULL);
1609 }
1610 }
1611
1612 if (opt_skb)
1613 kfree_skb(opt_skb);
1614 return 0;
1615}
1616
e5bbef20 1617static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1618{
1ab1457c 1619 struct tcphdr *th;
1da177e4
LT
1620 struct sock *sk;
1621 int ret;
1622
1623 if (skb->pkt_type != PACKET_HOST)
1624 goto discard_it;
1625
1626 /*
1627 * Count it even if it's bad.
1628 */
1629 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1630
1631 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1632 goto discard_it;
1633
aa8223c7 1634 th = tcp_hdr(skb);
1da177e4
LT
1635
1636 if (th->doff < sizeof(struct tcphdr)/4)
1637 goto bad_packet;
1638 if (!pskb_may_pull(skb, th->doff*4))
1639 goto discard_it;
1640
60476372 1641 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1da177e4
LT
1642 goto bad_packet;
1643
aa8223c7 1644 th = tcp_hdr(skb);
1da177e4
LT
1645 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1646 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1647 skb->len - th->doff*4);
1648 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1649 TCP_SKB_CB(skb)->when = 0;
0660e03f 1650 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1da177e4
LT
1651 TCP_SKB_CB(skb)->sacked = 0;
1652
c346dca1 1653 sk = __inet6_lookup(dev_net(skb->dev), &tcp_hashinfo,
d86e0dac
PE
1654 &ipv6_hdr(skb)->saddr, th->source,
1655 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1656 inet6_iif(skb));
1da177e4
LT
1657
1658 if (!sk)
1659 goto no_tcp_socket;
1660
1661process:
1662 if (sk->sk_state == TCP_TIME_WAIT)
1663 goto do_time_wait;
1664
1665 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1666 goto discard_and_relse;
1667
fda9ef5d 1668 if (sk_filter(sk, skb))
1da177e4
LT
1669 goto discard_and_relse;
1670
1671 skb->dev = NULL;
1672
293b9c42 1673 bh_lock_sock_nested(sk);
1da177e4
LT
1674 ret = 0;
1675 if (!sock_owned_by_user(sk)) {
1a2449a8 1676#ifdef CONFIG_NET_DMA
1ab1457c 1677 struct tcp_sock *tp = tcp_sk(sk);
b4caea8a
DM
1678 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1679 tp->ucopy.dma_chan = get_softnet_dma();
1ab1457c
YH
1680 if (tp->ucopy.dma_chan)
1681 ret = tcp_v6_do_rcv(sk, skb);
1682 else
1a2449a8
CL
1683#endif
1684 {
1685 if (!tcp_prequeue(sk, skb))
1686 ret = tcp_v6_do_rcv(sk, skb);
1687 }
1da177e4
LT
1688 } else
1689 sk_add_backlog(sk, skb);
1690 bh_unlock_sock(sk);
1691
1692 sock_put(sk);
1693 return ret ? -1 : 0;
1694
1695no_tcp_socket:
1696 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1697 goto discard_it;
1698
1699 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1700bad_packet:
1701 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1702 } else {
cfb6eeb4 1703 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1704 }
1705
1706discard_it:
1707
1708 /*
1709 * Discard frame
1710 */
1711
1712 kfree_skb(skb);
1713 return 0;
1714
1715discard_and_relse:
1716 sock_put(sk);
1717 goto discard_it;
1718
1719do_time_wait:
1720 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1721 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1722 goto discard_it;
1723 }
1724
1725 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1726 TCP_INC_STATS_BH(TCP_MIB_INERRS);
9469c7b4 1727 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1728 goto discard_it;
1729 }
1730
9469c7b4 1731 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1732 case TCP_TW_SYN:
1733 {
1734 struct sock *sk2;
1735
c346dca1 1736 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
0660e03f 1737 &ipv6_hdr(skb)->daddr,
505cbfc5 1738 ntohs(th->dest), inet6_iif(skb));
1da177e4 1739 if (sk2 != NULL) {
295ff7ed
ACM
1740 struct inet_timewait_sock *tw = inet_twsk(sk);
1741 inet_twsk_deschedule(tw, &tcp_death_row);
1742 inet_twsk_put(tw);
1da177e4
LT
1743 sk = sk2;
1744 goto process;
1745 }
1746 /* Fall through to ACK */
1747 }
1748 case TCP_TW_ACK:
1749 tcp_v6_timewait_ack(sk, skb);
1750 break;
1751 case TCP_TW_RST:
1752 goto no_tcp_socket;
1753 case TCP_TW_SUCCESS:;
1754 }
1755 goto discard_it;
1756}
1757
1da177e4
LT
1758static int tcp_v6_remember_stamp(struct sock *sk)
1759{
1760 /* Alas, not yet... */
1761 return 0;
1762}
1763
8292a17a 1764static struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1765 .queue_xmit = inet6_csk_xmit,
1766 .send_check = tcp_v6_send_check,
1767 .rebuild_header = inet6_sk_rebuild_header,
1768 .conn_request = tcp_v6_conn_request,
1769 .syn_recv_sock = tcp_v6_syn_recv_sock,
1770 .remember_stamp = tcp_v6_remember_stamp,
1771 .net_header_len = sizeof(struct ipv6hdr),
1772 .setsockopt = ipv6_setsockopt,
1773 .getsockopt = ipv6_getsockopt,
1774 .addr2sockaddr = inet6_csk_addr2sockaddr,
1775 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1776 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1777#ifdef CONFIG_COMPAT
543d9cfe
ACM
1778 .compat_setsockopt = compat_ipv6_setsockopt,
1779 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1780#endif
1da177e4
LT
1781};
1782
cfb6eeb4 1783#ifdef CONFIG_TCP_MD5SIG
a928630a 1784static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4
YH
1785 .md5_lookup = tcp_v6_md5_lookup,
1786 .calc_md5_hash = tcp_v6_calc_md5_hash,
1787 .md5_add = tcp_v6_md5_add_func,
1788 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1789};
a928630a 1790#endif
cfb6eeb4 1791
1da177e4
LT
1792/*
1793 * TCP over IPv4 via INET6 API
1794 */
1795
8292a17a 1796static struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1797 .queue_xmit = ip_queue_xmit,
1798 .send_check = tcp_v4_send_check,
1799 .rebuild_header = inet_sk_rebuild_header,
1800 .conn_request = tcp_v6_conn_request,
1801 .syn_recv_sock = tcp_v6_syn_recv_sock,
1802 .remember_stamp = tcp_v4_remember_stamp,
1803 .net_header_len = sizeof(struct iphdr),
1804 .setsockopt = ipv6_setsockopt,
1805 .getsockopt = ipv6_getsockopt,
1806 .addr2sockaddr = inet6_csk_addr2sockaddr,
1807 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1808 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1809#ifdef CONFIG_COMPAT
543d9cfe
ACM
1810 .compat_setsockopt = compat_ipv6_setsockopt,
1811 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1812#endif
1da177e4
LT
1813};
1814
cfb6eeb4 1815#ifdef CONFIG_TCP_MD5SIG
a928630a 1816static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4
YH
1817 .md5_lookup = tcp_v4_md5_lookup,
1818 .calc_md5_hash = tcp_v4_calc_md5_hash,
1819 .md5_add = tcp_v6_md5_add_func,
1820 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1821};
a928630a 1822#endif
cfb6eeb4 1823
1da177e4
LT
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);

        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;

        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
         * algorithms that we must have the following bandaid to talk
         * efficiently to them.  -DaveM
         */
        tp->snd_cwnd = 2;

        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
        tp->snd_ssthresh = 0x7fffffff;
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = 536;

        tp->reordering = sysctl_tcp_reordering;

        sk->sk_state = TCP_CLOSE;

        icsk->icsk_af_ops = &ipv6_specific;
        icsk->icsk_ca_ops = &tcp_init_congestion_ops;
        icsk->icsk_sync_mss = tcp_sync_mss;
        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
        tp->af_specific = &tcp_sock_ipv6_specific;
#endif

        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];

        atomic_inc(&tcp_sockets_allocated);

        return 0;
}

static int tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        /* Clean up the MD5 key list */
        if (tcp_sk(sk)->md5sig_info)
                tcp_v6_clear_md5_list(sk);
#endif
        tcp_v4_destroy_sock(sk);
        return inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
                         struct sock *sk, struct request_sock *req, int i, int uid)
{
        int ttd = req->expires - jiffies;
        struct in6_addr *src = &inet6_rsk(req)->loc_addr;
        struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

        if (ttd < 0)
                ttd = 0;

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
                   ntohs(inet_sk(sk)->sport),
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3],
                   ntohs(inet_rsk(req)->rmt_port),
                   TCP_SYN_RECV,
                   0, 0, /* could print option size, but that is af dependent. */
                   1,    /* timers active (only the expire timer) */
                   jiffies_to_clock_t(ttd),
                   req->retrans,
                   uid,
                   0,    /* non standard timer */
                   0,    /* open_requests have no inode */
                   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
        struct in6_addr *dest, *src;
        __u16 destp, srcp;
        int timer_active;
        unsigned long timer_expires;
        struct inet_sock *inet = inet_sk(sp);
        struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
        struct ipv6_pinfo *np = inet6_sk(sp);

        dest  = &np->daddr;
        src   = &np->rcv_saddr;
        destp = ntohs(inet->dport);
        srcp  = ntohs(inet->sport);

        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active  = 1;
                timer_expires = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active  = 4;
                timer_expires = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active  = 2;
                timer_expires = sp->sk_timer.expires;
        } else {
                timer_active  = 0;
                timer_expires = jiffies;
        }

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   sp->sk_state,
                   tp->write_seq - tp->snd_una,
                   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
                   timer_active,
                   jiffies_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
                   sock_i_uid(sp),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
                   icsk->icsk_rto,
                   icsk->icsk_ack.ato,
                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
                   );
}

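/*
 * Aside, not part of this kernel file: the format string above fixes the
 * layout of every /proc/net/tcp6 row.  A minimal standalone userspace
 * reader, compiled separately, could pull out the local endpoint and socket
 * state like this (buffer sizes and the output format are illustrative):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[512], local[64], remote[64];
 *		unsigned int port, state;
 *		FILE *f = fopen("/proc/net/tcp6", "r");
 *
 *		if (!f)
 *			return 1;
 *		fgets(line, sizeof(line), f);	// skip the header row
 *		while (fgets(line, sizeof(line), f)) {
 *			// "  sl  local_address remote_address st ..."
 *			if (sscanf(line, "%*d: %32[0-9A-Fa-f]:%x %32[0-9A-Fa-f]:%*x %x",
 *				   local, &port, remote, &state) != 4)
 *				continue;
 *			printf("local %s port %u state %02X\n", local, port, state);
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */
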
static void get_timewait6_sock(struct seq_file *seq,
                               struct inet_timewait_sock *tw, int i)
{
        struct in6_addr *dest, *src;
        __u16 destp, srcp;
        struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
        int ttd = tw->tw_ttd - jiffies;

        if (ttd < 0)
                ttd = 0;

        dest  = &tw6->tw_v6_daddr;
        src   = &tw6->tw_v6_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
                   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
                   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st;

        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
                         "  sl  "
                         "local_address                         "
                         "remote_address                        "
                         "st tx_queue rx_queue tr tm->when retrnsmt"
                         "   uid  timeout inode\n");
                goto out;
        }
        st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
        case TCP_SEQ_STATE_ESTABLISHED:
                get_tcp6_sock(seq, v, st->num);
                break;
        case TCP_SEQ_STATE_OPENREQ:
                get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
                break;
        case TCP_SEQ_STATE_TIME_WAIT:
                get_timewait6_sock(seq, v, st->num);
                break;
        }
out:
        return 0;
}

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
        .name     = "tcp6",
        .family   = AF_INET6,
        /* The remaining seq_fops/seq_ops callbacks are filled in by
         * tcp_proc_register() when this afinfo is registered below. */
        .seq_fops = {
                .owner = THIS_MODULE,
        },
        .seq_ops  = {
                .show  = tcp6_seq_show,
        },
};

int tcp6_proc_init(struct net *net)
{
        return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
        tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
        .name                   = "TCPv6",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .connect                = tcp_v6_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v6_init_sock,
        .destroy                = tcp_v6_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .recvmsg                = tcp_recvmsg,
        .backlog_rcv            = tcp_v6_do_rcv,
        .hash                   = tcp_v6_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .sockets_allocated      = &tcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
        .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp6_sock),
        .twsk_prot              = &tcp6_timewait_sock_ops,
        .rsk_prot               = &tcp6_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
#ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
#endif
};

static struct inet6_protocol tcpv6_protocol = {
        .handler        = tcp_v6_rcv,
        .err_handler    = tcp_v6_err,
        .gso_send_check = tcp_v6_gso_send_check,
        .gso_segment    = tcp_tso_segment,
        .flags          = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
        .type           = SOCK_STREAM,
        .protocol       = IPPROTO_TCP,
        .prot           = &tcpv6_prot,
        .ops            = &inet6_stream_ops,
        .capability     = -1,
        .no_check       = 0,
        .flags          = INET_PROTOSW_PERMANENT |
                          INET_PROTOSW_ICSK,
};

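/*
 * Userspace aside, not kernel code: a stream socket opened on AF_INET6 is
 * what ends up being served by the tcpv6_protosw entry above (via
 * inet6_stream_ops and tcpv6_prot).  A minimal, separately compiled example;
 * the port number is arbitrary:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_in6 addr;
 *		int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&addr, 0, sizeof(addr));
 *		addr.sin6_family = AF_INET6;
 *		addr.sin6_addr   = in6addr_loopback;
 *		addr.sin6_port   = htons(8080);
 *		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) || listen(fd, 16)) {
 *			close(fd);
 *			return 1;
 *		}
 *		// ... accept() connections handled by this file's TCP code ...
 *		close(fd);
 *		return 0;
 *	}
 */
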
static int tcpv6_net_init(struct net *net)
{
        return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
                                    SOCK_RAW, IPPROTO_TCP, net);
}

static void tcpv6_net_exit(struct net *net)
{
        inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static struct pernet_operations tcpv6_net_ops = {
        .init = tcpv6_net_init,
        .exit = tcpv6_net_exit,
};

int __init tcpv6_init(void)
{
        int ret;

        ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
        if (ret)
                goto out;

        /* register inet6 protocol */
        ret = inet6_register_protosw(&tcpv6_protosw);
        if (ret)
                goto out_tcpv6_protocol;

        ret = register_pernet_subsys(&tcpv6_net_ops);
        if (ret)
                goto out_tcpv6_protosw;
out:
        return ret;

        /* Error unwinding: undo the registrations in reverse order. */
out_tcpv6_protosw:
        inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
        goto out;
}

void tcpv6_exit(void)
{
        unregister_pernet_subsys(&tcpv6_net_ops);
        inet6_unregister_protosw(&tcpv6_protosw);
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}
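
/*
 * Hedged caller-side sketch (the real call sites are expected to live in
 * net/ipv6/af_inet6.c): tcpv6_init() is run once during AF_INET6 bring-up,
 * and tcpv6_exit() undoes all three registrations above, e.g. when a later
 * step of IPv6 initialisation fails.  The condition below is illustrative
 * only:
 *
 *	err = tcpv6_init();
 *	if (err)
 *		goto out;
 *	...				// later AF_INET6 setup steps
 *	if (some_later_step_failed)	// hypothetical failure path
 *		tcpv6_exit();
 */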